commit     fdbccc1e74c56da19bffc33bfe4f9f8d2b2d26f8 (patch)
author     Richard van der Hoff <richard@matrix.org>  2021-02-26 14:05:40 +0000
committer  Richard van der Hoff <richard@matrix.org>  2021-02-26 14:05:40 +0000
tree       4e2e1bdc58f6f77d7856e56261ace9b0218bb9f6 /synapse
parent     Revert "Redirect redirect requests if they arrive on the wrong URI" (diff)
parent     SSO: redirect to public URL before setting cookies (#9436) (diff)
download   synapse-fdbccc1e74c56da19bffc33bfe4f9f8d2b2d26f8.tar.xz

Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes
Diffstat (limited to 'synapse')
-rw-r--r--  synapse/__init__.py                                                              |   2
-rw-r--r--  synapse/app/admin_cmd.py                                                         |   2
-rw-r--r--  synapse/app/generic_worker.py                                                    |  35
-rw-r--r--  synapse/config/_base.py                                                          |  36
-rw-r--r--  synapse/config/_base.pyi                                                         |   2
-rw-r--r--  synapse/config/push.py                                                           |   5
-rw-r--r--  synapse/config/repository.py                                                     |   1
-rw-r--r--  synapse/config/server.py                                                         |   1
-rw-r--r--  synapse/config/workers.py                                                        |  93
-rw-r--r--  synapse/handlers/deactivate_account.py                                           |   5
-rw-r--r--  synapse/handlers/sync.py                                                         |   3
-rw-r--r--  synapse/http/__init__.py                                                         |  37
-rw-r--r--  synapse/http/site.py                                                             |  85
-rw-r--r--  synapse/push/httppusher.py                                                       |   3
-rw-r--r--  synapse/push/pusherpool.py                                                       |  33
-rw-r--r--  synapse/python_dependencies.py                                                   |   3
-rw-r--r--  synapse/replication/http/__init__.py                                             |   2
-rw-r--r--  synapse/replication/http/push.py                                                 |  72
-rw-r--r--  synapse/replication/tcp/commands.py                                              |  27
-rw-r--r--  synapse/replication/tcp/handler.py                                               |  23
-rw-r--r--  synapse/rest/admin/users.py                                                      |  28
-rw-r--r--  synapse/rest/client/v1/login.py                                                  |  28
-rw-r--r--  synapse/rest/synapse/client/__init__.py                                          |   7
-rw-r--r--  synapse/server.py                                                                |  10
-rw-r--r--  synapse/storage/database.py                                                      |  16
-rw-r--r--  synapse/storage/databases/__init__.py                                            |   2
-rw-r--r--  synapse/storage/databases/main/events.py                                         |  14
-rw-r--r--  synapse/storage/databases/main/events_bg_updates.py                              |   1
-rw-r--r--  synapse/storage/databases/main/events_worker.py                                  |  16
-rw-r--r--  synapse/storage/databases/main/media_repository.py                               |  41
-rw-r--r--  synapse/storage/databases/main/pusher.py                                         |  43
-rw-r--r--  synapse/storage/databases/main/registration.py                                   |  19
-rw-r--r--  synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql |  21
-rw-r--r--  synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql        |  19
-rw-r--r--  synapse/storage/databases/main/user_directory.py                                 |   4
-rw-r--r--  synapse/storage/databases/state/store.py                                         |  10
-rw-r--r--  synapse/storage/util/sequence.py                                                 |  24
-rw-r--r--  synapse/util/caches/response_cache.py                                            |  34
38 files changed, 632 insertions, 175 deletions
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 2e70f46186..869e860fb0 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -48,7 +48,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.28.0rc1"
+__version__ = "1.28.0"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index b4bd4d8e7a..9f99651aa2 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -210,7 +210,9 @@ def start(config_options):
     config.update_user_directory = False
     config.run_background_tasks = False
     config.start_pushers = False
+    config.pusher_shard_config.instances = []
     config.send_federation = False
+    config.federation_shard_config.instances = []
 
     synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 6526acb2f2..dc0d3eb725 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -645,9 +645,6 @@ class GenericWorkerServer(HomeServer):
 
         self.get_tcp_replication().start_replication(self)
 
-    async def remove_pusher(self, app_id, push_key, user_id):
-        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
-
     @cache_in_self
     def get_replication_data_handler(self):
         return GenericWorkerReplicationHandler(self)
@@ -922,22 +919,6 @@ def start(config_options):
         # For other worker types we force this to off.
         config.appservice.notify_appservices = False
 
-    if config.worker_app == "synapse.app.pusher":
-        if config.server.start_pushers:
-            sys.stderr.write(
-                "\nThe pushers must be disabled in the main synapse process"
-                "\nbefore they can be run in a separate worker."
-                "\nPlease add ``start_pushers: false`` to the main config"
-                "\n"
-            )
-            sys.exit(1)
-
-        # Force the pushers to start since they will be disabled in the main config
-        config.server.start_pushers = True
-    else:
-        # For other worker types we force this to off.
-        config.server.start_pushers = False
-
     if config.worker_app == "synapse.app.user_dir":
         if config.server.update_user_directory:
             sys.stderr.write(
@@ -954,22 +935,6 @@ def start(config_options):
         # For other worker types we force this to off.
         config.server.update_user_directory = False
 
-    if config.worker_app == "synapse.app.federation_sender":
-        if config.worker.send_federation:
-            sys.stderr.write(
-                "\nThe send_federation must be disabled in the main synapse process"
-                "\nbefore they can be run in a separate worker."
-                "\nPlease add ``send_federation: false`` to the main config"
-                "\n"
-            )
-            sys.exit(1)
-
-        # Force the pushers to start since they will be disabled in the main config
-        config.worker.send_federation = True
-    else:
-        # For other worker types we force this to off.
-        config.worker.send_federation = False
-
     synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
     hs = GenericWorkerServer(
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index e89decda34..4026966711 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -844,22 +844,23 @@ class ShardedWorkerHandlingConfig:
 
     def should_handle(self, instance_name: str, key: str) -> bool:
         """Whether this instance is responsible for handling the given key."""
-        # If multiple instances are not defined we always return true
-        if not self.instances or len(self.instances) == 1:
-            return True
+        # If no instances are defined we assume some other worker is handling
+        # this.
+        if not self.instances:
+            return False
 
-        return self.get_instance(key) == instance_name
+        return self._get_instance(key) == instance_name
 
-    def get_instance(self, key: str) -> str:
+    def _get_instance(self, key: str) -> str:
         """Get the instance responsible for handling the given key.
 
-        Note: For things like federation sending the config for which instance
-        is sending is known only to the sender instance if there is only one.
-        Therefore `should_handle` should be used where possible.
+        Note: for federation sending and pushers, the config naming which
+        instance is responsible may be known only to that instance itself,
+        so we don't expose this method by default.
         """
 
         if not self.instances:
-            return "master"
+            raise Exception("Unknown worker")
 
         if len(self.instances) == 1:
             return self.instances[0]
@@ -876,4 +877,21 @@ class ShardedWorkerHandlingConfig:
         return self.instances[remainder]
 
 
+@attr.s
+class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
+    """A version of `ShardedWorkerHandlingConfig` that is used for config
+    options where all instances know which instances are responsible for the
+    sharded work.
+    """
+
+    def __attrs_post_init__(self):
+        # We require that `self.instances` is non-empty.
+        if not self.instances:
+            raise Exception("Got empty list of instances for shard config")
+
+    def get_instance(self, key: str) -> str:
+        """Get the instance responsible for handling the given key."""
+        return self._get_instance(key)
+
+
 __all__ = ["Config", "RootConfig", "ShardedWorkerHandlingConfig"]
diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
index 70025b5d60..db16c86f50 100644
--- a/synapse/config/_base.pyi
+++ b/synapse/config/_base.pyi
@@ -149,4 +149,6 @@ class ShardedWorkerHandlingConfig:
     instances: List[str]
     def __init__(self, instances: List[str]) -> None: ...
     def should_handle(self, instance_name: str, key: str) -> bool: ...
+
+class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
     def get_instance(self, key: str) -> str: ...
diff --git a/synapse/config/push.py b/synapse/config/push.py
index 3adbfb73e6..7831a2ef79 100644
--- a/synapse/config/push.py
+++ b/synapse/config/push.py
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import Config, ShardedWorkerHandlingConfig
+from ._base import Config
 
 
 class PushConfig(Config):
@@ -27,9 +27,6 @@ class PushConfig(Config):
             "group_unread_count_by_room", True
         )
 
-        pusher_instances = config.get("pusher_instances") or []
-        self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)
-
         # There was a 'redact_content' setting, but it was mistakenly read from
         # the 'email' section. Check for the flag in the 'push' section, and log,
         # but do not honour it to avoid nasty surprises when people upgrade.
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 52849c3256..69d9de5a43 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -206,7 +206,6 @@ class ContentRepositoryConfig(Config):
 
     def generate_config_section(self, data_dir_path, **kwargs):
         media_store = os.path.join(data_dir_path, "media_store")
-        uploads_path = os.path.join(data_dir_path, "uploads")
 
         formatted_thumbnail_sizes = "".join(
             THUMBNAIL_SIZE_YAML % s for s in DEFAULT_THUMBNAIL_SIZES
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 0bfd4398e2..2afca36e7d 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -397,7 +397,6 @@ class ServerConfig(Config):
         if self.public_baseurl is not None:
             if self.public_baseurl[-1] != "/":
                 self.public_baseurl += "/"
-        self.start_pushers = config.get("start_pushers", True)
 
         # (undocumented) option for torturing the worker-mode replication a bit,
         # for testing. The value defines the number of milliseconds to pause before
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 7a0ca16da8..ac92375a85 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -17,9 +17,28 @@ from typing import List, Union
 
 import attr
 
-from ._base import Config, ConfigError, ShardedWorkerHandlingConfig
+from ._base import (
+    Config,
+    ConfigError,
+    RoutableShardedWorkerHandlingConfig,
+    ShardedWorkerHandlingConfig,
+)
 from .server import ListenerConfig, parse_listener_def
 
+_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """
+The send_federation config option must be disabled in the main
+synapse process before federation senders can be run in a separate
+worker.
+
+Please add ``send_federation: false`` to the main config.
+"""
+
+_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR = """
+The start_pushers config option must be disabled in the main
+synapse process before pushers can be run in a separate worker.
+
+Please add ``start_pushers: false`` to the main config.
+"""
+
 
 def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
     """Helper for allowing parsing a string or list of strings to a config
@@ -103,6 +122,7 @@ class WorkerConfig(Config):
         self.worker_replication_secret = config.get("worker_replication_secret", None)
 
         self.worker_name = config.get("worker_name", self.worker_app)
+        self.instance_name = self.worker_name or "master"
 
         self.worker_main_http_uri = config.get("worker_main_http_uri", None)
 
@@ -118,12 +138,41 @@ class WorkerConfig(Config):
                 )
             )
 
-        # Whether to send federation traffic out in this process. This only
-        # applies to some federation traffic, and so shouldn't be used to
-        # "disable" federation
-        self.send_federation = config.get("send_federation", True)
+        # Handle federation sender configuration.
+        #
+        # There are two ways of configuring which instances handle federation
+        # sending:
+        #   1. The old way, where "send_federation" is set to false on the main
+        #      process and a `synapse.app.federation_sender` worker app is run.
+        #   2. Specifying the workers sending federation in
+        #      `federation_sender_instances`.
+        #
+
+        send_federation = config.get("send_federation", True)
+
+        federation_sender_instances = config.get("federation_sender_instances")
+        if federation_sender_instances is None:
+            # Default to an empty list, which means "another, unknown, worker is
+            # responsible for it".
+            federation_sender_instances = []
 
-        federation_sender_instances = config.get("federation_sender_instances") or []
+            # If no federation sender instances are set, fall back to
+            # `send_federation`: if it is true, the master handles sending.
+            if send_federation:
+                federation_sender_instances = ["master"]
+
+            if self.worker_app == "synapse.app.federation_sender":
+                if send_federation:
+                    # If we're running federation senders, and not using
+                    # `federation_sender_instances`, then we should have
+                    # explicitly set `send_federation` to false.
+                    raise ConfigError(
+                        _FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR
+                    )
+
+                federation_sender_instances = [self.worker_name]
+
+        self.send_federation = self.instance_name in federation_sender_instances
         self.federation_shard_config = ShardedWorkerHandlingConfig(
             federation_sender_instances
         )
@@ -164,7 +213,37 @@ class WorkerConfig(Config):
                 "Must only specify one instance to handle `receipts` messages."
             )
 
-        self.events_shard_config = ShardedWorkerHandlingConfig(self.writers.events)
+        if len(self.writers.events) == 0:
+            raise ConfigError("Must specify at least one instance to handle `events`.")
+
+        self.events_shard_config = RoutableShardedWorkerHandlingConfig(
+            self.writers.events
+        )
+
+        # Handle sharded push
+        start_pushers = config.get("start_pushers", True)
+        pusher_instances = config.get("pusher_instances")
+        if pusher_instances is None:
+            # Default to an empty list, which means "another, unknown, worker is
+            # responsible for it".
+            pusher_instances = []
+
+            # If no pusher instances are set, fall back to `start_pushers`:
+            # if it is true, the master runs the pushers.
+            if start_pushers:
+                pusher_instances = ["master"]
+
+            if self.worker_app == "synapse.app.pusher":
+                if start_pushers:
+                    # If we're running pushers, and not using
+                    # `pusher_instances`, then we should have explicitly set
+                    # `start_pushers` to false.
+                    raise ConfigError(_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR)
+
+                pusher_instances = [self.instance_name]
+
+        self.start_pushers = self.instance_name in pusher_instances
+        self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)
 
         # Whether this worker should run background tasks or not.
         #
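
The federation-sender and pusher blocks above share one resolution pattern;
distilled into a standalone sketch (this helper is illustrative, not Synapse
code — `explicit` stands for `federation_sender_instances`/`pusher_instances`
and `legacy_flag` for `send_federation`/`start_pushers`):

    def resolve_shard_instances(explicit, legacy_flag, worker_app, shard_app, name):
        if explicit is not None:
            return explicit
        if worker_app == shard_app:
            if legacy_flag:
                # the option must be disabled on the main process first
                raise ValueError("legacy flag still enabled in the main config")
            return [name]
        # an empty list means "another, unknown, worker is responsible"
        return ["master"] if legacy_flag else []

    # Defaults on a monolithic install: the master does the sending.
    assert resolve_shard_instances(
        None, True, None, "synapse.app.federation_sender", "master"
    ) == ["master"]
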
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 94f3f3163f..3886d3124d 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -120,6 +120,11 @@ class DeactivateAccountHandler(BaseHandler):
 
         await self.store.user_set_password_hash(user_id, None)
 
+        # Most of the pushers will have been deleted when we logged out the
+        # associated devices above, but we still need to delete pushers not
+        # associated with devices, e.g. email pushers.
+        await self.store.delete_all_pushers_for_user(user_id)
+
         # Add the user to a table of users pending deactivation (ie.
         # removal from all the rooms they're a member of)
         await self.store.add_user_pending_deactivation(user_id)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 6c8e361402..16b68d630a 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -278,8 +278,9 @@ class SyncHandler:
         user_id = sync_config.user.to_string()
         await self.auth.check_auth_blocking(requester=requester)
 
-        res = await self.response_cache.wrap(
+        res = await self.response_cache.wrap_conditional(
             sync_config.request_key,
+            lambda result: since_token != result.next_batch,
             self._wait_for_sync_for_user,
             sync_config,
             since_token,
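
`wrap_conditional` comes from the `response_cache.py` changes in this merge
(listed in the diffstat, not shown above). As used here, a sync response is
only retained in the cache if it actually advanced the stream: when
`next_batch` equals the requested `since_token` (e.g. a timed-out poll with
no new data), the result is not replayed to later callers. A standalone
sketch of that contract, assuming concurrent callers share one computation:

    import asyncio
    from typing import Any, Awaitable, Callable, Dict

    class ConditionalResponseCache:
        """Illustrative sketch only; not Synapse's ResponseCache."""

        def __init__(self) -> None:
            self._entries = {}  # type: Dict[str, asyncio.Task]

        async def wrap_conditional(
            self,
            key: str,
            should_cache: Callable[[Any], bool],
            callback: Callable[..., Awaitable[Any]],
            *args: Any,
        ) -> Any:
            task = self._entries.get(key)
            if task is None:
                task = asyncio.ensure_future(callback(*args))
                self._entries[key] = task
            result = await asyncio.shield(task)
            if not should_cache(result):
                # drop the entry so the next caller recomputes it
                self._entries.pop(key, None)
            return result
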
diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py
index c658862fe6..142b007d01 100644
--- a/synapse/http/__init__.py
+++ b/synapse/http/__init__.py
@@ -14,8 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import re
+from typing import Union
 
-from twisted.internet import task
+from twisted.internet import address, task
 from twisted.web.client import FileBodyProducer
 from twisted.web.iweb import IRequest
 
@@ -53,6 +54,40 @@ class QuieterFileBodyProducer(FileBodyProducer):
             pass
 
 
+def get_request_uri(request: IRequest) -> bytes:
+    """Return the full URI that was requested by the client"""
+    return b"%s://%s%s" % (
+        b"https" if request.isSecure() else b"http",
+        _get_requested_host(request),
+        # despite its name, "request.uri" is only the path and query-string.
+        request.uri,
+    )
+
+
+def _get_requested_host(request: IRequest) -> bytes:
+    hostname = request.getHeader(b"host")
+    if hostname:
+        return hostname
+
+    # no Host header, use the address/port that the request arrived on
+    host = request.getHost()  # type: Union[address.IPv4Address, address.IPv6Address]
+
+    hostname = host.host.encode("ascii")
+
+    if request.isSecure() and host.port == 443:
+        # default port for https
+        return hostname
+
+    if not request.isSecure() and host.port == 80:
+        # default port for http
+        return hostname
+
+    return b"%s:%i" % (
+        hostname,
+        host.port,
+    )
+
+
 def get_request_user_agent(request: IRequest, default: str = "") -> str:
     """Return the last User-Agent header, or the given default."""
     # There could be raw utf-8 bytes in the User-Agent header.
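
The behaviour of `get_request_uri`/`_get_requested_host` can be pinned down
with a stub request (hypothetical test scaffolding; the stub fakes just
enough of twisted's `IRequest` for these helpers):

    from unittest import mock

    from synapse.http import get_request_uri

    def stub_request(uri, host_header, secure, addr_host, port):
        req = mock.Mock()
        req.uri = uri
        req.isSecure.return_value = secure
        req.getHeader.return_value = host_header  # the Host header, or None
        req.getHost.return_value = mock.Mock(host=addr_host, port=port)
        return req

    # A Host header always wins:
    req = stub_request(b"/path?q=1", b"example.com:8448", False, "10.0.0.1", 80)
    assert get_request_uri(req) == b"http://example.com:8448/path?q=1"

    # Without one, the listener address is used and default ports are elided:
    req = stub_request(b"/_matrix/client/versions", None, True, "10.0.0.1", 443)
    assert get_request_uri(req) == b"https://10.0.0.1/_matrix/client/versions"
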
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 4a4fb5ef26..30153237e3 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -16,6 +16,10 @@ import logging
 import time
 from typing import Optional, Union
 
+import attr
+from zope.interface import implementer
+
+from twisted.internet.interfaces import IAddress
 from twisted.python.failure import Failure
 from twisted.web.server import Request, Site
 
@@ -333,26 +337,77 @@ class SynapseRequest(Request):
 
 
 class XForwardedForRequest(SynapseRequest):
-    def __init__(self, *args, **kw):
-        SynapseRequest.__init__(self, *args, **kw)
+    """Request object which honours proxy headers
 
+    Extends SynapseRequest to replace getClientIP, getClientAddress, and isSecure with
+    information from request headers.
     """
-    Add a layer on top of another request that only uses the value of an
-    X-Forwarded-For header as the result of C{getClientIP}.
-    """
 
-    def getClientIP(self):
+    # the client IP and ssl flag, as extracted from the headers.
+    _forwarded_for = None  # type: Optional[_XForwardedForAddress]
+    _forwarded_https = False  # type: bool
+
+    def requestReceived(self, command, path, version):
+        # this method is called by the Channel once the full request has been
+        # received, to dispatch the request to a resource.
+        # We can use it to set the IP address and protocol according to the
+        # headers.
+        self._process_forwarded_headers()
+        return super().requestReceived(command, path, version)
+
+    def _process_forwarded_headers(self):
+        headers = self.requestHeaders.getRawHeaders(b"x-forwarded-for")
+        if not headers:
+            return
+
+        # for now, we just use the first x-forwarded-for header. Really, we ought
+        # to start from the client IP address, and check whether it is trusted; if it
+        # is, work backwards through the headers until we find an untrusted address.
+        # see https://github.com/matrix-org/synapse/issues/9471
+        self._forwarded_for = _XForwardedForAddress(
+            headers[0].split(b",")[0].strip().decode("ascii")
+        )
+
+        # if we got an x-forwarded-for header, also look for an x-forwarded-proto header
+        header = self.getHeader(b"x-forwarded-proto")
+        if header is not None:
+            self._forwarded_https = header.lower() == b"https"
+        else:
+            # this is done largely for backwards-compatibility so that people that
+            # haven't set an x-forwarded-proto header don't get a redirect loop.
+            logger.warning(
+                "forwarded request lacks an x-forwarded-proto header: assuming https"
+            )
+            self._forwarded_https = True
+
+    def isSecure(self):
+        if self._forwarded_https:
+            return True
+        return super().isSecure()
+
+    def getClientIP(self) -> str:
         """
-        @return: The client address (the first address) in the value of the
-            I{X-Forwarded-For header}.  If the header is not present, return
-            C{b"-"}.
+        Return the IP address of the client who submitted this request.
+
+        This method is deprecated.  Use getClientAddress() instead.
         """
-        return (
-            self.requestHeaders.getRawHeaders(b"x-forwarded-for", [b"-"])[0]
-            .split(b",")[0]
-            .strip()
-            .decode("ascii")
-        )
+        if self._forwarded_for is not None:
+            return self._forwarded_for.host
+        return super().getClientIP()
+
+    def getClientAddress(self) -> IAddress:
+        """
+        Return the address of the client who submitted this request.
+        """
+        if self._forwarded_for is not None:
+            return self._forwarded_for
+        return super().getClientAddress()
+
+
+@implementer(IAddress)
+@attr.s(frozen=True, slots=True)
+class _XForwardedForAddress:
+    host = attr.ib(type=str)
 
 
 class SynapseSite(Site):
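
For reference, the header parsing above reduces to the following (values are
illustrative; per the comment in `_process_forwarded_headers`, only the
left-most entry of the first `X-Forwarded-For` header is used for now,
pending https://github.com/matrix-org/synapse/issues/9471):

    def first_forwarded_for(raw_headers):
        # raw_headers: as returned by getRawHeaders(b"x-forwarded-for")
        return raw_headers[0].split(b",")[0].strip().decode("ascii")

    def forwarded_https(proto_header):
        # mirrors the x-forwarded-proto handling: a missing header is
        # assumed to be https, for backwards-compatibility
        if proto_header is None:
            return True
        return proto_header.lower() == b"https"

    assert first_forwarded_for([b"1.2.3.4, 10.0.0.1"]) == "1.2.3.4"
    assert forwarded_https(None) and not forwarded_https(b"http")
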
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index ed911f106a..c1a4906cd1 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -74,6 +74,7 @@ class HttpPusher(Pusher):
         self.timed_call = None
         self._is_processing = False
         self._group_unread_count_by_room = hs.config.push_group_unread_count_by_room
+        self._pusherpool = hs.get_pusherpool()
 
         self.data = pusher_config.data
         if self.data is None:
@@ -304,7 +305,7 @@ class HttpPusher(Pusher):
                     )
                 else:
                     logger.info("Pushkey %s was rejected: removing", pk)
-                    await self.hs.remove_pusher(self.app_id, pk, self.user_id)
+                    await self._pusherpool.remove_pusher(self.app_id, pk, self.user_id)
         return True
 
     async def _build_notification_dict(
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index ae1145be0e..4c7f5fecee 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -19,12 +19,14 @@ from typing import TYPE_CHECKING, Dict, Iterable, Optional
 
 from prometheus_client import Gauge
 
+from synapse.api.errors import Codes, SynapseError
 from synapse.metrics.background_process_metrics import (
     run_as_background_process,
     wrap_as_background_process,
 )
 from synapse.push import Pusher, PusherConfig, PusherConfigException
 from synapse.push.pusher import PusherFactory
+from synapse.replication.http.push import ReplicationRemovePusherRestServlet
 from synapse.types import JsonDict, RoomStreamToken
 from synapse.util.async_helpers import concurrently_execute
 
@@ -58,7 +60,6 @@ class PusherPool:
     def __init__(self, hs: "HomeServer"):
         self.hs = hs
         self.pusher_factory = PusherFactory(hs)
-        self._should_start_pushers = hs.config.start_pushers
         self.store = self.hs.get_datastore()
         self.clock = self.hs.get_clock()
 
@@ -67,6 +68,16 @@ class PusherPool:
         # We shard the handling of push notifications by user ID.
         self._pusher_shard_config = hs.config.push.pusher_shard_config
         self._instance_name = hs.get_instance_name()
+        self._should_start_pushers = (
+            self._instance_name in self._pusher_shard_config.instances
+        )
+
+        # We can only delete pushers on master.
+        self._remove_pusher_client = None
+        if hs.config.worker.worker_app:
+            self._remove_pusher_client = ReplicationRemovePusherRestServlet.make_client(
+                hs
+            )
 
         # Record the last stream ID that we were poked about so we can get
         # changes since then. We set this to the current max stream ID on
@@ -103,6 +114,11 @@ class PusherPool:
             The newly created pusher.
         """
 
+        if kind == "email":
+            email_owner = await self.store.get_user_id_by_threepid("email", pushkey)
+            if email_owner != user_id:
+                raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
+
         time_now_msec = self.clock.time_msec()
 
         # create the pusher setting last_stream_ordering to the current maximum
@@ -175,9 +191,6 @@ class PusherPool:
             user_id: user to remove pushers for
             access_tokens: access token *ids* to remove pushers for
         """
-        if not self._pusher_shard_config.should_handle(self._instance_name, user_id):
-            return
-
         tokens = set(access_tokens)
         for p in await self.store.get_pushers_by_user_id(user_id):
             if p.access_token in tokens:
@@ -380,6 +393,12 @@ class PusherPool:
 
             synapse_pushers.labels(type(pusher).__name__, pusher.app_id).dec()
 
-        await self.store.delete_pusher_by_app_id_pushkey_user_id(
-            app_id, pushkey, user_id
-        )
+        # We can only delete pushers on master.
+        if self._remove_pusher_client:
+            await self._remove_pusher_client(
+                app_id=app_id, pushkey=pushkey, user_id=user_id
+            )
+        else:
+            await self.store.delete_pusher_by_app_id_pushkey_user_id(
+                app_id, pushkey, user_id
+            )
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 8a2b73b75e..321a333820 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -106,6 +106,9 @@ CONDITIONAL_REQUIREMENTS = {
         "pysaml2>=4.5.0;python_version>='3.6'",
     ],
     "oidc": ["authlib>=0.14.0"],
+    # systemd-python is necessary for logging to the systemd journal via
+    # `systemd.journal.JournalHandler`, as is documented in
+    # `contrib/systemd/log_config.yaml`.
     "systemd": ["systemd-python>=231"],
     "url_preview": ["lxml>=3.5.0"],
     "sentry": ["sentry-sdk>=0.7.2"],
diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py
index dd527e807f..cb4a52dbe9 100644
--- a/synapse/replication/http/__init__.py
+++ b/synapse/replication/http/__init__.py
@@ -21,6 +21,7 @@ from synapse.replication.http import (
     login,
     membership,
     presence,
+    push,
     register,
     send_event,
     streams,
@@ -42,6 +43,7 @@ class ReplicationRestResource(JsonResource):
         membership.register_servlets(hs, self)
         streams.register_servlets(hs, self)
         account_data.register_servlets(hs, self)
+        push.register_servlets(hs, self)
 
         # The following can't currently be instantiated on workers.
         if hs.config.worker.worker_app is None:
diff --git a/synapse/replication/http/push.py b/synapse/replication/http/push.py
new file mode 100644
index 0000000000..054ed64d34
--- /dev/null
+++ b/synapse/replication/http/push.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import TYPE_CHECKING
+
+from synapse.http.servlet import parse_json_object_from_request
+from synapse.replication.http._base import ReplicationEndpoint
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationRemovePusherRestServlet(ReplicationEndpoint):
+    """Deletes the given pusher.
+
+    Request format:
+
+        POST /_synapse/replication/remove_pusher/:user_id
+
+        {
+            "app_id": "<some_id>",
+            "pushkey": "<some_key>"
+        }
+
+    """
+
+    NAME = "add_user_account_data"
+    PATH_ARGS = ("user_id",)
+    CACHE = False
+
+    def __init__(self, hs: "HomeServer"):
+        super().__init__(hs)
+
+        self.pusher_pool = hs.get_pusherpool()
+
+    @staticmethod
+    async def _serialize_payload(app_id, pushkey, user_id):
+        payload = {
+            "app_id": app_id,
+            "pushkey": pushkey,
+        }
+
+        return payload
+
+    async def _handle_request(self, request, user_id):
+        content = parse_json_object_from_request(request)
+
+        app_id = content["app_id"]
+        pushkey = content["pushkey"]
+
+        await self.pusher_pool.remove_pusher(app_id, pushkey, user_id)
+
+        return 200, {}
+
+
+def register_servlets(hs, http_server):
+    ReplicationRemovePusherRestServlet(hs).register(http_server)
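
As the `pusherpool.py` hunk above shows, workers drive this endpoint through
the generated client rather than constructing the HTTP request by hand;
roughly (the wrapper function here is hypothetical):

    async def remove_pusher_via_replication(hs, app_id: str, pushkey: str, user_id: str):
        # on a worker, proxy the deletion to the master over replication;
        # PusherPool does exactly this when hs.config.worker.worker_app is set
        client = ReplicationRemovePusherRestServlet.make_client(hs)
        await client(app_id=app_id, pushkey=pushkey, user_id=user_id)
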
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 0a9da79c32..bb447f75b4 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -325,31 +325,6 @@ class FederationAckCommand(Command):
         return "%s %s" % (self.instance_name, self.token)
 
 
-class RemovePusherCommand(Command):
-    """Sent by the client to request the master remove the given pusher.
-
-    Format::
-
-        REMOVE_PUSHER <app_id> <push_key> <user_id>
-    """
-
-    NAME = "REMOVE_PUSHER"
-
-    def __init__(self, app_id, push_key, user_id):
-        self.user_id = user_id
-        self.app_id = app_id
-        self.push_key = push_key
-
-    @classmethod
-    def from_line(cls, line):
-        app_id, push_key, user_id = line.split(" ", 2)
-
-        return cls(app_id, push_key, user_id)
-
-    def to_line(self):
-        return " ".join((self.app_id, self.push_key, self.user_id))
-
-
 class UserIpCommand(Command):
     """Sent periodically when a worker sees activity from a client.
 
@@ -416,7 +391,6 @@ _COMMANDS = (
     ReplicateCommand,
     UserSyncCommand,
     FederationAckCommand,
-    RemovePusherCommand,
     UserIpCommand,
     RemoteServerUpCommand,
     ClearUserSyncsCommand,
@@ -443,7 +417,6 @@ VALID_CLIENT_COMMANDS = (
     UserSyncCommand.NAME,
     ClearUserSyncsCommand.NAME,
     FederationAckCommand.NAME,
-    RemovePusherCommand.NAME,
     UserIpCommand.NAME,
     ErrorCommand.NAME,
     RemoteServerUpCommand.NAME,
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index d1d00c3717..a7245da152 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -44,7 +44,6 @@ from synapse.replication.tcp.commands import (
     PositionCommand,
     RdataCommand,
     RemoteServerUpCommand,
-    RemovePusherCommand,
     ReplicateCommand,
     UserIpCommand,
     UserSyncCommand,
@@ -373,23 +372,6 @@ class ReplicationCommandHandler:
         if self._federation_sender:
             self._federation_sender.federation_ack(cmd.instance_name, cmd.token)
 
-    def on_REMOVE_PUSHER(
-        self, conn: AbstractConnection, cmd: RemovePusherCommand
-    ) -> Optional[Awaitable[None]]:
-        remove_pusher_counter.inc()
-
-        if self._is_master:
-            return self._handle_remove_pusher(cmd)
-        else:
-            return None
-
-    async def _handle_remove_pusher(self, cmd: RemovePusherCommand):
-        await self._store.delete_pusher_by_app_id_pushkey_user_id(
-            app_id=cmd.app_id, pushkey=cmd.push_key, user_id=cmd.user_id
-        )
-
-        self._notifier.on_new_replication_data()
-
     def on_USER_IP(
         self, conn: AbstractConnection, cmd: UserIpCommand
     ) -> Optional[Awaitable[None]]:
@@ -684,11 +666,6 @@ class ReplicationCommandHandler:
             UserSyncCommand(instance_id, user_id, is_syncing, last_sync_ms)
         )
 
-    def send_remove_pusher(self, app_id: str, push_key: str, user_id: str):
-        """Poke the master to remove a pusher for a user"""
-        cmd = RemovePusherCommand(app_id, push_key, user_id)
-        self.send_command(cmd)
-
     def send_user_ip(
         self,
         user_id: str,
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 998a0ef671..9c701c7348 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -35,6 +35,7 @@ from synapse.rest.admin._base import (
     assert_user_is_admin,
 )
 from synapse.rest.client.v2_alpha._base import client_patterns
+from synapse.storage.databases.main.media_repository import MediaSortOrder
 from synapse.types import JsonDict, UserID
 
 if TYPE_CHECKING:
@@ -832,8 +833,33 @@ class UserMediaRestServlet(RestServlet):
                 errcode=Codes.INVALID_PARAM,
             )
 
+        # If neither `order_by` nor `dir` is set, default to newest media
+        # on top, for backward compatibility.
+        if b"order_by" not in request.args and b"dir" not in request.args:
+            order_by = MediaSortOrder.CREATED_TS.value
+            direction = "b"
+        else:
+            order_by = parse_string(
+                request,
+                "order_by",
+                default=MediaSortOrder.CREATED_TS.value,
+                allowed_values=(
+                    MediaSortOrder.MEDIA_ID.value,
+                    MediaSortOrder.UPLOAD_NAME.value,
+                    MediaSortOrder.CREATED_TS.value,
+                    MediaSortOrder.LAST_ACCESS_TS.value,
+                    MediaSortOrder.MEDIA_LENGTH.value,
+                    MediaSortOrder.MEDIA_TYPE.value,
+                    MediaSortOrder.QUARANTINED_BY.value,
+                    MediaSortOrder.SAFE_FROM_QUARANTINE.value,
+                ),
+            )
+            direction = parse_string(
+                request, "dir", default="f", allowed_values=("f", "b")
+            )
+
         media, total = await self.store.get_local_media_by_user_paginate(
-            start, limit, user_id
+            start, limit, user_id, order_by, direction
         )
 
         ret = {"media": media, "total": total}
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 6e2fbedd99..925edfc402 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -20,6 +20,7 @@ from synapse.api.errors import Codes, LoginError, SynapseError
 from synapse.api.ratelimiting import Ratelimiter
 from synapse.appservice import ApplicationService
 from synapse.handlers.sso import SsoIdentityProvider
+from synapse.http import get_request_uri
 from synapse.http.server import HttpServer, finish_request
 from synapse.http.servlet import (
     RestServlet,
@@ -354,6 +355,7 @@ class SsoRedirectServlet(RestServlet):
             hs.get_oidc_handler()
         self._sso_handler = hs.get_sso_handler()
         self._msc2858_enabled = hs.config.experimental.msc2858_enabled
+        self._public_baseurl = hs.config.public_baseurl
 
     def register(self, http_server: HttpServer) -> None:
         super().register(http_server)
@@ -373,6 +375,32 @@ class SsoRedirectServlet(RestServlet):
     async def on_GET(
         self, request: SynapseRequest, idp_id: Optional[str] = None
     ) -> None:
+        if not self._public_baseurl:
+            raise SynapseError(400, "SSO requires a valid public_baseurl")
+
+        # if this isn't the expected hostname, redirect to the right one, so that we
+        # get our cookies back.
+        requested_uri = get_request_uri(request)
+        baseurl_bytes = self._public_baseurl.encode("utf-8")
+        if not requested_uri.startswith(baseurl_bytes):
+            # swap out the incorrect base URL for the right one.
+            #
+            # The idea here is to redirect from
+            #    https://foo.bar/whatever/_matrix/...
+            # to
+            #    https://public.baseurl/_matrix/...
+            #
+            i = requested_uri.index(b"/_matrix")
+            new_uri = baseurl_bytes[:-1] + requested_uri[i:]
+            logger.info(
+                "Requested URI %s is not canonical: redirecting to %s",
+                requested_uri.decode("utf-8", errors="replace"),
+                new_uri.decode("utf-8", errors="replace"),
+            )
+            request.redirect(new_uri)
+            finish_request(request)
+            return
+
         client_redirect_url = parse_string(
             request, "redirectUrl", required=True, encoding=None
         )
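
The rewrite keeps everything from the first `/_matrix` segment onwards and
grafts it onto `public_baseurl` (which the `server.py` hunk earlier
guarantees ends in a trailing slash, hence the `[:-1]`). In isolation:

    requested_uri = b"https://foo.bar/whatever/_matrix/client/r0/login/sso/redirect"
    baseurl_bytes = b"https://public.baseurl/"  # public_baseurl, trailing slash
    i = requested_uri.index(b"/_matrix")
    new_uri = baseurl_bytes[:-1] + requested_uri[i:]
    assert new_uri == b"https://public.baseurl/_matrix/client/r0/login/sso/redirect"
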
diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py
index 8588b6d271..9eeb970580 100644
--- a/synapse/rest/synapse/client/__init__.py
+++ b/synapse/rest/synapse/client/__init__.py
@@ -54,7 +54,12 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc
     if hs.config.saml2_enabled:
         from synapse.rest.synapse.client.saml2 import SAML2Resource
 
-        resources["/_synapse/client/saml2"] = SAML2Resource(hs)
+        res = SAML2Resource(hs)
+        resources["/_synapse/client/saml2"] = res
+
+        # This is also mounted under '/_matrix' for backwards-compatibility.
+        # To be removed in Synapse v1.32.0.
+        resources["/_matrix/saml2"] = res
 
     return resources
 
diff --git a/synapse/server.py b/synapse/server.py
index 6b3892e3cd..4b9ec7f0ae 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -248,7 +248,7 @@ class HomeServer(metaclass=abc.ABCMeta):
         self.start_time = None  # type: Optional[int]
 
         self._instance_id = random_string(5)
-        self._instance_name = config.worker_name or "master"
+        self._instance_name = config.worker.instance_name
 
         self.version_string = version_string
 
@@ -758,12 +758,6 @@ class HomeServer(metaclass=abc.ABCMeta):
             reconnect=True,
         )
 
-    async def remove_pusher(self, app_id: str, push_key: str, user_id: str):
-        return await self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
-
     def should_send_federation(self) -> bool:
         "Should this server be sending federation traffic directly?"
-        return self.config.send_federation and (
-            not self.config.worker_app
-            or self.config.worker_app == "synapse.app.federation_sender"
-        )
+        return self.config.send_federation
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 4646926449..f1ba529a2d 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -49,7 +49,6 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.background_updates import BackgroundUpdater
 from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
 from synapse.storage.types import Connection, Cursor
-from synapse.storage.util.sequence import build_sequence_generator
 from synapse.types import Collection
 
 # python 3 does not have a maximum int value
@@ -381,7 +380,10 @@ class DatabasePool:
     _TXN_ID = 0
 
     def __init__(
-        self, hs, database_config: DatabaseConnectionConfig, engine: BaseDatabaseEngine
+        self,
+        hs,
+        database_config: DatabaseConnectionConfig,
+        engine: BaseDatabaseEngine,
     ):
         self.hs = hs
         self._clock = hs.get_clock()
@@ -420,16 +422,6 @@ class DatabasePool:
                 self._check_safe_to_upsert,
             )
 
-        # We define this sequence here so that it can be referenced from both
-        # the DataStore and PersistEventStore.
-        def get_chain_id_txn(txn):
-            txn.execute("SELECT COALESCE(max(chain_id), 0) FROM event_auth_chains")
-            return txn.fetchone()[0]
-
-        self.event_chain_id_gen = build_sequence_generator(
-            engine, get_chain_id_txn, "event_auth_chain_id"
-        )
-
     def is_running(self) -> bool:
         """Is the database pool currently running"""
         return self._db_pool.running
diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py
index e84f8b42f7..379c78bb83 100644
--- a/synapse/storage/databases/__init__.py
+++ b/synapse/storage/databases/__init__.py
@@ -79,7 +79,7 @@ class Databases:
                     # If we're on a process that can persist events also
                     # instantiate a `PersistEventsStore`
                     if hs.get_instance_name() in hs.config.worker.writers.events:
-                        persist_events = PersistEventsStore(hs, database, main)
+                        persist_events = PersistEventsStore(hs, database, main, db_conn)
 
                 if "state" in database_config.databases:
                     logger.info(
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 287606cb4f..cd1ceac50e 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -42,7 +42,9 @@ from synapse.logging.utils import log_function
 from synapse.storage._base import db_to_json, make_in_list_sql_clause
 from synapse.storage.database import DatabasePool, LoggingTransaction
 from synapse.storage.databases.main.search import SearchEntry
+from synapse.storage.types import Connection
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
+from synapse.storage.util.sequence import SequenceGenerator
 from synapse.types import StateMap, get_domain_from_id
 from synapse.util import json_encoder
 from synapse.util.iterutils import batch_iter, sorted_topologically
@@ -90,7 +92,11 @@ class PersistEventsStore:
     """
 
     def __init__(
-        self, hs: "HomeServer", db: DatabasePool, main_data_store: "DataStore"
+        self,
+        hs: "HomeServer",
+        db: DatabasePool,
+        main_data_store: "DataStore",
+        db_conn: Connection,
     ):
         self.hs = hs
         self.db_pool = db
@@ -474,6 +480,7 @@ class PersistEventsStore:
         self._add_chain_cover_index(
             txn,
             self.db_pool,
+            self.store.event_chain_id_gen,
             event_to_room_id,
             event_to_types,
             event_to_auth_chain,
@@ -484,6 +491,7 @@ class PersistEventsStore:
         cls,
         txn,
         db_pool: DatabasePool,
+        event_chain_id_gen: SequenceGenerator,
         event_to_room_id: Dict[str, str],
         event_to_types: Dict[str, Tuple[str, str]],
         event_to_auth_chain: Dict[str, List[str]],
@@ -630,6 +638,7 @@ class PersistEventsStore:
         new_chain_tuples = cls._allocate_chain_ids(
             txn,
             db_pool,
+            event_chain_id_gen,
             event_to_room_id,
             event_to_types,
             event_to_auth_chain,
@@ -768,6 +777,7 @@ class PersistEventsStore:
     def _allocate_chain_ids(
         txn,
         db_pool: DatabasePool,
+        event_chain_id_gen: SequenceGenerator,
         event_to_room_id: Dict[str, str],
         event_to_types: Dict[str, Tuple[str, str]],
         event_to_auth_chain: Dict[str, List[str]],
@@ -880,7 +890,7 @@ class PersistEventsStore:
             chain_to_max_seq_no[new_chain_tuple[0]] = new_chain_tuple[1]
 
         # Generate new chain IDs for all unallocated chain IDs.
-        newly_allocated_chain_ids = db_pool.event_chain_id_gen.get_next_mult_txn(
+        newly_allocated_chain_ids = event_chain_id_gen.get_next_mult_txn(
             txn, len(unallocated_chain_ids)
         )
 
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 89274e75f7..c1626ccf28 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -917,6 +917,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
         PersistEventsStore._add_chain_cover_index(
             txn,
             self.db_pool,
+            self.event_chain_id_gen,
             event_to_room_id,
             event_to_types,
             event_to_auth_chain,
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index c8850a4707..edbe42f2bf 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -45,6 +45,7 @@ from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_cla
 from synapse.storage.database import DatabasePool
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
+from synapse.storage.util.sequence import build_sequence_generator
 from synapse.types import Collection, JsonDict, get_domain_from_id
 from synapse.util.caches.descriptors import cached
 from synapse.util.caches.lrucache import LruCache
@@ -156,6 +157,21 @@ class EventsWorkerStore(SQLBaseStore):
         self._event_fetch_list = []
         self._event_fetch_ongoing = 0
 
+        # We define this sequence here so that it can be referenced from both
+        # the DataStore and PersistEventStore.
+        def get_chain_id_txn(txn):
+            txn.execute("SELECT COALESCE(max(chain_id), 0) FROM event_auth_chains")
+            return txn.fetchone()[0]
+
+        self.event_chain_id_gen = build_sequence_generator(
+            db_conn,
+            database.engine,
+            get_chain_id_txn,
+            "event_auth_chain_id",
+            table="event_auth_chains",
+            id_column="chain_id",
+        )
+
     def process_replication_rows(self, stream_name, instance_name, token, rows):
         if stream_name == EventsStream.NAME:
             self._stream_id_gen.advance(instance_name, token)
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py
index 9ee642c668..274f8de595 100644
--- a/synapse/storage/databases/main/media_repository.py
+++ b/synapse/storage/databases/main/media_repository.py
@@ -13,6 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from enum import Enum
 from typing import Any, Dict, Iterable, List, Optional, Tuple
 
 from synapse.storage._base import SQLBaseStore
@@ -23,6 +24,22 @@ BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD = (
 )
 
 
+class MediaSortOrder(Enum):
+    """
+    Enum to define the sorting method used when returning media with
+    get_local_media_by_user_paginate
+    """
+
+    MEDIA_ID = "media_id"
+    UPLOAD_NAME = "upload_name"
+    CREATED_TS = "created_ts"
+    LAST_ACCESS_TS = "last_access_ts"
+    MEDIA_LENGTH = "media_length"
+    MEDIA_TYPE = "media_type"
+    QUARANTINED_BY = "quarantined_by"
+    SAFE_FROM_QUARANTINE = "safe_from_quarantine"
+
+
 class MediaRepositoryBackgroundUpdateStore(SQLBaseStore):
     def __init__(self, database: DatabasePool, db_conn, hs):
         super().__init__(database, db_conn, hs)
@@ -118,7 +135,12 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         )
 
     async def get_local_media_by_user_paginate(
-        self, start: int, limit: int, user_id: str
+        self,
+        start: int,
+        limit: int,
+        user_id: str,
+        order_by: str = MediaSortOrder.CREATED_TS.value,
+        direction: str = "f",
     ) -> Tuple[List[Dict[str, Any]], int]:
         """Get a paginated list of metadata for a local piece of media
         which a user_id has uploaded
@@ -127,6 +149,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             start: offset in the list
             limit: maximum amount of media_ids to retrieve
             user_id: fully-qualified user id
+            order_by: the sort order of the returned list
+            direction: sort ascending or descending
         Returns:
             A paginated list of all metadata of user's media,
             plus the total count of all the user's media
@@ -134,6 +158,14 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
 
         def get_local_media_by_user_paginate_txn(txn):
 
+            # Set ordering
+            order_by_column = MediaSortOrder(order_by).value
+
+            if direction == "b":
+                order = "DESC"
+            else:
+                order = "ASC"
+
             args = [user_id]
             sql = """
                 SELECT COUNT(*) as total_media
@@ -155,9 +187,12 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                     "safe_from_quarantine"
                 FROM local_media_repository
                 WHERE user_id = ?
-                ORDER BY created_ts DESC, media_id DESC
+                ORDER BY {order_by_column} {order}, media_id ASC
                 LIMIT ? OFFSET ?
-            """
+            """.format(
+                order_by_column=order_by_column,
+                order=order,
+            )
 
             args += [limit, start]
             txn.execute(sql, args)
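
A note on the `.format()` into SQL above: `ORDER BY` targets cannot be bound
as query parameters, so the column name is round-tripped through
`MediaSortOrder` first; any value outside the enum raises before it can
reach the SQL string (and the REST layer additionally whitelists the values
via `allowed_values`). For instance:

    from synapse.storage.databases.main.media_repository import MediaSortOrder

    assert MediaSortOrder("media_length").value == "media_length"

    try:
        MediaSortOrder("media_id; DROP TABLE pushers")  # not a valid member
    except ValueError:
        pass  # rejected before any SQL is built
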
diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py
index 7cb69dd6bd..74219cb05e 100644
--- a/synapse/storage/databases/main/pusher.py
+++ b/synapse/storage/databases/main/pusher.py
@@ -373,3 +373,46 @@ class PusherStore(PusherWorkerStore):
             await self.db_pool.runInteraction(
                 "delete_pusher", delete_pusher_txn, stream_id
             )
+
+    async def delete_all_pushers_for_user(self, user_id: str) -> None:
+        """Delete all pushers associated with an account."""
+
+        # We want to generate a row in `deleted_pushers` for each pusher we're
+        # deleting, so we fetch the list now in order to generate the right
+        # number of stream IDs.
+        #
+        # Note: technically there could be a race here between adding and
+        # deleting pushers, but a) in the worst case we don't stop a pusher
+        # until the next restart, and b) this is only called when we're
+        # deactivating an account.
+        pushers = list(await self.get_pushers_by_user_id(user_id))
+
+        def delete_pushers_txn(txn, stream_ids):
+            self._invalidate_cache_and_stream(  # type: ignore
+                txn, self.get_if_user_has_pusher, (user_id,)
+            )
+
+            self.db_pool.simple_delete_txn(
+                txn,
+                table="pushers",
+                keyvalues={"user_name": user_id},
+            )
+
+            self.db_pool.simple_insert_many_txn(
+                txn,
+                table="deleted_pushers",
+                values=[
+                    {
+                        "stream_id": stream_id,
+                        "app_id": pusher.app_id,
+                        "pushkey": pusher.pushkey,
+                        "user_id": user_id,
+                    }
+                    for stream_id, pusher in zip(stream_ids, pushers)
+                ],
+            )
+
+        async with self._pushers_id_gen.get_next_mult(len(pushers)) as stream_ids:
+            await self.db_pool.runInteraction(
+                "delete_all_pushers_for_user", delete_pushers_txn, stream_ids
+            )
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index d5b5507815..61a7556e56 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -23,7 +23,7 @@ import attr
 from synapse.api.constants import UserTypes
 from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError
 from synapse.metrics.background_process_metrics import wrap_as_background_process
-from synapse.storage.database import DatabasePool
+from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.databases.main.stats import StatsStore
 from synapse.storage.types import Connection, Cursor
@@ -70,7 +70,12 @@ class TokenLookupResult:
 
 
 class RegistrationWorkerStore(CacheInvalidationWorkerStore):
-    def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"):
+    def __init__(
+        self,
+        database: DatabasePool,
+        db_conn: LoggingDatabaseConnection,
+        hs: "HomeServer",
+    ):
         super().__init__(database, db_conn, hs)
 
         self.config = hs.config
@@ -79,9 +84,12 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
         # call `find_max_generated_user_id_localpart` each time, which is
         # expensive if there are many entries.
         self._user_id_seq = build_sequence_generator(
+            db_conn,
             database.engine,
             find_max_generated_user_id_localpart,
             "user_id_seq",
+            table=None,
+            id_column=None,
         )
 
         self._account_validity = hs.config.account_validity
@@ -1036,7 +1044,12 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
 
 
 class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
-    def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"):
+    def __init__(
+        self,
+        database: DatabasePool,
+        db_conn: LoggingDatabaseConnection,
+        hs: "HomeServer",
+    ):
         super().__init__(database, db_conn, hs)
 
         self._clock = hs.get_clock()
diff --git a/synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql b/synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql
new file mode 100644
index 0000000000..20ba4abca3
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql
@@ -0,0 +1,21 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- We may not have deleted all pushers for deactivated accounts. Do so now.
+--
+-- Note: we don't bother updating the `deleted_pushers` table, as it's only
+-- used to stop pushers on workers, and that will happen when they next restart.
+DELETE FROM pushers WHERE user_name IN (SELECT name FROM users WHERE deactivated = 1);
diff --git a/synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql b/synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql
new file mode 100644
index 0000000000..2442eea6bc
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql
@@ -0,0 +1,19 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- Delete all pushers whose access token no longer exists (e.g. because the
+-- device was deleted). This clears up after a bug where such pushers weren't
+-- correctly deleted when using workers.
+DELETE FROM pushers WHERE access_token NOT IN (SELECT id FROM access_tokens);
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 02ee15676c..1026f321e5 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -497,8 +497,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
     async def add_users_in_public_rooms(
         self, room_id: str, user_ids: Iterable[str]
     ) -> None:
-        """Insert entries into the users_who_share_private_rooms table. The first
-        user should be a local user.
+        """Insert entries into the users_in_public_rooms table.
 
         Args:
             room_id
@@ -670,7 +669,6 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
         users.update(rows)
         return list(users)
 
-    @cached()
     async def get_shared_rooms_for_users(
         self, user_id: str, other_user_id: str
     ) -> Set[str]:
diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py
index b16b9905d8..e2240703a7 100644
--- a/synapse/storage/databases/state/store.py
+++ b/synapse/storage/databases/state/store.py
@@ -97,10 +97,12 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             return txn.fetchone()[0]
 
         self._state_group_seq_gen = build_sequence_generator(
-            self.database_engine, get_max_state_group_txn, "state_group_id_seq"
-        )
-        self._state_group_seq_gen.check_consistency(
-            db_conn, table="state_groups", id_column="id"
+            db_conn,
+            self.database_engine,
+            get_max_state_group_txn,
+            "state_group_id_seq",
+            table="state_groups",
+            id_column="id",
         )
 
     @cached(max_entries=10000, iterable=True)
diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py
index 3ea637b281..36a67e7019 100644
--- a/synapse/storage/util/sequence.py
+++ b/synapse/storage/util/sequence.py
@@ -251,9 +251,14 @@ class LocalSequenceGenerator(SequenceGenerator):
 
 
 def build_sequence_generator(
+    db_conn: "LoggingDatabaseConnection",
     database_engine: BaseDatabaseEngine,
     get_first_callback: GetFirstCallbackType,
     sequence_name: str,
+    table: Optional[str],
+    id_column: Optional[str],
+    stream_name: Optional[str] = None,
+    positive: bool = True,
 ) -> SequenceGenerator:
     """Get the best impl of SequenceGenerator available
 
@@ -265,8 +270,23 @@ def build_sequence_generator(
         get_first_callback: a callback which gets the next sequence ID. Used if
             we're on sqlite.
         sequence_name: the name of a postgres sequence to use.
+        table, id_column, stream_name, positive: If `table` is set then
+            `check_consistency` is called on the created sequence. See the
+            docstring of `check_consistency` for details.
     """
     if isinstance(database_engine, PostgresEngine):
-        return PostgresSequenceGenerator(sequence_name)
+        seq = PostgresSequenceGenerator(sequence_name)  # type: SequenceGenerator
     else:
-        return LocalSequenceGenerator(get_first_callback)
+        seq = LocalSequenceGenerator(get_first_callback)
+
+    if table:
+        assert id_column
+        seq.check_consistency(
+            db_conn=db_conn,
+            table=table,
+            id_column=id_column,
+            stream_name=stream_name,
+            positive=positive,
+        )
+
+    return seq
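For reference, a minimal sketch of the new call pattern introduced above (the
table, column, and callback names here are illustrative, not taken from the
patch): passing a `table`/`id_column` pair makes the factory run
`check_consistency` on the freshly built sequence, while passing `table=None`
skips the check, as `RegistrationWorkerStore` now does for `user_id_seq`.

    def get_max_thing_id(txn):
        # Fallback used on SQLite, which has no native sequences: find the
        # current maximum ID so the LocalSequenceGenerator can continue from it.
        txn.execute("SELECT COALESCE(max(id), 0) FROM things")
        return txn.fetchone()[0]

    self._thing_id_seq = build_sequence_generator(
        db_conn,              # connection to check the sequence against
        database.engine,      # PostgresEngine or Sqlite3Engine
        get_max_thing_id,     # only used by the SQLite fallback
        "thing_id_seq",       # name of the postgres sequence
        table="things",       # opting in to the consistency check...
        id_column="id",       # ...against this column
    )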
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py
index 32228f42ee..53f85195a7 100644
--- a/synapse/util/caches/response_cache.py
+++ b/synapse/util/caches/response_cache.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Optional, TypeVar
+from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Optional, Set, TypeVar
 
 from twisted.internet import defer
 
@@ -40,6 +40,7 @@ class ResponseCache(Generic[T]):
     def __init__(self, hs: "HomeServer", name: str, timeout_ms: float = 0):
         # Requests that haven't finished yet.
         self.pending_result_cache = {}  # type: Dict[T, ObservableDeferred]
+        self.pending_conditionals = {}  # type: Dict[T, Set[Callable[[Any], bool]]]
 
         self.clock = hs.get_clock()
         self.timeout_sec = timeout_ms / 1000.0
@@ -101,7 +102,11 @@ class ResponseCache(Generic[T]):
         self.pending_result_cache[key] = result
 
         def remove(r):
-            if self.timeout_sec:
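+            # Only keep the result cached if every conditional registered for
+            # this key passes; see `wrap_conditional` below.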
+            should_cache = all(
+                func(r) for func in self.pending_conditionals.pop(key, [])
+            )
+
+            if self.timeout_sec and should_cache:
                 self.clock.call_later(
                     self.timeout_sec, self.pending_result_cache.pop, key, None
                 )
@@ -112,6 +117,31 @@ class ResponseCache(Generic[T]):
         result.addBoth(remove)
         return result.observe()
 
+    def add_conditional(self, key: T, conditional: Callable[[Any], bool]) -> None:
+        """Register a conditional to be checked against the result for `key`
+        once it completes; see `wrap_conditional`.
+        """
+        self.pending_conditionals.setdefault(key, set()).add(conditional)
+
+    def wrap_conditional(
+        self,
+        key: T,
+        should_cache: Callable[[Any], bool],
+        callback: "Callable[..., Any]",
+        *args: Any,
+        **kwargs: Any
+    ) -> defer.Deferred:
+        """The same as wrap(), but adds a conditional to the final execution.
+
+        When the final execution completes, *all* conditionals need to return True for it to properly cache,
+        else it'll not be cached in a timed fashion.
+        """
+
+        # See if there's already a result for this key that hasn't yet
+        # completed. Since Python is single-threaded, registering the
+        # conditional in the same execution step as the lookup cannot race
+        # with the result completing.
+        result = self.get(key)
+        if not result or (isinstance(result, defer.Deferred) and not result.called):
+            self.add_conditional(key, should_cache)
+
+        return self.wrap(key, callback, *args, **kwargs)
+
     def wrap(
         self, key: T, callback: "Callable[..., Any]", *args: Any, **kwargs: Any
     ) -> defer.Deferred:
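A hedged usage sketch of the new conditional caching (the handler and
predicate names below are illustrative, not part of the patch): cache a
computed response under the usual timeout only when it is non-empty, so that
an empty result does not get pinned in the cache.

    def _is_nonempty(response) -> bool:
        # Conditional checked when the deferred completes: only keep the
        # result in the cache if it actually contains something.
        return bool(response)

    # All positional/keyword arguments after the callback are forwarded to it,
    # exactly as with wrap().
    d = self.response_cache.wrap_conditional(
        (user_id,),            # cache key
        _is_nonempty,          # must return True for the result to be cached
        self._compute_response,
        user_id,
    )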