-rwxr-xr-x  .ci/scripts/test_old_deps.sh  10
-rw-r--r--  .github/workflows/docker.yml  14
-rw-r--r--  .github/workflows/tests.yml  36
-rw-r--r--  .github/workflows/twisted_trunk.yml  2
-rw-r--r--  CHANGES.md  60
-rw-r--r--  changelog.d/11215.feature  1
-rw-r--r--  changelog.d/11615.misc  1
-rw-r--r--  changelog.d/11616.misc  1
-rw-r--r--  changelog.d/11655.feature  1
-rw-r--r--  changelog.d/11832.misc  1
-rw-r--r--  changelog.d/11859.doc  1
-rw-r--r--  changelog.d/11868.feature  1
-rw-r--r--  changelog.d/11869.misc  1
-rw-r--r--  changelog.d/11870.misc  1
-rw-r--r--  changelog.d/11876.misc  1
-rw-r--r--  changelog.d/11878.misc  1
-rw-r--r--  changelog.d/11880.misc  1
-rw-r--r--  changelog.d/11884.misc  1
-rw-r--r--  changelog.d/11888.misc  1
-rw-r--r--  changelog.d/11890.bugfix  1
-rw-r--r--  changelog.d/11892.feature  1
-rw-r--r--  changelog.d/11895.removal  1
-rw-r--r--  debian/changelog  6
-rw-r--r--  docker/Dockerfile-pgtests  2
-rwxr-xr-x  docker/run_pg_tests.sh  2
-rw-r--r--  docs/MSC1711_certificates_FAQ.md  21
-rw-r--r--  docs/admin_api/account_validity.md  3
-rw-r--r--  docs/admin_api/delete_group.md  6
-rw-r--r--  docs/admin_api/event_reports.md  7
-rw-r--r--  docs/admin_api/media_admin_api.md  25
-rw-r--r--  docs/admin_api/purge_history_api.md  9
-rw-r--r--  docs/admin_api/room_membership.md  6
-rw-r--r--  docs/admin_api/rooms.md  21
-rw-r--r--  docs/admin_api/statistics.md  6
-rw-r--r--  docs/admin_api/user_admin_api.md  81
-rw-r--r--  docs/admin_api/version_api.md  2
-rw-r--r--  docs/development/contributing_guide.md  3
-rw-r--r--  docs/development/database_schema.md  54
-rw-r--r--  docs/modules/password_auth_provider_callbacks.md  62
-rw-r--r--  docs/sample_config.yaml  41
-rw-r--r--  docs/setup/installation.md  2
-rw-r--r--  docs/upgrade.md  13
-rw-r--r--  docs/usage/administration/admin_api/federation.md  100
-rw-r--r--  mypy.ini  3
-rwxr-xr-x  scripts/synapse_port_db  4
-rwxr-xr-x  setup.py  2
-rw-r--r--  synapse/__init__.py  6
-rw-r--r--  synapse/api/urls.py  1
-rw-r--r--  synapse/app/_base.py  14
-rw-r--r--  synapse/app/homeserver.py  34
-rw-r--r--  synapse/appservice/__init__.py  3
-rw-r--r--  synapse/appservice/api.py  29
-rw-r--r--  synapse/appservice/scheduler.py  97
-rw-r--r--  synapse/config/experimental.py  9
-rw-r--r--  synapse/config/modules.py  10
-rw-r--r--  synapse/config/ratelimiting.py  15
-rw-r--r--  synapse/config/registration.py  12
-rw-r--r--  synapse/config/server.py  88
-rw-r--r--  synapse/events/__init__.py  13
-rw-r--r--  synapse/events/snapshot.py  2
-rw-r--r--  synapse/events/utils.py  57
-rw-r--r--  synapse/events/validator.py  4
-rw-r--r--  synapse/federation/transport/server/__init__.py  16
-rw-r--r--  synapse/federation/transport/server/_base.py  81
-rw-r--r--  synapse/federation/transport/server/federation.py  28
-rw-r--r--  synapse/federation/transport/server/groups_local.py  8
-rw-r--r--  synapse/federation/transport/server/groups_server.py  8
-rw-r--r--  synapse/handlers/appservice.py  136
-rw-r--r--  synapse/handlers/auth.py  58
-rw-r--r--  synapse/handlers/deactivate_account.py  3
-rw-r--r--  synapse/handlers/profile.py  67
-rw-r--r--  synapse/handlers/register.py  26
-rw-r--r--  synapse/handlers/room.py  77
-rw-r--r--  synapse/handlers/room_member.py  15
-rw-r--r--  synapse/handlers/search.py  45
-rw-r--r--  synapse/handlers/sync.py  9
-rw-r--r--  synapse/http/client.py  15
-rw-r--r--  synapse/http/site.py  5
-rw-r--r--  synapse/logging/opentracing.py  41
-rw-r--r--  synapse/logging/scopecontextmanager.py  76
-rw-r--r--  synapse/metrics/__init__.py  10
-rw-r--r--  synapse/module_api/__init__.py  48
-rw-r--r--  synapse/notifier.py  4
-rw-r--r--  synapse/push/baserules.py  219
-rw-r--r--  synapse/push/mailer.py  2
-rw-r--r--  synapse/python_dependencies.py  4
-rw-r--r--  synapse/replication/slave/storage/events.py  2
-rw-r--r--  synapse/rest/admin/__init__.py  8
-rw-r--r--  synapse/rest/admin/federation.py  100
-rw-r--r--  synapse/rest/admin/rooms.py  39
-rw-r--r--  synapse/rest/client/account_data.py  2
-rw-r--r--  synapse/rest/client/push_rule.py  13
-rw-r--r--  synapse/rest/client/register.py  23
-rw-r--r--  synapse/rest/client/room.py  39
-rw-r--r--  synapse/rest/client/sync.py  3
-rw-r--r--  synapse/rest/media/v1/preview_html.py  31
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py  224
-rw-r--r--  synapse/storage/_base.py  11
-rw-r--r--  synapse/storage/database.py  11
-rw-r--r--  synapse/storage/databases/main/account_data.py  137
-rw-r--r--  synapse/storage/databases/main/appservice.py  26
-rw-r--r--  synapse/storage/databases/main/cache.py  16
-rw-r--r--  synapse/storage/databases/main/deviceinbox.py  276
-rw-r--r--  synapse/storage/databases/main/event_federation.py  2
-rw-r--r--  synapse/storage/databases/main/events.py  7
-rw-r--r--  synapse/storage/databases/main/purge_events.py  1
-rw-r--r--  synapse/storage/databases/main/push_rule.py  20
-rw-r--r--  synapse/storage/databases/main/relations.py  65
-rw-r--r--  synapse/storage/databases/main/signatures.py  54
-rw-r--r--  synapse/storage/databases/main/stream.py  22
-rw-r--r--  synapse/storage/databases/main/transactions.py  48
-rw-r--r--  synapse/storage/engines/_base.py  19
-rw-r--r--  synapse/storage/engines/postgres.py  33
-rw-r--r--  synapse/storage/engines/sqlite.py  7
-rw-r--r--  synapse/storage/prepare_database.py  9
-rw-r--r--  synapse/storage/schema/__init__.py  11
-rw-r--r--  synapse/storage/schema/main/delta/67/01drop_public_room_list_stream.sql  18
-rw-r--r--  synapse/storage/schema/main/delta/68/01event_columns.sql  26
-rw-r--r--  synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql  21
-rw-r--r--  synapse/storage/schema/main/delta/68/03_delete_account_data_for_deactivated_accounts.sql  20
-rw-r--r--  synapse/storage/state.py  14
-rw-r--r--  synapse/util/caches/deferred_cache.py  5
-rw-r--r--  synapse/util/caches/descriptors.py  8
-rw-r--r--  synapse/util/caches/lrucache.py  6
-rw-r--r--  synapse/visibility.py  2
-rw-r--r--  tests/appservice/test_appservice.py  19
-rw-r--r--  tests/appservice/test_scheduler.py  109
-rw-r--r--  tests/handlers/test_appservice.py  281
-rw-r--r--  tests/handlers/test_deactivate_account.py  325
-rw-r--r--  tests/handlers/test_password_providers.py  79
-rw-r--r--  tests/handlers/test_profile.py  94
-rw-r--r--  tests/handlers/test_user_directory.py  6
-rw-r--r--  tests/http/test_webclient.py  108
-rw-r--r--  tests/logging/test_opentracing.py  184
-rw-r--r--  tests/replication/slave/storage/test_account_data.py  4
-rw-r--r--  tests/rest/admin/test_admin.py  134
-rw-r--r--  tests/rest/admin/test_federation.py  357
-rw-r--r--  tests/rest/admin/test_room.py  1
-rw-r--r--  tests/rest/admin/test_user.py  262
-rw-r--r--  tests/rest/admin/test_username_available.py  16
-rw-r--r--  tests/rest/client/test_profile.py  156
-rw-r--r--  tests/rest/client/test_register.py  41
-rw-r--r--  tests/rest/client/test_relations.py  2
-rw-r--r--  tests/rest/client/test_room_batch.py  2
-rw-r--r--  tests/rest/client/utils.py  31
-rw-r--r--  tests/rest/media/v1/test_html_preview.py (renamed from tests/test_preview.py)  34
-rw-r--r--  tests/rest/media/v1/test_url_preview.py  81
-rw-r--r--  tests/server.py  2
-rw-r--r--  tests/storage/test_appservice.py  26
-rw-r--r--  tests/storage/test_event_chain.py  5
-rw-r--r--  tests/storage/test_user_directory.py  4
-rw-r--r--  tests/unittest.py  9
-rw-r--r--  tox.ini  3
153 files changed, 4273 insertions, 1496 deletions
diff --git a/.ci/scripts/test_old_deps.sh b/.ci/scripts/test_old_deps.sh
index 8b473936f8..54ec3c8b0d 100755
--- a/.ci/scripts/test_old_deps.sh
+++ b/.ci/scripts/test_old_deps.sh
@@ -1,16 +1,18 @@
 #!/usr/bin/env bash
-
-# this script is run by GitHub Actions in a plain `bionic` container; it installs the
+# this script is run by GitHub Actions in a plain `focal` container; it installs the
 # minimal requirements for tox and hands over to the py3-old tox environment.
 
+# Prevent tzdata from asking for user input
+export DEBIAN_FRONTEND=noninteractive
+
 set -ex
 
 apt-get update
-apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
+apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox libjpeg-dev libwebp-dev
 
 export LANG="C.UTF-8"
 
 # Prevent virtualenv from auto-updating pip to an incompatible version
 export VIRTUALENV_NO_DOWNLOAD=1
 
-exec tox -e py3-old,combine
+exec tox -e py3-old
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 3276d1e122..124b17458f 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -34,6 +34,8 @@ jobs:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
+      # TODO: consider using https://github.com/docker/metadata-action instead of this
+      # custom magic
       - name: Calculate docker image tag
         id: set-tag
         run: |
@@ -53,18 +55,6 @@ jobs:
           esac
           echo "::set-output name=tag::$tag"
 
-        # for release builds, we want to get the amd64 image out asap, so first
-        # we do an amd64-only build, before following up with a multiarch build.
-      - name: Build and push amd64
-        uses: docker/build-push-action@v2
-        if: "${{ startsWith(github.ref, 'refs/tags/v') }}"
-        with:
-          push: true
-          labels: "gitsha1=${{ github.sha }}"
-          tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
-          file: "docker/Dockerfile"
-          platforms: linux/amd64
-
       - name: Build and push all platforms
         uses: docker/build-push-action@v2
         with:
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 4f58069702..e0f80aaaa7 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -141,7 +141,7 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - name: Test with old deps
-        uses: docker://ubuntu:bionic # For old python and sqlite
+        uses: docker://ubuntu:focal # For old python and sqlite
         with:
           workdir: /github/workspace
           entrypoint: .ci/scripts/test_old_deps.sh
@@ -213,15 +213,15 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - sytest-tag: bionic
+          - sytest-tag: focal
 
-          - sytest-tag: bionic
+          - sytest-tag: focal
             postgres: postgres
 
           - sytest-tag: testing
             postgres: postgres
 
-          - sytest-tag: bionic
+          - sytest-tag: focal
             postgres: multi-postgres
             workers: workers
 
@@ -323,17 +323,22 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     needs: linting-done
     runs-on: ubuntu-latest
-    container:
-      # https://github.com/matrix-org/complement/blob/master/dockerfiles/ComplementCIBuildkite.Dockerfile
-      image: matrixdotorg/complement:latest
-      env:
-        CI: true
-      ports:
-        - 8448:8448
-      volumes:
-        - /var/run/docker.sock:/var/run/docker.sock
 
     steps:
+      # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
+      # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
+      - name: "Set Go Version"
+        run: |
+          # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
+          echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
+          # Add the Go path to the PATH: We need this so we can call gotestfmt
+          echo "~/go/bin" >> $GITHUB_PATH
+
+      - name: "Install Complement Dependencies"
+        run: |
+          sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
+          go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
+
       - name: Run actions/checkout@v2 for synapse
         uses: actions/checkout@v2
         with:
@@ -376,8 +381,11 @@ jobs:
         working-directory: complement/dockerfiles
 
       # Run Complement
-      - run: set -o pipefail && go test -v -json -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt
+      - run: |
+          set -o pipefail
+          go test -v -json -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt
         shell: bash
+        name: Run Complement Tests
         env:
           COMPLEMENT_BASE_IMAGE: complement-synapse:latest
         working-directory: complement
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index e974ac7aba..fb9d46b7bf 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -25,7 +25,7 @@ jobs:
       - run: sudo apt-get -qq install xmlsec1
       - uses: actions/setup-python@v2
         with:
-          python-version: 3.6
+          python-version: 3.7
       - run: .ci/patch_for_twisted_trunk.sh
       - run: pip install tox
       - run: tox -e py
diff --git a/CHANGES.md b/CHANGES.md
index 37b9e6bb96..36707db03b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,63 @@
+Synapse 1.52.0rc1 (2022-02-01)
+==============================
+
+Features
+--------
+
+- Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11621](https://github.com/matrix-org/synapse/issues/11621), [\#11788](https://github.com/matrix-org/synapse/issues/11788), [\#11789](https://github.com/matrix-org/synapse/issues/11789))
+- Add an admin API to reset connection timeouts for a remote server. ([\#11639](https://github.com/matrix-org/synapse/issues/11639))
+- Add an admin API to get a list of rooms that federate with a given remote homeserver. ([\#11658](https://github.com/matrix-org/synapse/issues/11658))
+- Add a config flag to inhibit M_USER_IN_USE during registration. ([\#11743](https://github.com/matrix-org/synapse/issues/11743))
+- Add a module callback to set username at registration. ([\#11790](https://github.com/matrix-org/synapse/issues/11790))
+- Allow configuring a maximum file size as well as a list of allowed content types for avatars. ([\#11846](https://github.com/matrix-org/synapse/issues/11846))
+
+
+Bugfixes
+--------
+
+- Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612))
+- Fix a long-standing bug when previewing Reddit URLs which do not contain an image. ([\#11767](https://github.com/matrix-org/synapse/issues/11767))
+- Fix a long-standing bug that media streams could cause long-lived connections when generating URL previews. ([\#11784](https://github.com/matrix-org/synapse/issues/11784))
+- Include a `prev_content` field in state events sent to Application Services. Contributed by @totallynotvaishnav. ([\#11798](https://github.com/matrix-org/synapse/issues/11798))
+- Fix a bug introduced in Synapse 0.33.3 causing requests to sometimes log strings such as `HTTPStatus.OK` instead of integer status codes. ([\#11827](https://github.com/matrix-org/synapse/issues/11827))
+
+
+Improved Documentation
+----------------------
+
+- Update pypi installation docs to indicate that we now support Python 3.10. ([\#11820](https://github.com/matrix-org/synapse/issues/11820))
+- Add missing steps to the contribution submission process in the documentation.  Contributed by @sequentialread. ([\#11821](https://github.com/matrix-org/synapse/issues/11821))
+- Remove an unneeded old table of contents from the documentation. ([\#11860](https://github.com/matrix-org/synapse/issues/11860))
+- Consolidate the `access_token` information at the top of each relevant page in the Admin API documentation. ([\#11861](https://github.com/matrix-org/synapse/issues/11861))
+
+
+Deprecations and Removals
+-------------------------
+
+- Drop support for Python 3.6, which is EOL. ([\#11683](https://github.com/matrix-org/synapse/issues/11683))
+- Remove the `experimental_msc1849_support_enabled` flag as the features are now stable. ([\#11843](https://github.com/matrix-org/synapse/issues/11843))
+
+
+Internal Changes
+----------------
+
+- Preparation for database schema simplifications: add `state_key` and `rejection_reason` columns to `events` table. ([\#11792](https://github.com/matrix-org/synapse/issues/11792))
+- Add `FrozenEvent.get_state_key` and use it in a couple of places. ([\#11793](https://github.com/matrix-org/synapse/issues/11793))
+- Preparation for database schema simplifications: stop reading from `event_reference_hashes`. ([\#11794](https://github.com/matrix-org/synapse/issues/11794))
+- Drop unused table `public_room_list_stream`. ([\#11795](https://github.com/matrix-org/synapse/issues/11795))
+- Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. ([\#11799](https://github.com/matrix-org/synapse/issues/11799), [\#11847](https://github.com/matrix-org/synapse/issues/11847))
+- Docker: skip the initial amd64-only build and go straight to multiarch. ([\#11810](https://github.com/matrix-org/synapse/issues/11810))
+- Run Complement on the GitHub Actions VM and not inside a Docker container. ([\#11811](https://github.com/matrix-org/synapse/issues/11811))
+- Log module names at startup. ([\#11813](https://github.com/matrix-org/synapse/issues/11813))
+- Improve type safety of bundled aggregations code. ([\#11815](https://github.com/matrix-org/synapse/issues/11815))
+- Correct a type annotation in the event validation logic. ([\#11817](https://github.com/matrix-org/synapse/issues/11817), [\#11830](https://github.com/matrix-org/synapse/issues/11830))
+- Minor updates and documentation for database schema delta files. ([\#11823](https://github.com/matrix-org/synapse/issues/11823))
+- Workaround a type annotation problem in `prometheus_client` 0.13.0. ([\#11834](https://github.com/matrix-org/synapse/issues/11834))
+- Minor performance improvement in room state lookup. ([\#11836](https://github.com/matrix-org/synapse/issues/11836))
+- Fix some indentation inconsistencies in the sample config. ([\#11838](https://github.com/matrix-org/synapse/issues/11838))
+- Add type hints to `tests/rest/admin`. ([\#11851](https://github.com/matrix-org/synapse/issues/11851))
+
+
 Synapse 1.51.0 (2022-01-25)
 ===========================
 
diff --git a/changelog.d/11215.feature b/changelog.d/11215.feature
new file mode 100644
index 0000000000..468020834b
--- /dev/null
+++ b/changelog.d/11215.feature
@@ -0,0 +1 @@
+Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). Disabled by default.
diff --git a/changelog.d/11615.misc b/changelog.d/11615.misc
new file mode 100644
index 0000000000..bbc551698d
--- /dev/null
+++ b/changelog.d/11615.misc
@@ -0,0 +1 @@
+Enhance user registration test helpers to make them more useful for tests involving Application Services and devices.
diff --git a/changelog.d/11616.misc b/changelog.d/11616.misc
new file mode 100644
index 0000000000..bbc551698d
--- /dev/null
+++ b/changelog.d/11616.misc
@@ -0,0 +1 @@
+Enhance user registration test helpers to make them more useful for tests involving Application Services and devices.
diff --git a/changelog.d/11655.feature b/changelog.d/11655.feature
new file mode 100644
index 0000000000..dc426fb658
--- /dev/null
+++ b/changelog.d/11655.feature
@@ -0,0 +1 @@
+Remove account data (including client config, push rules and ignored users) upon user deactivation.
\ No newline at end of file
diff --git a/changelog.d/11832.misc b/changelog.d/11832.misc
new file mode 100644
index 0000000000..5ff117d933
--- /dev/null
+++ b/changelog.d/11832.misc
@@ -0,0 +1 @@
+Fix type errors introduced by new annotations in the Prometheus Client library.
\ No newline at end of file
diff --git a/changelog.d/11859.doc b/changelog.d/11859.doc
new file mode 100644
index 0000000000..d903c8ddaf
--- /dev/null
+++ b/changelog.d/11859.doc
@@ -0,0 +1 @@
+Fix typo in User Admin API: unpind -> unbind.
diff --git a/changelog.d/11868.feature b/changelog.d/11868.feature
new file mode 100644
index 0000000000..3723dac4ea
--- /dev/null
+++ b/changelog.d/11868.feature
@@ -0,0 +1 @@
+Allow modules to retrieve the current instance's server name and worker name.
diff --git a/changelog.d/11869.misc b/changelog.d/11869.misc
new file mode 100644
index 0000000000..054fbf6101
--- /dev/null
+++ b/changelog.d/11869.misc
@@ -0,0 +1 @@
+Ensure that `opentracing` scopes are activated and closed at the right time.
diff --git a/changelog.d/11870.misc b/changelog.d/11870.misc
new file mode 100644
index 0000000000..2cb0efdb45
--- /dev/null
+++ b/changelog.d/11870.misc
@@ -0,0 +1 @@
+Improve opentracing for incoming federation requests.
diff --git a/changelog.d/11876.misc b/changelog.d/11876.misc
new file mode 100644
index 0000000000..09f2d0b67f
--- /dev/null
+++ b/changelog.d/11876.misc
@@ -0,0 +1 @@
+Improve internal docstrings in `synapse.util.caches`.
diff --git a/changelog.d/11878.misc b/changelog.d/11878.misc
new file mode 100644
index 0000000000..74915a47dd
--- /dev/null
+++ b/changelog.d/11878.misc
@@ -0,0 +1 @@
+Do not needlessly clear the `get_users_in_room` and `get_users_in_room_with_profiles` caches when any room state changes.
diff --git a/changelog.d/11880.misc b/changelog.d/11880.misc
new file mode 100644
index 0000000000..8125947b2a
--- /dev/null
+++ b/changelog.d/11880.misc
@@ -0,0 +1 @@
+Convert `ApplicationServiceTestCase` to use `simple_async_mock`.
\ No newline at end of file
diff --git a/changelog.d/11884.misc b/changelog.d/11884.misc
new file mode 100644
index 0000000000..d679d6038f
--- /dev/null
+++ b/changelog.d/11884.misc
@@ -0,0 +1 @@
+Remove experimental changes to the default push rules which were introduced in Synapse 1.19.0 but never enabled.
diff --git a/changelog.d/11888.misc b/changelog.d/11888.misc
new file mode 100644
index 0000000000..db1c9b8bbd
--- /dev/null
+++ b/changelog.d/11888.misc
@@ -0,0 +1 @@
+Disable coverage calculation for olddeps build.
diff --git a/changelog.d/11890.bugfix b/changelog.d/11890.bugfix
new file mode 100644
index 0000000000..6b696692e3
--- /dev/null
+++ b/changelog.d/11890.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`.
\ No newline at end of file
diff --git a/changelog.d/11892.feature b/changelog.d/11892.feature
new file mode 100644
index 0000000000..86e21a7f84
--- /dev/null
+++ b/changelog.d/11892.feature
@@ -0,0 +1 @@
+Use a dedicated configurable rate limiter for 3PID invites.
diff --git a/changelog.d/11895.removal b/changelog.d/11895.removal
new file mode 100644
index 0000000000..5973d96a33
--- /dev/null
+++ b/changelog.d/11895.removal
@@ -0,0 +1 @@
+Drop support for `webclient` listeners and configuring `web_client_location` to a non-HTTP(S) URL. These deprecated configurations now cause a configuration error.
diff --git a/debian/changelog b/debian/changelog
index 3a598c4148..a458885655 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.52.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.52.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Feb 2022 11:04:09 +0000
+
 matrix-synapse-py3 (1.51.0) stable; urgency=medium
 
   * New synapse release 1.51.0.
diff --git a/docker/Dockerfile-pgtests b/docker/Dockerfile-pgtests
index 92b804d193..b94484ea7f 100644
--- a/docker/Dockerfile-pgtests
+++ b/docker/Dockerfile-pgtests
@@ -1,6 +1,6 @@
 # Use the Sytest image that comes with a lot of the build dependencies
 # pre-installed
-FROM matrixdotorg/sytest:bionic
+FROM matrixdotorg/sytest:focal
 
 # The Sytest image doesn't come with python, so install that
 RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
diff --git a/docker/run_pg_tests.sh b/docker/run_pg_tests.sh
index 58e2177d34..b22b6ef16b 100755
--- a/docker/run_pg_tests.sh
+++ b/docker/run_pg_tests.sh
@@ -16,4 +16,4 @@ sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/dat
 # Run the tests
 cd /src
 export TRIAL_FLAGS="-j 4"
-tox --workdir=./.tox-pg-container -e py36-postgres "$@"
+tox --workdir=./.tox-pg-container -e py37-postgres "$@"
diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md
index 086899a9d8..32ba15652d 100644
--- a/docs/MSC1711_certificates_FAQ.md
+++ b/docs/MSC1711_certificates_FAQ.md
@@ -44,27 +44,6 @@ For more details and context on the release of the r0.1 Server/Server API and
 imminent Matrix 1.0 release, you can also see our
 [main talk from FOSDEM 2019](https://matrix.org/blog/2019/02/04/matrix-at-fosdem-2019/).
 
-## Contents
-* Timeline
-* Configuring certificates for compatibility with Synapse 1.0
-* FAQ
-  * Synapse 0.99.0 has just been released, what do I need to do right now?
-  * How do I upgrade?
-  * What will happen if I do not set up a valid federation certificate
-    immediately?
-  * What will happen if I do nothing at all?
-  * When do I need a SRV record or .well-known URI?
-  * Can I still use an SRV record?
-  * I have created a .well-known URI. Do I still need an SRV record?
-  * It used to work just fine, why are you breaking everything?
-  * Can I manage my own certificates rather than having Synapse renew
-    certificates itself?
-  * Do you still recommend against using a reverse proxy on the federation port?
-  * Do I still need to give my TLS certificates to Synapse if I am using a
-    reverse proxy?
-  * Do I need the same certificate for the client and federation port?
-  * How do I tell Synapse to reload my keys/certificates after I replace them?
-
 ## Timeline
 
 **5th Feb 2019  - Synapse 0.99.0 is released.**
diff --git a/docs/admin_api/account_validity.md b/docs/admin_api/account_validity.md
index b74b5d0c1a..d878bf7451 100644
--- a/docs/admin_api/account_validity.md
+++ b/docs/admin_api/account_validity.md
@@ -4,6 +4,9 @@ This API allows a server administrator to manage the validity of an account. To
 use it, you must enable the account validity feature (under
 `account_validity`) in Synapse's configuration.
 
+To use it, you will need to authenticate by providing an `access_token`
+for a server admin: see [Admin API](../usage/administration/admin_api).
+
 ## Renew account
 
 This API extends the validity of an account by as much time as configured in the
diff --git a/docs/admin_api/delete_group.md b/docs/admin_api/delete_group.md
index 2e0a1d2474..73a96842ac 100644
--- a/docs/admin_api/delete_group.md
+++ b/docs/admin_api/delete_group.md
@@ -4,11 +4,11 @@ This API lets a server admin delete a local group. Doing so will kick all
 users out of the group so that their clients will correctly handle the group
 being deleted.
 
+To use it, you will need to authenticate by providing an `access_token`
+for a server admin: see [Admin API](../usage/administration/admin_api).
+
 The API is:
 
 ```
 POST /_synapse/admin/v1/delete_group/<group_id>
 ```
-
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [Admin API](../usage/administration/admin_api).
diff --git a/docs/admin_api/event_reports.md b/docs/admin_api/event_reports.md
index f523774ba8..be6f0961bf 100644
--- a/docs/admin_api/event_reports.md
+++ b/docs/admin_api/event_reports.md
@@ -2,12 +2,13 @@
 
 This API returns information about reported events.
 
+To use it, you will need to authenticate by providing an `access_token`
+for a server admin: see [Admin API](../usage/administration/admin_api).
+
 The api is:
 ```
 GET /_synapse/admin/v1/event_reports?from=0&limit=10
 ```
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [Admin API](../usage/administration/admin_api).
 
 It returns a JSON body like the following:
 
@@ -94,8 +95,6 @@ The api is:
 ```
 GET /_synapse/admin/v1/event_reports/<report_id>
 ```
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [Admin API](../usage/administration/admin_api).
 
 It returns a JSON body like the following:
 
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index 60b8bc7379..a8cdf19727 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -1,24 +1,10 @@
-# Contents
-- [Querying media](#querying-media)
-  * [List all media in a room](#list-all-media-in-a-room)
-  * [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
-- [Quarantine media](#quarantine-media)
-  * [Quarantining media by ID](#quarantining-media-by-id)
-  * [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id)
-  * [Quarantining media in a room](#quarantining-media-in-a-room)
-  * [Quarantining all media of a user](#quarantining-all-media-of-a-user)
-  * [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
-  * [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined)
-- [Delete local media](#delete-local-media)
-  * [Delete a specific local media](#delete-a-specific-local-media)
-  * [Delete local media by date or size](#delete-local-media-by-date-or-size)
-  * [Delete media uploaded by a user](#delete-media-uploaded-by-a-user)
-- [Purge Remote Media API](#purge-remote-media-api)
-
 # Querying media
 
 These APIs allow extracting media information from the homeserver.
 
+To use it, you will need to authenticate by providing an `access_token`
+for a server admin: see [Admin API](../usage/administration/admin_api).
+
 ## List all media in a room
 
 This API gets a list of known media in a room.
@@ -28,8 +14,6 @@ The API is:
 ```
 GET /_synapse/admin/v1/room/<room_id>/media
 ```
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [Admin API](../usage/administration/admin_api).
 
 The API returns a JSON body like the following:
 ```json
@@ -317,8 +301,5 @@ The following fields are returned in the JSON response body:
 
 * `deleted`: integer - The number of media items successfully deleted
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [Admin API](../usage/administration/admin_api).
-
 If the user re-requests purged remote media, synapse will re-request the media
 from the originating server.
diff --git a/docs/admin_api/purge_history_api.md b/docs/admin_api/purge_history_api.md
index 277e28d9cb..2527e2758b 100644
--- a/docs/admin_api/purge_history_api.md
+++ b/docs/admin_api/purge_history_api.md
@@ -10,15 +10,15 @@ paginate further back in the room from the point being purged from.
 Note that Synapse requires at least one message in each room, so it will never
 delete the last message in a room.
 
+To use it, you will need to authenticate by providing an `access_token`
+for a server admin: see [Admin API](../usage/administration/admin_api).
+
 The API is:
 
 ```
 POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 By default, events sent by local users are not deleted, as they may represent
 the only copies of this content in existence. (Events sent by remote users are
 deleted.)
@@ -57,9 +57,6 @@ It is possible to poll for updates on recent purges with a second API;
 GET /_synapse/admin/v1/purge_history_status/<purge_id>
 ```
 
-Again, you will need to authenticate by providing an `access_token` for a
-server admin.
-
 This API returns a JSON body like the following:
 
 ```json
diff --git a/docs/admin_api/room_membership.md b/docs/admin_api/room_membership.md
index 548b790a5c..310d6ae628 100644
--- a/docs/admin_api/room_membership.md
+++ b/docs/admin_api/room_membership.md
@@ -5,6 +5,9 @@ to a room with a given `room_id_or_alias`. You can only modify the membership of
 local users. The server administrator must be in the room and have permission to
 invite users.
 
+To use it, you will need to authenticate by providing an `access_token`
+for a server admin: see [Admin API](../usage/administration/admin_api).
+
 ## Parameters
 
 The following parameters are available:
@@ -23,9 +26,6 @@ POST /_synapse/admin/v1/join/<room_id_or_alias>
 }
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [Admin API](../usage/administration/admin_api).
-
 Response:
 
 ```json
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index 0f1a74134f..d4873f9490 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -1,24 +1,12 @@
-# Contents
-- [List Room API](#list-room-api)
-- [Room Details API](#room-details-api)
-- [Room Members API](#room-members-api)
-- [Room State API](#room-state-api)
-- [Block Room API](#block-room-api)
-- [Delete Room API](#delete-room-api)
-  * [Version 1 (old version)](#version-1-old-version)
-  * [Version 2 (new version)](#version-2-new-version)
-  * [Status of deleting rooms](#status-of-deleting-rooms)
-  * [Undoing room shutdowns](#undoing-room-shutdowns)
-- [Make Room Admin API](#make-room-admin-api)
-- [Forward Extremities Admin API](#forward-extremities-admin-api)
-- [Event Context API](#event-context-api)
-
 # List Room API
 
 The List Room admin API allows server admins to get a list of rooms on their
 server. There are various parameters available that allow for filtering and
 sorting the returned list. This API supports pagination.
 
+To use it, you will need to authenticate by providing an `access_token`
+for a server admin: see [Admin API](../usage/administration/admin_api).
+
 **Parameters**
 
 The following query parameters are available:
@@ -493,9 +481,6 @@ several minutes or longer.
 The local server will only have the power to move local user and room aliases to
 the new room. Users on other servers will be unaffected.
 
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see [Admin API](../usage/administration/admin_api).
-
 ## Version 1 (old version)
 
 This version works synchronously. That means you only get the response once the server has
diff --git a/docs/admin_api/statistics.md b/docs/admin_api/statistics.md
index 1901f1eea0..a26c76f9f3 100644
--- a/docs/admin_api/statistics.md
+++ b/docs/admin_api/statistics.md
@@ -3,15 +3,15 @@
 Returns information about all local media usage of users. Gives the
 possibility to filter them by time and user.
 
+To use it, you will need to authenticate by providing an `access_token`
+for a server admin: see [Admin API](../usage/administration/admin_api).
+
 The API is:
 
 ```
 GET /_synapse/admin/v1/statistics/users/media
 ```
 
-To use it, you will need to authenticate by providing an `access_token`
-for a server admin: see [Admin API](../usage/administration/admin_api).
-
 A response body like the following is returned:
 
 ```json
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index c514cadb9d..995782c6bc 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -1,5 +1,8 @@
 # User Admin API
 
+To use it, you will need to authenticate by providing an `access_token`
+for a server admin: see [Admin API](../usage/administration/admin_api).
+
 ## Query User Account
 
 This API returns information about a specific user account.
@@ -10,9 +13,6 @@ The api is:
 GET /_synapse/admin/v2/users/<user_id>
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 It returns a JSON body like the following:
 
 ```jsonc
@@ -104,9 +104,6 @@ with a body of:
 }
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 Returns HTTP status code:
 - `201` - When a new user object was created.
 - `200` - When a user was modified.
@@ -156,9 +153,6 @@ By default, the response is ordered by ascending user ID.
 GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 A response body like the following is returned:
 
 ```json
@@ -278,9 +272,6 @@ GET /_matrix/client/r0/admin/whois/<userId>
 See also: [Client Server
 API Whois](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid).
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 It returns a JSON body like the following:
 
 ```json
@@ -335,15 +326,12 @@ with a body of:
 }
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 The erase parameter is optional and defaults to `false`.
 An empty body may be passed for backwards compatibility.
 
 The following actions are performed when deactivating an user:
 
-- Try to unpind 3PIDs from the identity server
+- Try to unbind 3PIDs from the identity server
 - Remove all 3PIDs from the homeserver
 - Delete all devices and E2EE keys
 - Delete all access tokens
@@ -353,6 +341,11 @@ The following actions are performed when deactivating an user:
 - Remove the user from the user directory
 - Reject all pending invites
 - Remove all account validity information related to the user
+- Remove the arbitrary data store known as *account data*. For example, this includes:
+    - list of ignored users;
+    - push rules;
+    - secret storage keys; and
+    - cross-signing keys.
 
 The following additional actions are performed during deactivation if `erase`
 is set to `true`:
@@ -366,7 +359,6 @@ The following actions are **NOT** performed. The list may be incomplete.
 - Remove mappings of SSO IDs
 - [Delete media uploaded](#delete-media-uploaded-by-a-user) by user (included avatar images)
 - Delete sent and received messages
-- Delete E2E cross-signing keys
 - Remove the user's creation (registration) timestamp
 - [Remove rate limit overrides](#override-ratelimiting-for-users)
 - Remove from monthly active users
@@ -390,9 +382,6 @@ with a body of:
 }
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 The parameter `new_password` is required.
 The parameter `logout_devices` is optional and defaults to `true`.
 
@@ -405,9 +394,6 @@ The api is:
 GET /_synapse/admin/v1/users/<user_id>/admin
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 A response body like the following is returned:
 
 ```json
@@ -435,10 +421,6 @@ with a body of:
 }
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
-
 ## List room memberships of a user
 
 Gets a list of all `room_id`s that a specific `user_id` is a member of.
@@ -449,9 +431,6 @@ The API is:
 GET /_synapse/admin/v1/users/<user_id>/joined_rooms
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 A response body like the following is returned:
 
 ```json
@@ -570,9 +549,6 @@ The API is:
 GET /_synapse/admin/v1/users/<user_id>/media
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 A response body like the following is returned:
 
 ```json
@@ -687,9 +663,6 @@ The API is:
 DELETE /_synapse/admin/v1/users/<user_id>/media
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 A response body like the following is returned:
 
 ```json
@@ -762,9 +735,6 @@ The API is:
 GET /_synapse/admin/v2/users/<user_id>/devices
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 A response body like the following is returned:
 
 ```json
@@ -830,9 +800,6 @@ POST /_synapse/admin/v2/users/<user_id>/delete_devices
 }
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 An empty JSON dict is returned.
 
 **Parameters**
@@ -854,9 +821,6 @@ The API is:
 GET /_synapse/admin/v2/users/<user_id>/devices/<device_id>
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 A response body like the following is returned:
 
 ```json
@@ -902,9 +866,6 @@ PUT /_synapse/admin/v2/users/<user_id>/devices/<device_id>
 }
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 An empty JSON dict is returned.
 
 **Parameters**
@@ -931,9 +892,6 @@ DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>
 {}
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 An empty JSON dict is returned.
 
 **Parameters**
@@ -952,9 +910,6 @@ The API is:
 GET /_synapse/admin/v1/users/<user_id>/pushers
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 A response body like the following is returned:
 
 ```json
@@ -1049,9 +1004,6 @@ To un-shadow-ban a user the API is:
 DELETE /_synapse/admin/v1/users/<user_id>/shadow_ban
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 An empty JSON dict is returned in both cases.
 
 **Parameters**
@@ -1074,9 +1026,6 @@ The API is:
 GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 A response body like the following is returned:
 
 ```json
@@ -1116,9 +1065,6 @@ The API is:
 POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 A response body like the following is returned:
 
 ```json
@@ -1161,9 +1107,6 @@ The API is:
 DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit
 ```
 
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
-
 An empty JSON dict is returned.
 
 ```json
@@ -1192,7 +1135,5 @@ The API is:
 GET /_synapse/admin/v1/username_available?username=$localpart
 ```
 
-The request and response format is the same as the [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
-
-To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](../usage/administration/admin_api)
+The request and response format is the same as the
+[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
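+
+For illustration, the endpoint could be queried as in the sketch below, using only
+the Python standard library; the homeserver URL and admin access token are
+placeholders:
+
+```python
+import urllib.parse
+import urllib.request
+
+BASE = "https://homeserver.example.com"  # placeholder homeserver URL
+TOKEN = "<admin access token>"           # placeholder admin token
+
+# Ask whether the localpart "bob" is still available.
+query = urllib.parse.urlencode({"username": "bob"})
+req = urllib.request.Request(
+    f"{BASE}/_synapse/admin/v1/username_available?{query}",
+    headers={"Authorization": f"Bearer {TOKEN}"},
+)
+with urllib.request.urlopen(req) as resp:
+    print(resp.read())  # e.g. b'{"available":true}' if the name is free
+```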
diff --git a/docs/admin_api/version_api.md b/docs/admin_api/version_api.md
index efb4a0c0f7..27977de0d3 100644
--- a/docs/admin_api/version_api.md
+++ b/docs/admin_api/version_api.md
@@ -16,6 +16,6 @@ It returns a JSON body like the following:
 ```json
 {
     "server_version": "0.99.2rc1 (b=develop, abcdef123)",
-    "python_version": "3.6.8"
+    "python_version": "3.7.8"
 }
 ```
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index c142981693..8448685952 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -55,6 +55,7 @@ setup a *virtualenv*, as follows:
 cd path/where/you/have/cloned/the/repository
 python3 -m venv ./env
 source ./env/bin/activate
+pip install wheel
 pip install -e ".[all,dev]"
 pip install tox
 ```
@@ -116,7 +117,7 @@ The linters look at your code and do two things:
 - ensure that your code follows the coding style adopted by the project;
 - catch a number of errors in your code.
 
-They're pretty fast, don't hesitate!
+The linters are quick to run once you've [downloaded the dependencies into your python virtual environment](#4-install-the-dependencies).
 
 ```sh
 source ./env/bin/activate
diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md
index 256a629210..a767d3af9f 100644
--- a/docs/development/database_schema.md
+++ b/docs/development/database_schema.md
@@ -96,6 +96,60 @@ Ensure postgres is installed, then run:
 NB at the time of writing, this script predates the split into separate `state`/`main`
 databases so will require updates to handle that correctly.
 
+## Delta files
+
+Delta files define the steps required to upgrade the database from an earlier version.
+They can be written as either a file containing a series of SQL statements, or a Python
+module.
+
+Synapse remembers which delta files it has applied to a database (they are stored in the
+`applied_schema_deltas` table) and will not re-apply them (even if a given file is
+subsequently updated).
+
+Delta files should be placed in a directory named `synapse/storage/schema/<database>/delta/<version>/`.
+They are applied in alphanumeric order, so by convention the first two characters
+of the filename should be an integer such as `01`, to put the file in the right order.
+
+### SQL delta files
+
+These should be named `*.sql`, or — for changes which should only be applied for a
+given database engine — `*.sql.postgres` or `*.sql.sqlite`. For example, a delta which
+adds a new column to the `foo` table might be called `01add_bar_to_foo.sql`.
+
+Note that our SQL parser is a bit simple - it understands comments (`--` and `/*...*/`),
+but complex statements which require a `;` in the middle of them (such as `CREATE
+TRIGGER`) are beyond it and you'll have to use a Python delta file.
+
+### Python delta files
+
+For more flexibility, a delta file can take the form of a python module. These should
+be named `*.py`. Note that database-engine-specific modules are not supported here –
+instead you can write `if isinstance(database_engine, PostgresEngine)` or similar.
+
+A Python delta module should define either or both of the following functions:
+
+```python
+import synapse.config.homeserver
+import synapse.storage.engines
+import synapse.storage.types
+
+
+def run_create(
+    cur: synapse.storage.types.Cursor,
+    database_engine: synapse.storage.engines.BaseDatabaseEngine,
+) -> None:
+    """Called whenever an existing or new database is to be upgraded"""
+    ...
+
+def run_upgrade(
+    cur: synapse.storage.types.Cursor,
+    database_engine: synapse.storage.engines.BaseDatabaseEngine,
+    config: synapse.config.homeserver.HomeServerConfig,
+) -> None:
+    """Called whenever an existing database is to be upgraded."""
+    ...
+```
+
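+As an illustration, a Python delta that behaves differently per database engine
+might look like the sketch below. This is a minimal, hypothetical example: the
+`foo` table and `bar` column are invented names, not part of Synapse's schema.
+
+```python
+import synapse.storage.engines
+import synapse.storage.types
+
+
+def run_create(
+    cur: synapse.storage.types.Cursor,
+    database_engine: synapse.storage.engines.BaseDatabaseEngine,
+) -> None:
+    # Hypothetical migration: add a nullable `bar` column to `foo`.
+    if isinstance(database_engine, synapse.storage.engines.PostgresEngine):
+        # Postgres supports ADD COLUMN IF NOT EXISTS.
+        cur.execute("ALTER TABLE foo ADD COLUMN IF NOT EXISTS bar TEXT")
+    else:
+        # SQLite has no IF NOT EXISTS for columns, so add it unconditionally.
+        cur.execute("ALTER TABLE foo ADD COLUMN bar TEXT")
+```
+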
 ## Boolean columns
 
 Boolean columns require special treatment, since SQLite treats booleans the
diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md
index e53abf6409..ec8324d292 100644
--- a/docs/modules/password_auth_provider_callbacks.md
+++ b/docs/modules/password_auth_provider_callbacks.md
@@ -105,6 +105,68 @@ device ID), and the (now deactivated) access token.
 
 If multiple modules implement this callback, Synapse runs them all in order.
 
+### `get_username_for_registration`
+
+_First introduced in Synapse v1.52.0_
+
+```python
+async def get_username_for_registration(
+    uia_results: Dict[str, Any],
+    params: Dict[str, Any],
+) -> Optional[str]
+```
+
+Called when registering a new user. The module can return a username to set for the user
+being registered by returning it as a string, or `None` if it doesn't wish to force a
+username for this user. If a username is returned, it will be used as the local part of a
+user's full Matrix ID (e.g. it's `alice` in `@alice:example.com`).
+
+This callback is called once [User-Interactive Authentication](https://spec.matrix.org/latest/client-server-api/#user-interactive-authentication-api)
+has been completed by the user. It is not called when registering a user via SSO. It is
+passed two dictionaries, which include the information that the user has provided during
+the registration process.
+
+The first dictionary contains the results of the [User-Interactive Authentication](https://spec.matrix.org/latest/client-server-api/#user-interactive-authentication-api)
+flow followed by the user. Its keys are the identifiers of every step involved in the flow,
+associated with either a boolean value indicating whether the step was correctly completed,
+or additional information (e.g. email address, phone number...). A list of most existing
+identifiers can be found in the [Matrix specification](https://spec.matrix.org/v1.1/client-server-api/#authentication-types).
+Here's an example featuring all currently supported keys:
+
+```python
+{
+    "m.login.dummy": True,  # Dummy authentication
+    "m.login.terms": True,  # User has accepted the terms of service for the homeserver
+    "m.login.recaptcha": True,  # User has completed the recaptcha challenge
+    "m.login.email.identity": {  # User has provided and verified an email address
+        "medium": "email",
+        "address": "alice@example.com",
+        "validated_at": 1642701357084,
+    },
+    "m.login.msisdn": {  # User has provided and verified a phone number
+        "medium": "msisdn",
+        "address": "33123456789",
+        "validated_at": 1642701357084,
+    },
+    "org.matrix.msc3231.login.registration_token": "sometoken",  # User has registered through the flow described in MSC3231
+}
+```
+
+The second dictionary contains the parameters provided by the user's client in the request
+to `/_matrix/client/v3/register`. See the [Matrix specification](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3register)
+for a complete list of these parameters.
+
+If the module cannot, or does not wish to, generate a username for this user, it must
+return `None`.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `None`, Synapse falls through to the next one. The value of the first
+callback that does not return `None` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback. If every callback returns `None`,
+the username provided by the user is used, if any (otherwise one is automatically
+generated).
+
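+For illustration, a minimal module wiring up this callback might look like the
+sketch below. The class name, config handling and username policy are example
+assumptions, not prescribed by Synapse:
+
+```python
+from typing import Any, Dict, Optional
+
+
+class ExampleUsernameModule:
+    def __init__(self, config: dict, api):
+        # `api` is a synapse.module_api.ModuleApi instance.
+        api.register_password_auth_provider_callbacks(
+            get_username_for_registration=self.get_username_for_registration,
+        )
+
+    async def get_username_for_registration(
+        self,
+        uia_results: Dict[str, Any],
+        params: Dict[str, Any],
+    ) -> Optional[str]:
+        # Hypothetical policy: derive the localpart from a verified email
+        # address, e.g. alice@example.com -> alice.
+        email_step = uia_results.get("m.login.email.identity")
+        if isinstance(email_step, dict):
+            return email_step["address"].split("@")[0]
+        # Defer to any other modules, or to the client-supplied username.
+        return None
+```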
+
 ## Example
 
 The example module below implements authentication checkers for two different login types: 
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 1b86d0295d..946cd281d2 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -41,11 +41,11 @@
 # documentation on how to configure or create custom modules for Synapse.
 #
 modules:
-    # - module: my_super_module.MySuperClass
-    #   config:
-    #       do_thing: true
-    # - module: my_other_super_module.SomeClass
-    #   config: {}
+  #- module: my_super_module.MySuperClass
+  #  config:
+  #    do_thing: true
+  #- module: my_other_super_module.SomeClass
+  #  config: {}
 
 
 ## Server ##
@@ -471,6 +471,20 @@ limit_remote_rooms:
 #
 #allow_per_room_profiles: false
 
+# The largest allowed file size for a user avatar. Defaults to no restriction.
+#
+# Note that user avatar changes will not work if this is set without
+# using Synapse's media repository.
+#
+#max_avatar_size: 10M
+
+# The MIME types allowed for user avatars. Defaults to no restriction.
+#
+# Note that user avatar changes will not work if this is set without
+# using Synapse's media repository.
+#
+#allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
+
 # How long to keep redacted events in unredacted form in the database. After
 # this period redacted events get replaced with their redacted form in the DB.
 #
@@ -843,6 +857,9 @@ log_config: "CONFDIR/SERVERNAME.log.config"
 #   - one for ratelimiting how often a user or IP can attempt to validate a 3PID.
 #   - two for ratelimiting how often invites can be sent in a room or to a
 #     specific user.
+#   - one for ratelimiting 3PID invites (i.e. invites sent to a third-party ID
+#     such as an email address or a phone number) based on the account that's
+#     sending the invite.
 #
 # The defaults are as shown below.
 #
@@ -892,6 +909,10 @@ log_config: "CONFDIR/SERVERNAME.log.config"
 #  per_user:
 #    per_second: 0.003
 #    burst_count: 5
+#
+#rc_third_party_invite:
+#  per_second: 0.2
+#  burst_count: 10
 
 # Ratelimiting settings for incoming federation
 #
@@ -1428,6 +1449,16 @@ account_threepid_delegates:
 #
 #auto_join_rooms_for_guests: false
 
+# Whether to inhibit errors raised when registering a new account if the user ID
+# already exists. If turned on, requests to /register/available will always
+# show a user ID as available, and Synapse won't raise an error when starting
+# a registration with a user ID that already exists. However, Synapse will still
+# raise an error if the registration completes and the username conflicts.
+#
+# Defaults to false.
+#
+#inhibit_user_in_use_error: true
+
 
 ## Metrics ###
 
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index fe657a15df..69ade036c3 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -194,7 +194,7 @@ When following this route please make sure that the [Platform-specific prerequis
 System requirements:
 
 - POSIX-compliant system (tested on Linux & OS X)
-- Python 3.7 or later, up to Python 3.9.
+- Python 3.7 or later, up to Python 3.10.
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
 
 To install the Synapse homeserver run:
diff --git a/docs/upgrade.md b/docs/upgrade.md
index f455d257ba..7d582af0a7 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -85,6 +85,19 @@ process, for example:
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     ```
 
+# Upgrading to v1.53.0
+
+## Dropping support for `webclient` listeners and non-HTTP(S) `web_client_location`
+
+Per the deprecation notice in Synapse v1.51.0, listeners of type `webclient`
+are no longer supported and configuring them is now a configuration error.
+
+Setting `web_client_location` to a non-HTTP(S) URL is also now a
+configuration error. Since the `webclient` listener is no longer supported, this
+setting now only applies to the root path `/` of Synapse's web server and no
+longer to the `/_matrix/client/` path.
+
+
 # Upgrading to v1.51.0
 
 ## Deprecation of `webclient` listeners and non-HTTP(S) `web_client_location`
diff --git a/docs/usage/administration/admin_api/federation.md b/docs/usage/administration/admin_api/federation.md
index 8f9535f57b..60cbc5265e 100644
--- a/docs/usage/administration/admin_api/federation.md
+++ b/docs/usage/administration/admin_api/federation.md
@@ -86,7 +86,7 @@ The following fields are returned in the JSON response body:
 - `next_token`: string representing a positive integer - Indication for pagination. See above.
 - `total` - integer - Total number of destinations.
 
-# Destination Details API
+## Destination Details API
 
 This API gets the retry timing info for a specific remote server.
 
@@ -108,7 +108,105 @@ A response body like the following is returned:
 }
 ```
 
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `destination` - Name of the remote server.
+
 **Response**
 
 The response fields are the same as in the `destinations` array in the
 [List of destinations](#list-of-destinations) response.
+
+## Destination rooms
+
+This API gets the rooms that federate with a specific remote server.
+
+The API is:
+
+```
+GET /_synapse/admin/v1/federation/destinations/<destination>/rooms
+```
+
+A response body like the following is returned:
+
+```json
+{
+   "rooms":[
+      {
+         "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
+         "stream_ordering": 8326
+      },
+      {
+         "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
+         "stream_ordering": 93534
+      }
+   ],
+   "total": 2
+}
+```
+
+To paginate, check for `next_token` and if present, call the endpoint again
+with `from` set to the value of `next_token`. This will return a new page.
+
+If the endpoint does not return a `next_token` then there are no more rooms
+to paginate through.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `destination` - Name of the remote server.
+
+The following query parameters are available:
+
+- `from` - Offset in the returned list. Defaults to `0`.
+- `limit` - Maximum number of rooms to return. Defaults to `100`.
+- `dir` - Direction in which to order rooms by `room_id`. Either `f` for forwards or `b` for
+  backwards. Defaults to `f`.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `rooms` - An array of objects, each containing information about a room.
+  Room objects contain the following fields:
+  - `room_id` - string - The ID of the room.
+  - `stream_ordering` - integer - The stream ordering of the most recent
+    successfully-sent [PDU](understanding_synapse_through_grafana_graphs.md#federation)
+    to this destination in this room.
+- `next_token`: string representing a positive integer - Indication for pagination. See above.
+- `total` - integer - Total number of rooms.
+
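+For example, an illustrative request for a further page of rooms for
+`matrix.org`, ordered backwards by `room_id`:
+
+```
+GET /_synapse/admin/v1/federation/destinations/matrix.org/rooms?from=100&dir=b
+```
+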
+## Reset connection timeout
+
+Synapse makes federation requests to other homeservers. If a federation request fails,
+Synapse will mark the destination homeserver as offline, preventing any future requests
+to that server for a "cooldown" period. This period grows over time if the server
+continues to fail to respond
+([exponential backoff](https://en.wikipedia.org/wiki/Exponential_backoff)).
+
+Admins can cancel the cooldown period with this API.
+
+This API resets the retry timing for a specific remote server and tries to connect to
+the remote server again. It does not wait for the next `retry_interval`.
+The connection must have previously run into an error and `retry_last_ts`
+([Destination Details API](#destination-details-api)) must not be equal to `0`.
+
+The connection attempt is carried out in the background and can take some time,
+even if the API has already returned HTTP status 200.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/federation/destinations/<destination>/reset_connection
+
+{}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `destination` - Name of the remote server.
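+
+For example, using `curl` (an illustrative invocation; `<access_token>` must
+belong to a server admin):
+
+```
+curl -X POST --header "Authorization: Bearer <access_token>" --data '{}' \
+    http://localhost:8008/_synapse/admin/v1/federation/destinations/matrix.org/reset_connection
+```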
diff --git a/mypy.ini b/mypy.ini
index 85fa22d28f..2884078d0a 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -77,9 +77,6 @@ exclude = (?x)
    |tests/push/test_http.py
    |tests/push/test_presentable_names.py
    |tests/push/test_push_rule_evaluator.py
-   |tests/rest/admin/test_admin.py
-   |tests/rest/admin/test_user.py
-   |tests/rest/admin/test_username_available.py
    |tests/rest/client/test_account.py
    |tests/rest/client/test_events.py
    |tests/rest/client/test_filter.py
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 640ff15277..70ee4e5c7f 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -36,6 +36,8 @@ from synapse.logging.context import (
     run_in_background,
 )
 from synapse.storage.database import DatabasePool, make_conn
+from synapse.storage.databases.main import PushRuleStore
+from synapse.storage.databases.main.account_data import AccountDataWorkerStore
 from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
 from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
 from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
@@ -180,6 +182,8 @@ class Store(
     UserDirectoryBackgroundUpdateStore,
     EndToEndKeyBackgroundStore,
     StatsStore,
+    AccountDataWorkerStore,
+    PushRuleStore,
     PusherWorkerStore,
     PresenceBackgroundUpdateStore,
     GroupServerWorkerStore,
diff --git a/setup.py b/setup.py
index e618ff898b..d0511c767f 100755
--- a/setup.py
+++ b/setup.py
@@ -150,7 +150,7 @@ setup(
     zip_safe=False,
     long_description=long_description,
     long_description_content_type="text/x-rst",
-    python_requires="~=3.6",
+    python_requires="~=3.7",
     entry_points={
         "console_scripts": [
             "synapse_homeserver = synapse.app.homeserver:main",
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 26bdfec33a..5e65033061 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -21,8 +21,8 @@ import os
 import sys
 
 # Check that we're not running on an unsupported Python version.
-if sys.version_info < (3, 6):
-    print("Synapse requires Python 3.6 or above.")
+if sys.version_info < (3, 7):
+    print("Synapse requires Python 3.7 or above.")
     sys.exit(1)
 
 # Twisted and canonicaljson will fail to import when this file is executed to
@@ -47,7 +47,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.51.0"
+__version__ = "1.52.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index f9f9467dc1..bd49fa6a5f 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -28,7 +28,6 @@ FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
 FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
 FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
 STATIC_PREFIX = "/_matrix/static"
-WEB_CLIENT_PREFIX = "/_matrix/client"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
 MEDIA_R0_PREFIX = "/_matrix/media/r0"
 MEDIA_V3_PREFIX = "/_matrix/media/v3"
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 579adbbca0..bbab8a052a 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -16,7 +16,6 @@ import atexit
 import gc
 import logging
 import os
-import platform
 import signal
 import socket
 import sys
@@ -436,7 +435,8 @@ async def start(hs: "HomeServer") -> None:
     # before we start the listeners.
     module_api = hs.get_module_api()
     for module, config in hs.config.modules.loaded_modules:
-        module(config=config, api=module_api)
+        m = module(config=config, api=module_api)
+        logger.info("Loaded module %s", m)
 
     load_legacy_spam_checkers(hs)
     load_legacy_third_party_event_rules(hs)
@@ -468,15 +468,13 @@ async def start(hs: "HomeServer") -> None:
     # everything currently allocated are things that will be used for the
     # rest of time. Doing so means less work each GC (hopefully).
     #
-    # This only works on Python 3.7
-    if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
+    # PyPy does not (yet?) implement gc.freeze()
+    if hasattr(gc, "freeze"):
         gc.collect()
         gc.freeze()
 
-    # Speed up shutdowns by freezing all allocated objects. This moves everything
-    # into the permanent generation and excludes them from the final GC.
-    # Unfortunately only works on Python 3.7
-    if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
+        # Speed up shutdowns by freezing all allocated objects. This moves everything
+        # into the permanent generation and excludes them from the final GC.
         atexit.register(gc.freeze)
 
 
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index efedcc8889..24d55b0494 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -21,7 +21,6 @@ from typing import Dict, Iterable, Iterator, List
 from twisted.internet.tcp import Port
 from twisted.web.resource import EncodingResourceWrapper, Resource
 from twisted.web.server import GzipEncoderFactory
-from twisted.web.static import File
 
 import synapse
 import synapse.config.logger
@@ -33,7 +32,6 @@ from synapse.api.urls import (
     MEDIA_V3_PREFIX,
     SERVER_KEY_V2_PREFIX,
     STATIC_PREFIX,
-    WEB_CLIENT_PREFIX,
 )
 from synapse.app import _base
 from synapse.app._base import (
@@ -53,7 +51,6 @@ from synapse.http.additional_resource import AdditionalResource
 from synapse.http.server import (
     OptionsResource,
     RootOptionsRedirectResource,
-    RootRedirect,
     StaticResource,
 )
 from synapse.http.site import SynapseSite
@@ -134,15 +131,12 @@ class SynapseHomeServer(HomeServer):
         # Try to find something useful to serve at '/':
         #
         # 1. Redirect to the web client if it is an HTTP(S) URL.
-        # 2. Redirect to the web client served via Synapse.
-        # 3. Redirect to the static "Synapse is running" page.
-        # 4. Do not redirect and use a blank resource.
-        if self.config.server.web_client_location_is_redirect:
+        # 2. Redirect to the static "Synapse is running" page.
+        # 3. Do not redirect and use a blank resource.
+        if self.config.server.web_client_location:
             root_resource: Resource = RootOptionsRedirectResource(
                 self.config.server.web_client_location
             )
-        elif WEB_CLIENT_PREFIX in resources:
-            root_resource = RootOptionsRedirectResource(WEB_CLIENT_PREFIX)
         elif STATIC_PREFIX in resources:
             root_resource = RootOptionsRedirectResource(STATIC_PREFIX)
         else:
@@ -270,28 +264,6 @@ class SynapseHomeServer(HomeServer):
         if name in ["keys", "federation"]:
             resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
 
-        if name == "webclient":
-            # webclient listeners are deprecated as of Synapse v1.51.0, remove it
-            # in > v1.53.0.
-            webclient_loc = self.config.server.web_client_location
-
-            if webclient_loc is None:
-                logger.warning(
-                    "Not enabling webclient resource, as web_client_location is unset."
-                )
-            elif self.config.server.web_client_location_is_redirect:
-                resources[WEB_CLIENT_PREFIX] = RootRedirect(webclient_loc)
-            else:
-                logger.warning(
-                    "Running webclient on the same domain is not recommended: "
-                    "https://github.com/matrix-org/synapse#security-note - "
-                    "after you move webclient to different host you can set "
-                    "web_client_location to its full URL to enable redirection."
-                )
-                # GZip is disabled here due to
-                # https://twistedmatrix.com/trac/ticket/7678
-                resources[WEB_CLIENT_PREFIX] = File(webclient_loc)
-
         if name == "metrics" and self.config.metrics.enable_metrics:
             resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
 
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 8c9ff93b2c..7dbebd97b5 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -351,11 +351,13 @@ class AppServiceTransaction:
         id: int,
         events: List[EventBase],
         ephemeral: List[JsonDict],
+        to_device_messages: List[JsonDict],
     ):
         self.service = service
         self.id = id
         self.events = events
         self.ephemeral = ephemeral
+        self.to_device_messages = to_device_messages
 
     async def send(self, as_api: "ApplicationServiceApi") -> bool:
         """Sends this transaction using the provided AS API interface.
@@ -369,6 +371,7 @@ class AppServiceTransaction:
             service=self.service,
             events=self.events,
             ephemeral=self.ephemeral,
+            to_device_messages=self.to_device_messages,
             txn_id=self.id,
         )
 
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index def4424af0..73be7ff3d4 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -218,8 +218,23 @@ class ApplicationServiceApi(SimpleHttpClient):
         service: "ApplicationService",
         events: List[EventBase],
         ephemeral: List[JsonDict],
+        to_device_messages: List[JsonDict],
         txn_id: Optional[int] = None,
     ) -> bool:
+        """
+        Push data to an application service.
+
+        Args:
+            service: The application service to send to.
+            events: The persistent events to send.
+            ephemeral: The ephemeral events to send.
+            to_device_messages: The to-device messages to send.
+            txn_id: A unique ID to assign to this transaction. Application services should
+                deduplicate transactions received with identical IDs.
+
+        Returns:
+            True if the task succeeded, False if it failed.
+        """
         if service.url is None:
             return True
 
@@ -237,13 +252,15 @@ class ApplicationServiceApi(SimpleHttpClient):
         uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id)))
 
         # Never send ephemeral events to appservices that do not support it
+        body: Dict[str, List[JsonDict]] = {"events": serialized_events}
         if service.supports_ephemeral:
-            body = {
-                "events": serialized_events,
-                "de.sorunome.msc2409.ephemeral": ephemeral,
-            }
-        else:
-            body = {"events": serialized_events}
+            body.update(
+                {
+                    # TODO: Update to stable prefixes once MSC2409 completes FCP merge.
+                    "de.sorunome.msc2409.ephemeral": ephemeral,
+                    "de.sorunome.msc2409.to_device": to_device_messages,
+                }
+            )
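+            # The resulting request body is of the form (sketch):
+            #   {
+            #       "events": [...],
+            #       "de.sorunome.msc2409.ephemeral": [...],
+            #       "de.sorunome.msc2409.to_device": [...],
+            #   }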
 
         try:
             await self.put_json(
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 185e3a5278..c42fa32fff 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -48,7 +48,16 @@ This is all tied together by the AppServiceScheduler which DIs the required
 components.
 """
 import logging
-from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Set
+from typing import (
+    TYPE_CHECKING,
+    Awaitable,
+    Callable,
+    Collection,
+    Dict,
+    List,
+    Optional,
+    Set,
+)
 
 from synapse.appservice import ApplicationService, ApplicationServiceState
 from synapse.appservice.api import ApplicationServiceApi
@@ -71,6 +80,9 @@ MAX_PERSISTENT_EVENTS_PER_TRANSACTION = 100
 # Maximum number of ephemeral events to provide in an AS transaction.
 MAX_EPHEMERAL_EVENTS_PER_TRANSACTION = 100
 
+# Maximum number of to-device messages to provide in an AS transaction.
+MAX_TO_DEVICE_MESSAGES_PER_TRANSACTION = 100
+
 
 class ApplicationServiceScheduler:
     """Public facing API for this module. Does the required DI to tie the
@@ -97,15 +109,40 @@ class ApplicationServiceScheduler:
         for service in services:
             self.txn_ctrl.start_recoverer(service)
 
-    def submit_event_for_as(
-        self, service: ApplicationService, event: EventBase
+    def enqueue_for_appservice(
+        self,
+        appservice: ApplicationService,
+        events: Optional[Collection[EventBase]] = None,
+        ephemeral: Optional[Collection[JsonDict]] = None,
+        to_device_messages: Optional[Collection[JsonDict]] = None,
     ) -> None:
-        self.queuer.enqueue_event(service, event)
+        """
+        Enqueue some data to be sent off to an application service.
 
-    def submit_ephemeral_events_for_as(
-        self, service: ApplicationService, events: List[JsonDict]
-    ) -> None:
-        self.queuer.enqueue_ephemeral(service, events)
+        Args:
+            appservice: The application service to create and send a transaction to.
+            events: The persistent room events to send.
+            ephemeral: The ephemeral events to send.
+            to_device_messages: The to-device messages to send. These differ from normal
+                to-device messages sent to clients, as they have 'to_device_id' and
+                'to_user_id' fields.
+        """
+        # We purposefully allow this method to run with empty events/ephemeral
+        # collections, so that callers do not need to check iterable size themselves.
+        if not events and not ephemeral and not to_device_messages:
+            return
+
+        if events:
+            self.queuer.queued_events.setdefault(appservice.id, []).extend(events)
+        if ephemeral:
+            self.queuer.queued_ephemeral.setdefault(appservice.id, []).extend(ephemeral)
+        if to_device_messages:
+            self.queuer.queued_to_device_messages.setdefault(appservice.id, []).extend(
+                to_device_messages
+            )
+
+        # Kick off a new application service transaction
+        self.queuer.start_background_request(appservice)
 
 
 class _ServiceQueuer:
@@ -121,13 +158,15 @@ class _ServiceQueuer:
         self.queued_events: Dict[str, List[EventBase]] = {}
         # dict of {service_id: [events]}
         self.queued_ephemeral: Dict[str, List[JsonDict]] = {}
+        # dict of {service_id: [to_device_message_json]}
+        self.queued_to_device_messages: Dict[str, List[JsonDict]] = {}
 
         # the appservices which currently have a transaction in flight
         self.requests_in_flight: Set[str] = set()
         self.txn_ctrl = txn_ctrl
         self.clock = clock
 
-    def _start_background_request(self, service: ApplicationService) -> None:
+    def start_background_request(self, service: ApplicationService) -> None:
         # start a sender for this appservice if we don't already have one
         if service.id in self.requests_in_flight:
             return
@@ -136,16 +175,6 @@ class _ServiceQueuer:
             "as-sender-%s" % (service.id,), self._send_request, service
         )
 
-    def enqueue_event(self, service: ApplicationService, event: EventBase) -> None:
-        self.queued_events.setdefault(service.id, []).append(event)
-        self._start_background_request(service)
-
-    def enqueue_ephemeral(
-        self, service: ApplicationService, events: List[JsonDict]
-    ) -> None:
-        self.queued_ephemeral.setdefault(service.id, []).extend(events)
-        self._start_background_request(service)
-
     async def _send_request(self, service: ApplicationService) -> None:
         # sanity-check: we shouldn't get here if this service already has a sender
         # running.
@@ -162,11 +191,21 @@ class _ServiceQueuer:
                 ephemeral = all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION]
                 del all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION]
 
-                if not events and not ephemeral:
+                all_to_device_messages = self.queued_to_device_messages.get(
+                    service.id, []
+                )
+                to_device_messages_to_send = all_to_device_messages[
+                    :MAX_TO_DEVICE_MESSAGES_PER_TRANSACTION
+                ]
+                del all_to_device_messages[:MAX_TO_DEVICE_MESSAGES_PER_TRANSACTION]
+
+                if not events and not ephemeral and not to_device_messages_to_send:
                     return
 
                 try:
-                    await self.txn_ctrl.send(service, events, ephemeral)
+                    await self.txn_ctrl.send(
+                        service, events, ephemeral, to_device_messages_to_send
+                    )
                 except Exception:
                     logger.exception("AS request failed")
         finally:
@@ -198,10 +237,24 @@ class _TransactionController:
         service: ApplicationService,
         events: List[EventBase],
         ephemeral: Optional[List[JsonDict]] = None,
+        to_device_messages: Optional[List[JsonDict]] = None,
     ) -> None:
+        """
+        Create a transaction with the given data and send to the provided
+        application service.
+
+        Args:
+            service: The application service to send the transaction to.
+            events: The persistent events to include in the transaction.
+            ephemeral: The ephemeral events to include in the transaction.
+            to_device_messages: The to-device messages to include in the transaction.
+        """
         try:
             txn = await self.store.create_appservice_txn(
-                service=service, events=events, ephemeral=ephemeral or []
+                service=service,
+                events=events,
+                ephemeral=ephemeral or [],
+                to_device_messages=to_device_messages or [],
             )
             service_is_up = await self._is_service_up(service)
             if service_is_up:
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index dbaeb10918..e4719d19b8 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -24,8 +24,6 @@ class ExperimentalConfig(Config):
     def read_config(self, config: JsonDict, **kwargs):
         experimental = config.get("experimental_features") or {}
 
-        # Whether to enable experimental MSC1849 (aka relations) support
-        self.msc1849_enabled = config.get("experimental_msc1849_support_enabled", True)
         # MSC3440 (thread relation)
         self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False)
 
@@ -54,3 +52,10 @@ class ExperimentalConfig(Config):
         self.msc3202_device_masquerading_enabled: bool = experimental.get(
             "msc3202_device_masquerading", False
         )
+
+        # MSC2409 (this setting only relates to optionally sending to-device messages).
+        # Presence, typing and read receipt EDUs are already sent to application services that
+        # have opted in to receive them. If enabled, this adds to-device messages to that list.
+        self.msc2409_to_device_messages_enabled: bool = experimental.get(
+            "msc2409_to_device_messages_enabled", False
+        )
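+        # To opt in, a server admin would set (sketch):
+        #
+        #   experimental_features:
+        #     msc2409_to_device_messages_enabled: true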
diff --git a/synapse/config/modules.py b/synapse/config/modules.py
index 85fb05890d..2ef02b8f55 100644
--- a/synapse/config/modules.py
+++ b/synapse/config/modules.py
@@ -41,9 +41,9 @@ class ModulesConfig(Config):
             # documentation on how to configure or create custom modules for Synapse.
             #
             modules:
-                # - module: my_super_module.MySuperClass
-                #   config:
-                #       do_thing: true
-                # - module: my_other_super_module.SomeClass
-                #   config: {}
+              #- module: my_super_module.MySuperClass
+              #  config:
+              #    do_thing: true
+              #- module: my_other_super_module.SomeClass
+              #  config: {}
             """
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 36636ab07e..e9ccf1bd62 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -134,6 +134,14 @@ class RatelimitConfig(Config):
             defaults={"per_second": 0.003, "burst_count": 5},
         )
 
+        self.rc_third_party_invite = RateLimitConfig(
+            config.get("rc_third_party_invite", {}),
+            defaults={
+                "per_second": self.rc_message.per_second,
+                "burst_count": self.rc_message.burst_count,
+            },
+        )
+
     def generate_config_section(self, **kwargs):
         return """\
         ## Ratelimiting ##
@@ -168,6 +176,9 @@ class RatelimitConfig(Config):
         #   - one for ratelimiting how often a user or IP can attempt to validate a 3PID.
         #   - two for ratelimiting how often invites can be sent in a room or to a
         #     specific user.
+        #   - one for ratelimiting 3PID invites (i.e. invites sent to a third-party ID
+        #     such as an email address or a phone number) based on the account that's
+        #     sending the invite.
         #
         # The defaults are as shown below.
         #
@@ -217,6 +228,10 @@ class RatelimitConfig(Config):
         #  per_user:
         #    per_second: 0.003
         #    burst_count: 5
+        #
+        #rc_third_party_invite:
+        #  per_second: 0.2
+        #  burst_count: 10
 
         # Ratelimiting settings for incoming federation
         #
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 7a059c6dec..ea9b50fe97 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -190,6 +190,8 @@ class RegistrationConfig(Config):
         # The success template used during fallback auth.
         self.fallback_success_template = self.read_template("auth_success.html")
 
+        self.inhibit_user_in_use_error = config.get("inhibit_user_in_use_error", False)
+
     def generate_config_section(self, generate_secrets=False, **kwargs):
         if generate_secrets:
             registration_shared_secret = 'registration_shared_secret: "%s"' % (
@@ -446,6 +448,16 @@ class RegistrationConfig(Config):
         # Defaults to true.
         #
         #auto_join_rooms_for_guests: false
+
+        # Whether to inhibit errors raised when registering a new account if the user ID
+        # already exists. If turned on, requests to /register/available will always
+        # show a user ID as available, and Synapse won't raise an error when starting
+        # a registration with a user ID that already exists. However, Synapse will still
+        # raise an error if the registration completes and the username conflicts.
+        #
+        # Defaults to false.
+        #
+        #inhibit_user_in_use_error: true
         """
             % locals()
         )
diff --git a/synapse/config/server.py b/synapse/config/server.py
index f200d0c1f1..7bc9624546 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -179,7 +179,6 @@ KNOWN_RESOURCES = {
     "openid",
     "replication",
     "static",
-    "webclient",
 }
 
 
@@ -489,6 +488,19 @@ class ServerConfig(Config):
         # events with profile information that differ from the target's global profile.
         self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
 
+        # The maximum size an avatar can have, in bytes.
+        self.max_avatar_size = config.get("max_avatar_size")
+        if self.max_avatar_size is not None:
+            self.max_avatar_size = self.parse_size(self.max_avatar_size)
+
+        # The MIME types allowed for an avatar.
+        self.allowed_avatar_mimetypes = config.get("allowed_avatar_mimetypes")
+        if self.allowed_avatar_mimetypes and not isinstance(
+            self.allowed_avatar_mimetypes,
+            list,
+        ):
+            raise ConfigError("allowed_avatar_mimetypes must be a list")
+
         self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
 
         # no_tls is not really supported any more, but let's grandfather it in
@@ -506,16 +518,12 @@ class ServerConfig(Config):
             self.listeners = l2
 
         self.web_client_location = config.get("web_client_location", None)
-        self.web_client_location_is_redirect = self.web_client_location and (
+        # Non-HTTP(S) web client location is not supported.
+        if self.web_client_location and not (
             self.web_client_location.startswith("http://")
             or self.web_client_location.startswith("https://")
-        )
-        # A non-HTTP(S) web client location is deprecated.
-        if self.web_client_location and not self.web_client_location_is_redirect:
-            logger.warning(NO_MORE_NONE_HTTP_WEB_CLIENT_LOCATION_WARNING)
-
-        # Warn if webclient is configured for a worker.
-        _warn_if_webclient_configured(self.listeners)
+        ):
+            raise ConfigError("web_client_location must point to an HTTP(S) URL.")
 
         self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
         self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None))
@@ -643,19 +651,6 @@ class ServerConfig(Config):
             False,
         )
 
-        # List of users trialing the new experimental default push rules. This setting is
-        # not included in the sample configuration file on purpose as it's a temporary
-        # hack, so that some users can trial the new defaults without impacting every
-        # user on the homeserver.
-        users_new_default_push_rules: list = (
-            config.get("users_new_default_push_rules") or []
-        )
-        if not isinstance(users_new_default_push_rules, list):
-            raise ConfigError("'users_new_default_push_rules' must be a list")
-
-        # Turn the list into a set to improve lookup speed.
-        self.users_new_default_push_rules: set = set(users_new_default_push_rules)
-
         # Whitelist of domain names that given next_link parameters must have
         next_link_domain_whitelist: Optional[List[str]] = config.get(
             "next_link_domain_whitelist"
@@ -1168,6 +1163,20 @@ class ServerConfig(Config):
         #
         #allow_per_room_profiles: false
 
+        # The largest allowed file size for a user avatar. Defaults to no restriction.
+        #
+        # Note that user avatar changes will not work if this is set without
+        # using Synapse's media repository.
+        #
+        #max_avatar_size: 10M
+
+        # The MIME types allowed for user avatars. Defaults to no restriction.
+        #
+        # Note that user avatar changes will not work if this is set without
+        # using Synapse's media repository.
+        #
+        #allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
+
         # How long to keep redacted events in unredacted form in the database. After
         # this period redacted events get replaced with their redacted form in the DB.
         #
@@ -1337,11 +1346,16 @@ def parse_listener_def(listener: Any) -> ListenerConfig:
 
     http_config = None
     if listener_type == "http":
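+        # A ValueError here (e.g. from an unrecognised resource name) is
+        # surfaced as a ConfigError below.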
+        try:
+            resources = [
+                HttpResourceConfig(**res) for res in listener.get("resources", [])
+            ]
+        except ValueError as e:
+            raise ConfigError("Unknown listener resource") from e
+
         http_config = HttpListenerConfig(
             x_forwarded=listener.get("x_forwarded", False),
-            resources=[
-                HttpResourceConfig(**res) for res in listener.get("resources", [])
-            ],
+            resources=resources,
             additional_resources=listener.get("additional_resources", {}),
             tag=listener.get("tag"),
         )
@@ -1349,30 +1363,6 @@ def parse_listener_def(listener: Any) -> ListenerConfig:
     return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
 
 
-NO_MORE_NONE_HTTP_WEB_CLIENT_LOCATION_WARNING = """
-Synapse no longer supports serving a web client. To remove this warning,
-configure 'web_client_location' with an HTTP(S) URL.
-"""
-
-
-NO_MORE_WEB_CLIENT_WARNING = """
-Synapse no longer includes a web client. To redirect the root resource to a web client, configure
-'web_client_location'. To remove this warning, remove 'webclient' from the 'listeners'
-configuration.
-"""
-
-
-def _warn_if_webclient_configured(listeners: Iterable[ListenerConfig]) -> None:
-    for listener in listeners:
-        if not listener.http_options:
-            continue
-        for res in listener.http_options.resources:
-            for name in res.names:
-                if name == "webclient":
-                    logger.warning(NO_MORE_WEB_CLIENT_WARNING)
-                    return
-
-
 _MANHOLE_SETTINGS_SCHEMA = {
     "type": "object",
     "properties": {
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 38f3cf4d33..9acb3c0cc4 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -315,10 +315,11 @@ class EventBase(metaclass=abc.ABCMeta):
     redacts: DefaultDictProperty[Optional[str]] = DefaultDictProperty("redacts", None)
     room_id: DictProperty[str] = DictProperty("room_id")
     sender: DictProperty[str] = DictProperty("sender")
-    # TODO state_key should be Optional[str], this is generally asserted in Synapse
-    # by calling is_state() first (which ensures this), but it is hard (not possible?)
+    # TODO state_key should be Optional[str]. This is generally asserted in Synapse
+    # by calling is_state() first (which ensures it is not None), but it is hard (not possible?)
     # to properly annotate that calling is_state() asserts that state_key exists
-    # and is non-None.
+    # and is non-None. It would be better to replace such direct references with
+    # get_state_key() (and a check for None).
     state_key: DictProperty[str] = DictProperty("state_key")
     type: DictProperty[str] = DictProperty("type")
     user_id: DictProperty[str] = DictProperty("sender")
@@ -332,7 +333,11 @@ class EventBase(metaclass=abc.ABCMeta):
         return self.content["membership"]
 
     def is_state(self) -> bool:
-        return hasattr(self, "state_key") and self.state_key is not None
+        return self.get_state_key() is not None
+
+    def get_state_key(self) -> Optional[str]:
+        """Get the state key of this event, or None if it's not a state event"""
+        return self._dict.get("state_key")
 
     def get_dict(self) -> JsonDict:
         d = dict(self._dict)
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 0eab1aefd6..5833fee25f 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -163,7 +163,7 @@ class EventContext:
         return {
             "prev_state_id": prev_state_id,
             "event_type": event.type,
-            "event_state_key": event.state_key if event.is_state() else None,
+            "event_state_key": event.get_state_key(),
             "state_group": self._state_group,
             "state_group_before_event": self.state_group_before_event,
             "rejected": self.rejected,
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 918adeecf8..243696b357 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -14,7 +14,17 @@
 # limitations under the License.
 import collections.abc
 import re
-from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Union
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Mapping,
+    Optional,
+    Union,
+)
 
 from frozendict import frozendict
 
@@ -26,6 +36,10 @@ from synapse.util.frozenutils import unfreeze
 
 from . import EventBase
 
+if TYPE_CHECKING:
+    from synapse.storage.databases.main.relations import BundledAggregations
+
+
 # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
 # (?<!stuff) matches if the current position in the string is not preceded
 # by a match for 'stuff'.
@@ -376,7 +390,7 @@ class EventClientSerializer:
         event: Union[JsonDict, EventBase],
         time_now: int,
         *,
-        bundle_aggregations: Optional[Dict[str, JsonDict]] = None,
+        bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
         **kwargs: Any,
     ) -> JsonDict:
         """Serializes a single event.
@@ -415,7 +429,7 @@ class EventClientSerializer:
         self,
         event: EventBase,
         time_now: int,
-        aggregations: JsonDict,
+        aggregations: "BundledAggregations",
         serialized_event: JsonDict,
     ) -> None:
         """Potentially injects bundled aggregations into the unsigned portion of the serialized event.
@@ -427,13 +441,18 @@ class EventClientSerializer:
             serialized_event: The serialized event which may be modified.
 
         """
-        # Make a copy in-case the object is cached.
-        aggregations = aggregations.copy()
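+        # Build up the aggregations to include under "m.relations" in the
+        # event's unsigned section, keyed by relation type.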
+        serialized_aggregations = {}
+
+        if aggregations.annotations:
+            serialized_aggregations[RelationTypes.ANNOTATION] = aggregations.annotations
+
+        if aggregations.references:
+            serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references
 
-        if RelationTypes.REPLACE in aggregations:
+        if aggregations.replace:
             # If there is an edit replace the content, preserving existing
             # relations.
-            edit = aggregations[RelationTypes.REPLACE]
+            edit = aggregations.replace
 
             # Ensure we take copies of the edit content, otherwise we risk modifying
             # the original event.
@@ -451,24 +470,28 @@ class EventClientSerializer:
             else:
                 serialized_event["content"].pop("m.relates_to", None)
 
-            aggregations[RelationTypes.REPLACE] = {
+            serialized_aggregations[RelationTypes.REPLACE] = {
                 "event_id": edit.event_id,
                 "origin_server_ts": edit.origin_server_ts,
                 "sender": edit.sender,
             }
 
         # If this event is the start of a thread, include a summary of the replies.
-        if RelationTypes.THREAD in aggregations:
-            # Serialize the latest thread event.
-            latest_thread_event = aggregations[RelationTypes.THREAD]["latest_event"]
-
-            # Don't bundle aggregations as this could recurse forever.
-            aggregations[RelationTypes.THREAD]["latest_event"] = self.serialize_event(
-                latest_thread_event, time_now, bundle_aggregations=None
-            )
+        if aggregations.thread:
+            serialized_aggregations[RelationTypes.THREAD] = {
+                # Don't bundle aggregations as this could recurse forever.
+                "latest_event": self.serialize_event(
+                    aggregations.thread.latest_event, time_now, bundle_aggregations=None
+                ),
+                "count": aggregations.thread.count,
+                "current_user_participated": aggregations.thread.current_user_participated,
+            }
 
         # Include the bundled aggregations in the event.
-        serialized_event["unsigned"].setdefault("m.relations", {}).update(aggregations)
+        if serialized_aggregations:
+            serialized_event["unsigned"].setdefault("m.relations", {}).update(
+                serialized_aggregations
+            )
 
     def serialize_events(
         self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index cf86934968..360d24274a 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import collections.abc
-from typing import Iterable, Union
+from typing import Iterable, Type, Union
 
 import jsonschema
 
@@ -246,7 +246,7 @@ POWER_LEVELS_SCHEMA = {
 
 # This could return something newer than Draft 7, but that's the current "latest"
 # validator.
-def _create_power_level_validator() -> jsonschema.Draft7Validator:
+def _create_power_level_validator() -> Type[jsonschema.Draft7Validator]:
     validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA)
 
     # by default jsonschema does not consider a frozendict to be an object so
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index 77b936361a..db4fe2c798 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import Dict, Iterable, List, Optional, Tuple, Type
+from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Type
 
 from typing_extensions import Literal
 
@@ -36,17 +36,19 @@ from synapse.http.servlet import (
     parse_integer_from_args,
     parse_string_from_args,
 )
-from synapse.server import HomeServer
 from synapse.types import JsonDict, ThirdPartyInstanceID
 from synapse.util.ratelimitutils import FederationRateLimiter
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
 
 
 class TransportLayerServer(JsonResource):
     """Handles incoming federation HTTP requests"""
 
-    def __init__(self, hs: HomeServer, servlet_groups: Optional[List[str]] = None):
+    def __init__(self, hs: "HomeServer", servlet_groups: Optional[List[str]] = None):
         """Initialize the TransportLayerServer
 
         Will by default register all servlets. For custom behaviour, pass in
@@ -113,7 +115,7 @@ class PublicRoomList(BaseFederationServlet):
 
     def __init__(
         self,
-        hs: HomeServer,
+        hs: "HomeServer",
         authenticator: Authenticator,
         ratelimiter: FederationRateLimiter,
         server_name: str,
@@ -203,7 +205,7 @@ class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
 
     def __init__(
         self,
-        hs: HomeServer,
+        hs: "HomeServer",
         authenticator: Authenticator,
         ratelimiter: FederationRateLimiter,
         server_name: str,
@@ -251,7 +253,7 @@ class OpenIdUserInfo(BaseFederationServlet):
 
     def __init__(
         self,
-        hs: HomeServer,
+        hs: "HomeServer",
         authenticator: Authenticator,
         ratelimiter: FederationRateLimiter,
         server_name: str,
@@ -297,7 +299,7 @@ DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
 
 
 def register_servlets(
-    hs: HomeServer,
+    hs: "HomeServer",
     resource: HttpServer,
     authenticator: Authenticator,
     ratelimiter: FederationRateLimiter,
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index da1fbf8b63..dff2b68359 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -15,7 +15,8 @@
 import functools
 import logging
 import re
-from typing import Any, Awaitable, Callable, Optional, Tuple, cast
+import time
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, cast
 
 from synapse.api.errors import Codes, FederationDeniedError, SynapseError
 from synapse.api.urls import FEDERATION_V1_PREFIX
@@ -24,16 +25,20 @@ from synapse.http.servlet import parse_json_object_from_request
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import run_in_background
 from synapse.logging.opentracing import (
+    active_span,
     set_tag,
     span_context_from_request,
+    start_active_span,
     start_active_span_follows_from,
     whitelisted_homeserver,
 )
-from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util.ratelimitutils import FederationRateLimiter
 from synapse.util.stringutils import parse_and_validate_server_name
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
 
 
@@ -46,7 +51,7 @@ class NoAuthenticationError(AuthenticationError):
 
 
 class Authenticator:
-    def __init__(self, hs: HomeServer):
+    def __init__(self, hs: "HomeServer"):
         self._clock = hs.get_clock()
         self.keyring = hs.get_keyring()
         self.server_name = hs.hostname
@@ -114,11 +119,11 @@ class Authenticator:
         # alive
         retry_timings = await self.store.get_destination_retry_timings(origin)
         if retry_timings and retry_timings.retry_last_ts:
-            run_in_background(self._reset_retry_timings, origin)
+            run_in_background(self.reset_retry_timings, origin)
 
         return origin
 
-    async def _reset_retry_timings(self, origin: str) -> None:
+    async def reset_retry_timings(self, origin: str) -> None:
         try:
             logger.info("Marking origin %r as up", origin)
             await self.store.set_destination_retry_timings(origin, None, 0, 0)
@@ -227,7 +232,7 @@ class BaseFederationServlet:
 
     def __init__(
         self,
-        hs: HomeServer,
+        hs: "HomeServer",
         authenticator: Authenticator,
         ratelimiter: FederationRateLimiter,
         server_name: str,
@@ -263,9 +268,10 @@ class BaseFederationServlet:
                 content = parse_json_object_from_request(request)
 
             try:
-                origin: Optional[str] = await authenticator.authenticate_request(
-                    request, content
-                )
+                with start_active_span("authenticate_request"):
+                    origin: Optional[str] = await authenticator.authenticate_request(
+                        request, content
+                    )
             except NoAuthenticationError:
                 origin = None
                 if self.REQUIRE_AUTH:
@@ -280,32 +286,57 @@ class BaseFederationServlet:
             # update the active opentracing span with the authenticated entity
             set_tag("authenticated_entity", origin)
 
-            # if the origin is authenticated and whitelisted, link to its span context
+            # if the origin is authenticated and whitelisted, use its span context
+            # as the parent.
             context = None
             if origin and whitelisted_homeserver(origin):
                 context = span_context_from_request(request)
 
-            scope = start_active_span_follows_from(
-                "incoming-federation-request", contexts=(context,) if context else ()
-            )
+            if context:
+                servlet_span = active_span()
+                # a scope which uses the origin's context as a parent
+                processing_start_time = time.time()
+                scope = start_active_span_follows_from(
+                    "incoming-federation-request",
+                    child_of=context,
+                    contexts=(servlet_span,),
+                    start_time=processing_start_time,
+                )
 
-            with scope:
-                if origin and self.RATELIMIT:
-                    with ratelimiter.ratelimit(origin) as d:
-                        await d
-                        if request._disconnected:
-                            logger.warning(
-                                "client disconnected before we started processing "
-                                "request"
+            else:
+                # just use our context as a parent
+                scope = start_active_span(
+                    "incoming-federation-request",
+                )
+
+            try:
+                with scope:
+                    if origin and self.RATELIMIT:
+                        with ratelimiter.ratelimit(origin) as d:
+                            await d
+                            if request._disconnected:
+                                logger.warning(
+                                    "client disconnected before we started processing "
+                                    "request"
+                                )
+                                return None
+                            response = await func(
+                                origin, content, request.args, *args, **kwargs
                             )
-                            return None
+                    else:
                         response = await func(
                             origin, content, request.args, *args, **kwargs
                         )
-                else:
-                    response = await func(
-                        origin, content, request.args, *args, **kwargs
+            finally:
+                # if we used the origin's context as the parent, add a new span using
+                # the servlet span as a parent, so that we have a link
+                if context:
+                    scope2 = start_active_span_follows_from(
+                        "process-federation_request",
+                        contexts=(scope.span,),
+                        start_time=processing_start_time,
                     )
+                    scope2.close()
 
             return response
 
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index beadfa422b..d86dfede4e 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -12,7 +12,17 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 import logging
-from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import (
+    TYPE_CHECKING,
+    Dict,
+    List,
+    Mapping,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+)
 
 from typing_extensions import Literal
 
@@ -30,11 +40,13 @@ from synapse.http.servlet import (
     parse_string_from_args,
     parse_strings_from_args,
 )
-from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util.ratelimitutils import FederationRateLimiter
 from synapse.util.versionstring import get_version_string
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
 issue_8631_logger = logging.getLogger("synapse.8631_debug")
 
@@ -47,7 +59,7 @@ class BaseFederationServerServlet(BaseFederationServlet):
 
     def __init__(
         self,
-        hs: HomeServer,
+        hs: "HomeServer",
         authenticator: Authenticator,
         ratelimiter: FederationRateLimiter,
         server_name: str,
@@ -97,11 +109,11 @@ class FederationSendServlet(BaseFederationServerServlet):
             )
 
             if issue_8631_logger.isEnabledFor(logging.DEBUG):
-                DEVICE_UPDATE_EDUS = {"m.device_list_update", "m.signing_key_update"}
+                DEVICE_UPDATE_EDUS = ["m.device_list_update", "m.signing_key_update"]
                 device_list_updates = [
                     edu.content
                     for edu in transaction_data.get("edus", [])
-                    if edu.edu_type in DEVICE_UPDATE_EDUS
+                    if edu.get("edu_type") in DEVICE_UPDATE_EDUS
                 ]
                 if device_list_updates:
                     issue_8631_logger.debug(
@@ -596,7 +608,7 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
 
     def __init__(
         self,
-        hs: HomeServer,
+        hs: "HomeServer",
         authenticator: Authenticator,
         ratelimiter: FederationRateLimiter,
         server_name: str,
@@ -670,7 +682,7 @@ class FederationRoomHierarchyServlet(BaseFederationServlet):
 
     def __init__(
         self,
-        hs: HomeServer,
+        hs: "HomeServer",
         authenticator: Authenticator,
         ratelimiter: FederationRateLimiter,
         server_name: str,
@@ -706,7 +718,7 @@ class RoomComplexityServlet(BaseFederationServlet):
 
     def __init__(
         self,
-        hs: HomeServer,
+        hs: "HomeServer",
         authenticator: Authenticator,
         ratelimiter: FederationRateLimiter,
         server_name: str,
diff --git a/synapse/federation/transport/server/groups_local.py b/synapse/federation/transport/server/groups_local.py
index a12cd18d58..496472e1dc 100644
--- a/synapse/federation/transport/server/groups_local.py
+++ b/synapse/federation/transport/server/groups_local.py
@@ -11,7 +11,7 @@
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
-from typing import Dict, List, Tuple, Type
+from typing import TYPE_CHECKING, Dict, List, Tuple, Type
 
 from synapse.api.errors import SynapseError
 from synapse.federation.transport.server._base import (
@@ -19,10 +19,12 @@ from synapse.federation.transport.server._base import (
     BaseFederationServlet,
 )
 from synapse.handlers.groups_local import GroupsLocalHandler
-from synapse.server import HomeServer
 from synapse.types import JsonDict, get_domain_from_id
 from synapse.util.ratelimitutils import FederationRateLimiter
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 
 class BaseGroupsLocalServlet(BaseFederationServlet):
     """Abstract base class for federation servlet classes which provides a groups local handler.
@@ -32,7 +34,7 @@ class BaseGroupsLocalServlet(BaseFederationServlet):
 
     def __init__(
         self,
-        hs: HomeServer,
+        hs: "HomeServer",
         authenticator: Authenticator,
         ratelimiter: FederationRateLimiter,
         server_name: str,
diff --git a/synapse/federation/transport/server/groups_server.py b/synapse/federation/transport/server/groups_server.py
index b30e92a5eb..851b50152e 100644
--- a/synapse/federation/transport/server/groups_server.py
+++ b/synapse/federation/transport/server/groups_server.py
@@ -11,7 +11,7 @@
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
-from typing import Dict, List, Tuple, Type
+from typing import TYPE_CHECKING, Dict, List, Tuple, Type
 
 from typing_extensions import Literal
 
@@ -22,10 +22,12 @@ from synapse.federation.transport.server._base import (
     BaseFederationServlet,
 )
 from synapse.http.servlet import parse_string_from_args
-from synapse.server import HomeServer
 from synapse.types import JsonDict, get_domain_from_id
 from synapse.util.ratelimitutils import FederationRateLimiter
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 
 class BaseGroupsServerServlet(BaseFederationServlet):
     """Abstract base class for federation servlet classes which provides a groups server handler.
@@ -35,7 +37,7 @@ class BaseGroupsServerServlet(BaseFederationServlet):
 
     def __init__(
         self,
-        hs: HomeServer,
+        hs: "HomeServer",
         authenticator: Authenticator,
         ratelimiter: FederationRateLimiter,
         server_name: str,
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 7833e77e2b..0fb919acf6 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -55,6 +55,9 @@ class ApplicationServicesHandler:
         self.clock = hs.get_clock()
         self.notify_appservices = hs.config.appservice.notify_appservices
         self.event_sources = hs.get_event_sources()
+        self._msc2409_to_device_messages_enabled = (
+            hs.config.experimental.msc2409_to_device_messages_enabled
+        )
 
         self.current_max = 0
         self.is_processing = False
@@ -132,7 +135,9 @@ class ApplicationServicesHandler:
 
                         # Fork off pushes to these services
                         for service in services:
-                            self.scheduler.submit_event_for_as(service, event)
+                            self.scheduler.enqueue_for_appservice(
+                                service, events=[event]
+                            )
 
                         now = self.clock.time_msec()
                         ts = await self.store.get_received_ts(event.event_id)
@@ -199,8 +204,9 @@ class ApplicationServicesHandler:
         Args:
             stream_key: The stream the event came from.
 
-                `stream_key` can be "typing_key", "receipt_key" or "presence_key". Any other
-                value for `stream_key` will cause this function to return early.
+                `stream_key` can be "typing_key", "receipt_key", "presence_key" or
+                "to_device_key". Any other value for `stream_key` will cause this function
+                to return early.
 
                 Ephemeral events will only be pushed to appservices that have opted into
                 receiving them by setting `push_ephemeral` to true in their registration
@@ -216,8 +222,15 @@ class ApplicationServicesHandler:
         if not self.notify_appservices:
             return
 
-        # Ignore any unsupported streams
-        if stream_key not in ("typing_key", "receipt_key", "presence_key"):
+        # Notify appservices of updates in ephemeral event streams.
+        # Only the following streams are currently supported.
+        # FIXME: We should use constants for these values.
+        if stream_key not in (
+            "typing_key",
+            "receipt_key",
+            "presence_key",
+            "to_device_key",
+        ):
             return
 
         # Assert that new_token is an integer (and not a RoomStreamToken).
@@ -233,6 +246,13 @@ class ApplicationServicesHandler:
         # Additional context: https://github.com/matrix-org/synapse/pull/11137
         assert isinstance(new_token, int)
 
+        # Ignore to-device messages if the feature flag is not enabled
+        if (
+            stream_key == "to_device_key"
+            and not self._msc2409_to_device_messages_enabled
+        ):
+            return
+
         # Check whether there are any appservices which have registered to receive
         # ephemeral events.
         #
@@ -266,7 +286,7 @@ class ApplicationServicesHandler:
         with Measure(self.clock, "notify_interested_services_ephemeral"):
             for service in services:
                 if stream_key == "typing_key":
-                    # Note that we don't persist the token (via set_type_stream_id_for_appservice)
+                    # Note that we don't persist the token (via set_appservice_stream_type_pos)
                     # for typing_key due to performance reasons and due to their highly
                     # ephemeral nature.
                     #
@@ -274,7 +294,7 @@ class ApplicationServicesHandler:
                     # and, if they apply to this application service, send it off.
                     events = await self._handle_typing(service, new_token)
                     if events:
-                        self.scheduler.submit_ephemeral_events_for_as(service, events)
+                        self.scheduler.enqueue_for_appservice(service, ephemeral=events)
                     continue
 
                 # Since we read/update the stream position for this AS/stream
@@ -285,28 +305,37 @@ class ApplicationServicesHandler:
                 ):
                     if stream_key == "receipt_key":
                         events = await self._handle_receipts(service, new_token)
-                        if events:
-                            self.scheduler.submit_ephemeral_events_for_as(
-                                service, events
-                            )
+                        self.scheduler.enqueue_for_appservice(service, ephemeral=events)
 
                         # Persist the latest handled stream token for this appservice
-                        await self.store.set_type_stream_id_for_appservice(
+                        await self.store.set_appservice_stream_type_pos(
                             service, "read_receipt", new_token
                         )
 
                     elif stream_key == "presence_key":
                         events = await self._handle_presence(service, users, new_token)
-                        if events:
-                            self.scheduler.submit_ephemeral_events_for_as(
-                                service, events
-                            )
+                        self.scheduler.enqueue_for_appservice(service, ephemeral=events)
 
                         # Persist the latest handled stream token for this appservice
-                        await self.store.set_type_stream_id_for_appservice(
+                        await self.store.set_appservice_stream_type_pos(
                             service, "presence", new_token
                         )
 
+                    elif stream_key == "to_device_key":
+                        # Retrieve the to-device messages for this application
+                        # service, up to the given stream token.
+                        to_device_messages = await self._get_to_device_messages(
+                            service, new_token, users
+                        )
+                        self.scheduler.enqueue_for_appservice(
+                            service, to_device_messages=to_device_messages
+                        )
+
+                        # Persist the latest handled stream token for this appservice
+                        await self.store.set_appservice_stream_type_pos(
+                            service, "to_device", new_token
+                        )
+
     async def _handle_typing(
         self, service: ApplicationService, new_token: int
     ) -> List[JsonDict]:
@@ -440,6 +469,79 @@ class ApplicationServicesHandler:
 
         return events
 
+    async def _get_to_device_messages(
+        self,
+        service: ApplicationService,
+        new_token: int,
+        users: Collection[Union[str, UserID]],
+    ) -> List[JsonDict]:
+        """
+        Given an application service, determine which to-device messages it should
+        receive from those between the last-recorded to-device message stream token
+        for this appservice and the given stream token.
+
+        Args:
+            service: The application service to fetch to-device messages for.
+            new_token: The latest to-device event stream token.
+            users: The users to be notified for the new to-device messages
+                (ie, the recipients of the messages).
+
+        Returns:
+            A list of JSON dictionaries containing data derived from the to-device events
+                that should be sent to the given application service.
+        """
+        # Get the stream token that this application service has processed up until
+        from_key = await self.store.get_type_stream_id_for_appservice(
+            service, "to_device"
+        )
+
+        # Filter out users that this appservice is not interested in
+        users_appservice_is_interested_in: List[str] = []
+        for user in users:
+            # FIXME: We should do this further up the call stack. We currently repeat
+            #  this operation in _handle_presence.
+            if isinstance(user, UserID):
+                user = user.to_string()
+
+            if service.is_interested_in_user(user):
+                users_appservice_is_interested_in.append(user)
+
+        if not users_appservice_is_interested_in:
+            # Return early if the AS was not interested in any of these users
+            return []
+
+        # Retrieve the to-device messages for each user
+        recipient_device_to_messages = await self.store.get_messages_for_user_devices(
+            users_appservice_is_interested_in,
+            from_key,
+            new_token,
+        )
+
+        # According to MSC2409, we'll need to add 'to_user_id' and 'to_device_id' fields
+        # to the event JSON so that the application service will know which user/device
+        # combination each message was intended for.
+        #
+        # So we mangle this dict into a flat list of to-device messages with the relevant
+        # user ID and device ID embedded inside each message dict.
+        message_payload: List[JsonDict] = []
+        for (
+            user_id,
+            device_id,
+        ), messages in recipient_device_to_messages.items():
+            for message_json in messages:
+                # Remove 'message_id' from the to-device message, as it's an internal ID
+                message_json.pop("message_id", None)
+
+                message_payload.append(
+                    {
+                        "to_user_id": user_id,
+                        "to_device_id": device_id,
+                        **message_json,
+                    }
+                )
+
+        return message_payload
+
     async def query_user_exists(self, user_id: str) -> bool:
         """Check if any application service knows this user_id exists.
 
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index bd1a322563..e32c93e234 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -2060,6 +2060,10 @@ CHECK_AUTH_CALLBACK = Callable[
         Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]
     ],
 ]
+GET_USERNAME_FOR_REGISTRATION_CALLBACK = Callable[
+    [JsonDict, JsonDict],
+    Awaitable[Optional[str]],
+]
 
 
 class PasswordAuthProvider:
@@ -2072,6 +2076,9 @@ class PasswordAuthProvider:
         # lists of callbacks
         self.check_3pid_auth_callbacks: List[CHECK_3PID_AUTH_CALLBACK] = []
         self.on_logged_out_callbacks: List[ON_LOGGED_OUT_CALLBACK] = []
+        self.get_username_for_registration_callbacks: List[
+            GET_USERNAME_FOR_REGISTRATION_CALLBACK
+        ] = []
 
         # Mapping from login type to login parameters
         self._supported_login_types: Dict[str, Iterable[str]] = {}
@@ -2086,6 +2093,9 @@ class PasswordAuthProvider:
         auth_checkers: Optional[
             Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK]
         ] = None,
+        get_username_for_registration: Optional[
+            GET_USERNAME_FOR_REGISTRATION_CALLBACK
+        ] = None,
     ) -> None:
         # Register check_3pid_auth callback
         if check_3pid_auth is not None:
@@ -2130,6 +2140,11 @@ class PasswordAuthProvider:
                 # Add the new method to the list of auth_checker_callbacks for this login type
                 self.auth_checker_callbacks.setdefault(login_type, []).append(callback)
 
+        if get_username_for_registration is not None:
+            self.get_username_for_registration_callbacks.append(
+                get_username_for_registration,
+            )
+
     def get_supported_login_types(self) -> Mapping[str, Iterable[str]]:
         """Get the login types supported by this password provider
 
@@ -2285,3 +2300,46 @@ class PasswordAuthProvider:
             except Exception as e:
                 logger.warning("Failed to run module API callback %s: %s", callback, e)
                 continue
+
+    async def get_username_for_registration(
+        self,
+        uia_results: JsonDict,
+        params: JsonDict,
+    ) -> Optional[str]:
+        """Defines the username to use when registering the user, using the credentials
+        and parameters provided during the UIA flow.
+
+        Stops at the first callback that returns a string.
+
+        Args:
+            uia_results: The credentials provided during the UIA flow.
+            params: The parameters provided by the registration request.
+
+        Returns:
+            The localpart to use when registering this user, or None if no module
+            returned a localpart.
+        """
+        for callback in self.get_username_for_registration_callbacks:
+            try:
+                res = await callback(uia_results, params)
+
+                if isinstance(res, str):
+                    return res
+                elif res is not None:
+                    # mypy complains that this line is unreachable because it assumes the
+                    # data returned by the module fits the expected type. We just want
+                    # to make sure this is the case.
+                    logger.warning(  # type: ignore[unreachable]
+                        "Ignoring non-string value returned by"
+                        " get_username_for_registration callback %s: %s",
+                        callback,
+                        res,
+                    )
+            except Exception as e:
+                logger.error(
+                    "Module raised an exception in get_username_for_registration: %s",
+                    e,
+                )
+                raise SynapseError(code=500, msg="Internal Server Error")
+
+        return None
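
For module authors, the new `get_username_for_registration` callback is registered through the module API and receives the UIA results plus the raw registration parameters. A hedged sketch of a module using it; the module class, its config, and the `username` parameter it inspects are invented for illustration. Returning None defers to the next callback, and non-string return values are logged and ignored, as above:

    from typing import Any, Dict, Optional

    JsonDict = Dict[str, Any]


    class ExampleUsernameModule:
        """Hypothetical module that picks registration usernames."""

        def __init__(self, config: dict, api):
            self._api = api
            api.register_password_auth_provider_callbacks(
                get_username_for_registration=self.pick_username,
            )

        async def pick_username(
            self, uia_results: JsonDict, params: JsonDict
        ) -> Optional[str]:
            desired = params.get("username")
            if not isinstance(desired, str):
                # None means "no opinion": Synapse moves on to the next callback.
                return None
            return desired.lower()
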
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index bee62cf360..7a13d76a68 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -157,6 +157,9 @@ class DeactivateAccountHandler:
         # Mark the user as deactivated.
         await self.store.set_user_deactivated_status(user_id, True)
 
+        # Remove account data (including ignored users and push rules).
+        await self.store.purge_account_data_for_user(user_id)
+
         return identity_server_supports_unbinding
 
     async def _reject_pending_invites_for_user(self, user_id: str) -> None:
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 6b5a6ded8b..36e3ad2ba9 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -31,6 +31,8 @@ from synapse.types import (
     create_requester,
     get_domain_from_id,
 )
+from synapse.util.caches.descriptors import cached
+from synapse.util.stringutils import parse_and_validate_mxc_uri
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -64,6 +66,11 @@ class ProfileHandler:
         self.user_directory_handler = hs.get_user_directory_handler()
         self.request_ratelimiter = hs.get_request_ratelimiter()
 
+        self.max_avatar_size = hs.config.server.max_avatar_size
+        self.allowed_avatar_mimetypes = hs.config.server.allowed_avatar_mimetypes
+
+        self.server_name = hs.config.server.server_name
+
         if hs.config.worker.run_background_tasks:
             self.clock.looping_call(
                 self._update_remote_profile_cache, self.PROFILE_UPDATE_MS
@@ -286,6 +293,9 @@ class ProfileHandler:
                 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,)
             )
 
+        if not await self.check_avatar_size_and_mime_type(new_avatar_url):
+            raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN)
+
         avatar_url_to_set: Optional[str] = new_avatar_url
         if new_avatar_url == "":
             avatar_url_to_set = None
@@ -307,6 +317,60 @@
 
         await self._update_join_states(requester, target_user)
 
+    @cached()
+    async def check_avatar_size_and_mime_type(self, mxc: str) -> bool:
+        """Check that the size and content type of the avatar at the given MXC URI are
+        within the configured limits.
+
+        Args:
+            mxc: The MXC URI at which the avatar can be found.
+
+        Returns:
+            True if the file is allowed to be used as an avatar, False otherwise.
+        """
+        if not self.max_avatar_size and not self.allowed_avatar_mimetypes:
+            return True
+
+        server_name, _, media_id = parse_and_validate_mxc_uri(mxc)
+
+        if server_name == self.server_name:
+            media_info = await self.store.get_local_media(media_id)
+        else:
+            media_info = await self.store.get_cached_remote_media(server_name, media_id)
+
+        if media_info is None:
+            # Both configuration options need to access the file's metadata, and
+            # retrieving remote avatars just for this becomes a bit of a faff, especially
+            # if e.g. the file is too big. It's also generally safe to assume most files
+            # used as avatar are uploaded locally, or if the upload didn't happen as part
+            # of a PUT request on /avatar_url that the file was at least previewed by the
+            # user locally (and therefore downloaded to the remote media cache).
+            logger.warning("Forbidding avatar change to %s: avatar not on server", mxc)
+            return False
+
+        if self.max_avatar_size:
+            # Ensure avatar does not exceed max allowed avatar size
+            if media_info["media_length"] > self.max_avatar_size:
+                logger.warning(
+                    "Forbidding avatar change to %s: %d bytes is above the allowed size "
+                    "limit",
+                    mxc,
+                    media_info["media_length"],
+                )
+                return False
+
+        if self.allowed_avatar_mimetypes:
+            # Ensure the avatar's file type is allowed
+            if media_info["media_type"] not in self.allowed_avatar_mimetypes:
+                logger.warning(
+                    "Forbidding avatar change to %s: mimetype %s not allowed",
+                    mxc,
+                    media_info["media_type"],
+                )
+                return False
+
+        return True
+
     async def on_profile_query(self, args: JsonDict) -> JsonDict:
         """Handles federation profile query requests."""
 
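
The avatar gate above reduces to two independent predicates over a media record. A standalone sketch of the same decision logic, assuming only the `media_length` and `media_type` keys used above (the record is a plain dict here, not a database row):

    from typing import Collection, Optional


    def avatar_allowed(
        media_info: Optional[dict],
        max_avatar_size: Optional[int],
        allowed_avatar_mimetypes: Optional[Collection[str]],
    ) -> bool:
        # Nothing configured: everything is allowed.
        if not max_avatar_size and not allowed_avatar_mimetypes:
            return True
        # A file we have no metadata for cannot be validated, so reject it.
        if media_info is None:
            return False
        if max_avatar_size and media_info["media_length"] > max_avatar_size:
            return False
        if (
            allowed_avatar_mimetypes
            and media_info["media_type"] not in allowed_avatar_mimetypes
        ):
            return False
        return True


    assert avatar_allowed(
        {"media_length": 1024, "media_type": "image/png"},
        max_avatar_size=2048,
        allowed_avatar_mimetypes=["image/png"],
    )
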
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index f08a516a75..a719d5eef3 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -132,6 +132,7 @@ class RegistrationHandler:
         localpart: str,
         guest_access_token: Optional[str] = None,
         assigned_user_id: Optional[str] = None,
+        inhibit_user_in_use_error: bool = False,
     ) -> None:
         if types.contains_invalid_mxid_characters(localpart):
             raise SynapseError(
@@ -171,21 +172,22 @@ class RegistrationHandler:
 
         users = await self.store.get_users_by_id_case_insensitive(user_id)
         if users:
-            if not guest_access_token:
+            if not inhibit_user_in_use_error and not guest_access_token:
                 raise SynapseError(
                     400, "User ID already taken.", errcode=Codes.USER_IN_USE
                 )
-            user_data = await self.auth.get_user_by_access_token(guest_access_token)
-            if (
-                not user_data.is_guest
-                or UserID.from_string(user_data.user_id).localpart != localpart
-            ):
-                raise AuthError(
-                    403,
-                    "Cannot register taken user ID without valid guest "
-                    "credentials for that user.",
-                    errcode=Codes.FORBIDDEN,
-                )
+            if guest_access_token:
+                user_data = await self.auth.get_user_by_access_token(guest_access_token)
+                if (
+                    not user_data.is_guest
+                    or UserID.from_string(user_data.user_id).localpart != localpart
+                ):
+                    raise AuthError(
+                        403,
+                        "Cannot register taken user ID without valid guest "
+                        "credentials for that user.",
+                        errcode=Codes.FORBIDDEN,
+                    )
 
         if guest_access_token is None:
             try:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index f963078e59..1420d67729 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -30,6 +30,7 @@ from typing import (
     Tuple,
 )
 
+import attr
 from typing_extensions import TypedDict
 
 from synapse.api.constants import (
@@ -60,6 +61,7 @@ from synapse.events.utils import copy_power_levels_contents
 from synapse.federation.federation_client import InvalidResponseError
 from synapse.handlers.federation import get_domains_from_state
 from synapse.rest.admin._base import assert_user_is_admin
+from synapse.storage.databases.main.relations import BundledAggregations
 from synapse.storage.state import StateFilter
 from synapse.streams import EventSource
 from synapse.types import (
@@ -90,6 +92,17 @@ id_server_scheme = "https://"
 FIVE_MINUTES_IN_MS = 5 * 60 * 1000
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class EventContext:
+    events_before: List[EventBase]
+    event: EventBase
+    events_after: List[EventBase]
+    state: List[EventBase]
+    aggregations: Dict[str, BundledAggregations]
+    start: str
+    end: str
+
+
 class RoomCreationHandler:
     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastore()
@@ -1119,7 +1132,7 @@ class RoomContextHandler:
         limit: int,
         event_filter: Optional[Filter],
         use_admin_priviledge: bool = False,
-    ) -> Optional[JsonDict]:
+    ) -> Optional[EventContext]:
         """Retrieves events, pagination tokens and state around a given event
         in a room.
 
@@ -1167,38 +1180,28 @@ class RoomContextHandler:
         results = await self.store.get_events_around(
             room_id, event_id, before_limit, after_limit, event_filter
         )
+        events_before = results.events_before
+        events_after = results.events_after
 
         if event_filter:
-            results["events_before"] = await event_filter.filter(
-                results["events_before"]
-            )
-            results["events_after"] = await event_filter.filter(results["events_after"])
+            events_before = await event_filter.filter(events_before)
+            events_after = await event_filter.filter(events_after)
 
-        results["events_before"] = await filter_evts(results["events_before"])
-        results["events_after"] = await filter_evts(results["events_after"])
+        events_before = await filter_evts(events_before)
+        events_after = await filter_evts(events_after)
         # filter_evts can return a pruned event in case the user is allowed to see that
         # there's something there but not see the content, so use the event that's in
         # `filtered` rather than the event we retrieved from the datastore.
-        results["event"] = filtered[0]
+        event = filtered[0]
 
         # Fetch the aggregations.
         aggregations = await self.store.get_bundled_aggregations(
-            [results["event"]], user.to_string()
+            itertools.chain(events_before, (event,), events_after),
+            user.to_string(),
         )
-        aggregations.update(
-            await self.store.get_bundled_aggregations(
-                results["events_before"], user.to_string()
-            )
-        )
-        aggregations.update(
-            await self.store.get_bundled_aggregations(
-                results["events_after"], user.to_string()
-            )
-        )
-        results["aggregations"] = aggregations
 
-        if results["events_after"]:
-            last_event_id = results["events_after"][-1].event_id
+        if events_after:
+            last_event_id = events_after[-1].event_id
         else:
             last_event_id = event_id
 
@@ -1206,9 +1209,9 @@ class RoomContextHandler:
             state_filter = StateFilter.from_lazy_load_member_list(
                 ev.sender
                 for ev in itertools.chain(
-                    results["events_before"],
-                    (results["event"],),
-                    results["events_after"],
+                    events_before,
+                    (event,),
+                    events_after,
                 )
             )
         else:
@@ -1226,21 +1229,23 @@ class RoomContextHandler:
         if event_filter:
             state_events = await event_filter.filter(state_events)
 
-        results["state"] = await filter_evts(state_events)
-
         # We use a dummy token here as we only care about the room portion of
         # the token, which we replace.
         token = StreamToken.START
 
-        results["start"] = await token.copy_and_replace(
-            "room_key", results["start"]
-        ).to_string(self.store)
-
-        results["end"] = await token.copy_and_replace(
-            "room_key", results["end"]
-        ).to_string(self.store)
-
-        return results
+        return EventContext(
+            events_before=events_before,
+            event=event,
+            events_after=events_after,
+            state=await filter_evts(state_events),
+            aggregations=aggregations,
+            start=await token.copy_and_replace("room_key", results.start).to_string(
+                self.store
+            ),
+            end=await token.copy_and_replace("room_key", results.end).to_string(
+                self.store
+            ),
+        )
 
 
 class TimestampLookupHandler:
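
Replacing the results dict with a frozen attrs class is what lets the callers below (search.py, mailer.py) use attribute access, and lets mypy check the field types. The pattern in miniature, independent of Synapse's event types:

    import attr


    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class Context:
        events_before: list
        event: str
        events_after: list


    ctx = Context(events_before=[], event="$event_id", events_after=["$next"])
    print(ctx.events_after[-1])  # attribute access instead of ctx["events_after"]
    # ctx.event = "$other"       # would raise attr.exceptions.FrozenInstanceError
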
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 6aa910dd10..efe6b4c9aa 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -116,6 +116,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
         )
 
+        self._third_party_invite_limiter = Ratelimiter(
+            store=self.store,
+            clock=self.clock,
+            rate_hz=hs.config.ratelimiting.rc_third_party_invite.per_second,
+            burst_count=hs.config.ratelimiting.rc_third_party_invite.burst_count,
+        )
+
         self.request_ratelimiter = hs.get_request_ratelimiter()
 
     @abc.abstractmethod
@@ -590,6 +597,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                 errcode=Codes.BAD_JSON,
             )
 
+        if "avatar_url" in content:
+            if not await self.profile_handler.check_avatar_size_and_mime_type(
+                content["avatar_url"],
+            ):
+                raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN)
+
         # The event content should *not* include the authorising user as
         # it won't be properly signed. Strip it out since it might come
         # back from a client updating a display name / avatar.
@@ -1289,7 +1302,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
 
         # We need to rate limit *before* we send out any 3PID invites, so we
         # can't just rely on the standard ratelimiting of events.
-        await self.request_ratelimiter.ratelimit(requester)
+        await self._third_party_invite_limiter.ratelimit(requester)
 
         can_invite = await self.third_party_event_rules.check_threepid_can_be_invited(
             medium, address, room_id
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 0b153a6822..02bb5ae72f 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -361,36 +361,37 @@ class SearchHandler:
 
                 logger.info(
                     "Context for search returned %d and %d events",
-                    len(res["events_before"]),
-                    len(res["events_after"]),
+                    len(res.events_before),
+                    len(res.events_after),
                 )
 
-                res["events_before"] = await filter_events_for_client(
-                    self.storage, user.to_string(), res["events_before"]
+                events_before = await filter_events_for_client(
+                    self.storage, user.to_string(), res.events_before
                 )
 
-                res["events_after"] = await filter_events_for_client(
-                    self.storage, user.to_string(), res["events_after"]
+                events_after = await filter_events_for_client(
+                    self.storage, user.to_string(), res.events_after
                 )
 
-                res["start"] = await now_token.copy_and_replace(
-                    "room_key", res["start"]
-                ).to_string(self.store)
-
-                res["end"] = await now_token.copy_and_replace(
-                    "room_key", res["end"]
-                ).to_string(self.store)
+                context = {
+                    "events_before": events_before,
+                    "events_after": events_after,
+                    "start": await now_token.copy_and_replace(
+                        "room_key", res.start
+                    ).to_string(self.store),
+                    "end": await now_token.copy_and_replace(
+                        "room_key", res.end
+                    ).to_string(self.store),
+                }
 
                 if include_profile:
                     senders = {
                         ev.sender
-                        for ev in itertools.chain(
-                            res["events_before"], [event], res["events_after"]
-                        )
+                        for ev in itertools.chain(events_before, [event], events_after)
                     }
 
-                    if res["events_after"]:
-                        last_event_id = res["events_after"][-1].event_id
+                    if events_after:
+                        last_event_id = events_after[-1].event_id
                     else:
                         last_event_id = event.event_id
 
@@ -402,7 +403,7 @@ class SearchHandler:
                         last_event_id, state_filter
                     )
 
-                    res["profile_info"] = {
+                    context["profile_info"] = {
                         s.state_key: {
                             "displayname": s.content.get("displayname", None),
                             "avatar_url": s.content.get("avatar_url", None),
@@ -411,7 +412,7 @@ class SearchHandler:
                         if s.type == EventTypes.Member and s.state_key in senders
                     }
 
-                contexts[event.event_id] = res
+                contexts[event.event_id] = context
         else:
             contexts = {}
 
@@ -421,10 +422,10 @@ class SearchHandler:
 
         for context in contexts.values():
             context["events_before"] = self._event_serializer.serialize_events(
-                context["events_before"], time_now
+                context["events_before"], time_now  # type: ignore[arg-type]
             )
             context["events_after"] = self._event_serializer.serialize_events(
-                context["events_after"], time_now
+                context["events_after"], time_now  # type: ignore[arg-type]
             )
 
         state_results = {}
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index ffc6b748e8..aa9a76f8a9 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -37,6 +37,7 @@ from synapse.logging.context import current_context
 from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span
 from synapse.push.clientformat import format_push_rules_for_user
 from synapse.storage.databases.main.event_push_actions import NotifCounts
+from synapse.storage.databases.main.relations import BundledAggregations
 from synapse.storage.roommember import MemberSummary
 from synapse.storage.state import StateFilter
 from synapse.types import (
@@ -100,7 +101,7 @@ class TimelineBatch:
     limited: bool
     # A mapping of event ID to the bundled aggregations for the above events.
     # This is only calculated if limited is true.
-    bundled_aggregations: Optional[Dict[str, Dict[str, Any]]] = None
+    bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None
 
     def __bool__(self) -> bool:
         """Make the result appear empty if there are no updates. This is used
@@ -1347,8 +1348,8 @@ class SyncHandler:
         if sync_result_builder.since_token is not None:
             since_stream_id = int(sync_result_builder.since_token.to_device_key)
 
-        if since_stream_id != int(now_token.to_device_key):
-            messages, stream_id = await self.store.get_new_messages_for_device(
+        if device_id is not None and since_stream_id != int(now_token.to_device_key):
+            messages, stream_id = await self.store.get_messages_for_device(
                 user_id, device_id, since_stream_id, now_token.to_device_key
             )
 
@@ -1619,7 +1620,7 @@ class SyncHandler:
         # TODO: Can we `SELECT ignored_user_id FROM ignored_users WHERE ignorer_user_id=?;` instead?
         ignored_account_data = (
             await self.store.get_global_account_data_by_type_for_user(
-                AccountDataTypes.IGNORED_USER_LIST, user_id=user_id
+                user_id=user_id, data_type=AccountDataTypes.IGNORED_USER_LIST
             )
         )
 
diff --git a/synapse/http/client.py b/synapse/http/client.py
index ca33b45cb2..743a7ffcb1 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -731,15 +731,24 @@ class SimpleHttpClient:
         # straight back in again
 
         try:
-            length = await make_deferred_yieldable(
-                read_body_with_max_size(response, output_stream, max_size)
-            )
+            d = read_body_with_max_size(response, output_stream, max_size)
+
+            # Ensure that the body is not read forever.
+            d = timeout_deferred(d, 30, self.hs.get_reactor())
+
+            length = await make_deferred_yieldable(d)
         except BodyExceededMaxSize:
             raise SynapseError(
                 HTTPStatus.BAD_GATEWAY,
                 "Requested file is too large > %r bytes" % (max_size,),
                 Codes.TOO_LARGE,
             )
+        except defer.TimeoutError:
+            raise SynapseError(
+                HTTPStatus.BAD_GATEWAY,
+                "Requested file took too long to download",
+                Codes.TOO_LARGE,
+            )
         except Exception as e:
             raise SynapseError(
                 HTTPStatus.BAD_GATEWAY, ("Failed to download remote body: %s" % e)
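
`timeout_deferred` is Synapse's helper (from synapse.util.async_helpers) for abandoning a Deferred that takes too long; its implementation differs in detail, but Twisted's built-in `Deferred.addTimeout` shows the same idea and fails with the `defer.TimeoutError` caught above. A runnable sketch:

    from twisted.internet import defer, reactor
    from twisted.internet.task import deferLater

    # A deferred that would not fire for 60 seconds...
    slow = deferLater(reactor, 60, lambda: "response body")

    # ...made to fail with defer.TimeoutError after 2 seconds instead.
    slow.addTimeout(2, reactor)

    def on_timeout(failure):
        failure.trap(defer.TimeoutError)
        print("download timed out")

    slow.addErrback(on_timeout)
    slow.addBoth(lambda _: reactor.stop())
    reactor.run()
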
diff --git a/synapse/http/site.py b/synapse/http/site.py
index c180a1d323..40f6c04894 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -407,7 +407,10 @@ class SynapseRequest(Request):
 
         user_agent = get_request_user_agent(self, "-")
 
-        code = str(self.code)
+        # int(self.code) looks redundant, because self.code is already an int.
+        # But self.code might be an HTTPStatus (which inherits from int), and that
+        # has a different string representation. So ensure we really have an integer.
+        code = str(int(self.code))
         if not self.finished:
             # we didn't send the full response before we gave up (presumably because
             # the connection dropped)
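
The distinction the comment describes is easy to check: on the Python versions current when this change was made (before 3.11 changed IntEnum's __str__), str() of an HTTPStatus is its enum name, not its number:

    from http import HTTPStatus

    code = HTTPStatus.NOT_FOUND
    print(str(code))       # 'HTTPStatus.NOT_FOUND' on Python < 3.11
    print(str(int(code)))  # '404' on every version
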
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index b240d2d21d..3ebed5c161 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -443,10 +443,14 @@ def start_active_span(
     start_time=None,
     ignore_active_span=False,
     finish_on_close=True,
+    *,
+    tracer=None,
 ):
-    """Starts an active opentracing span. Note, the scope doesn't become active
-    until it has been entered, however, the span starts from the time this
-    message is called.
+    """Starts an active opentracing span.
+
+    Records the start time for the span, and sets it as the "active span" in the
+    scope manager.
+
     Args:
         See opentracing.tracer
     Returns:
@@ -456,7 +460,11 @@ def start_active_span(
     if opentracing is None:
         return noop_context_manager()  # type: ignore[unreachable]
 
-    return opentracing.tracer.start_active_span(
+    if tracer is None:
+        # use the global tracer by default
+        tracer = opentracing.tracer
+
+    return tracer.start_active_span(
         operation_name,
         child_of=child_of,
         references=references,
@@ -468,21 +476,42 @@ def start_active_span(
 
 
 def start_active_span_follows_from(
-    operation_name: str, contexts: Collection, inherit_force_tracing=False
+    operation_name: str,
+    contexts: Collection,
+    child_of=None,
+    start_time: Optional[float] = None,
+    *,
+    inherit_force_tracing=False,
+    tracer=None,
 ):
     """Starts an active opentracing span, with additional references to previous spans
 
     Args:
         operation_name: name of the operation represented by the new span
         contexts: the previous spans to inherit from
+
+        child_of: optionally override the parent span. If unset, the currently active
+           span will be the parent. (If there is no currently active span, the first
+           span in `contexts` will be the parent.)
+
+        start_time: optional override for the start time of the created span. Seconds
+            since the epoch.
+
         inherit_force_tracing: if set, and any of the previous contexts have had tracing
            forced, the new span will also have tracing forced.
+        tracer: override the opentracing tracer. By default the global tracer is used.
     """
     if opentracing is None:
         return noop_context_manager()  # type: ignore[unreachable]
 
     references = [opentracing.follows_from(context) for context in contexts]
-    scope = start_active_span(operation_name, references=references)
+    scope = start_active_span(
+        operation_name,
+        child_of=child_of,
+        references=references,
+        start_time=start_time,
+        tracer=tracer,
+    )
 
     if inherit_force_tracing and any(
         is_context_forced_tracing(ctx) for ctx in contexts
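
A hypothetical caller of the widened signature, linking a batch-processing span back to the spans of the requests it covers and backdating its start (the span-contexts list is empty here so the example stands alone):

    import time

    from synapse.logging.opentracing import start_active_span_follows_from

    # Span contexts captured earlier, e.g. when each request was enqueued.
    request_span_contexts: list = []

    with start_active_span_follows_from(
        "process_batch",
        contexts=request_span_contexts,
        start_time=time.time() - 5,  # the batch actually began 5 seconds ago
        inherit_force_tracing=True,
    ):
        ...  # the batched work happens inside the span
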
diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py
index db8ca2c049..d57e7c5324 100644
--- a/synapse/logging/scopecontextmanager.py
+++ b/synapse/logging/scopecontextmanager.py
@@ -28,8 +28,9 @@ class LogContextScopeManager(ScopeManager):
     The LogContextScopeManager tracks the active scope in opentracing
     by using the log contexts which are native to synapse. This is so
     that the basic opentracing api can be used across twisted deferreds.
-    (I would love to break logcontexts and this into an OS package. but
-    let's wait for twisted's contexts to be released.)
+
+    It would be nice just to use opentracing's ContextVarsScopeManager,
+    but currently that doesn't work due to https://twistedmatrix.com/trac/ticket/10301.
     """
 
     def __init__(self, config):
@@ -65,29 +66,45 @@ class LogContextScopeManager(ScopeManager):
             Scope.close() on the returned instance.
         """
 
-        enter_logcontext = False
         ctx = current_context()
 
         if not ctx:
-            # We don't want this scope to affect.
             logger.error("Tried to activate scope outside of loggingcontext")
             return Scope(None, span)  # type: ignore[arg-type]
-        elif ctx.scope is not None:
-            # We want the logging scope to look exactly the same so we give it
-            # a blank suffix
+
+        if ctx.scope is not None:
+            # start a new logging context as a child of the existing one.
+            # Doing so -- rather than updating the existing logcontext -- means that
+            # creating several concurrent spans under the same logcontext works
+            # correctly.
             ctx = nested_logging_context("")
             enter_logcontext = True
+        else:
+            # if there is no span currently associated with the current logcontext, we
+            # just store the scope in it.
+            #
+            # This feels a bit dubious, but it does hack around a problem where a
+            # span outlasts its parent logcontext (which would otherwise lead to
+            # "Re-starting finished log context" errors).
+            enter_logcontext = False
 
         scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
         ctx.scope = scope
+        if enter_logcontext:
+            ctx.__enter__()
+
         return scope
 
 
 class _LogContextScope(Scope):
     """
-    A custom opentracing scope. The only significant difference is that it will
-    close the log context it's related to if the logcontext was created specifically
-    for this scope.
+    A custom opentracing scope, associated with a LogContext
+
+      * filters out _DefGen_Return exceptions which arise from calling
+        `defer.returnValue` in Twisted code
+
+      * When the scope is closed, the logcontext's active scope is reset to None,
+        and, if enter_logcontext was set, the logcontext is exited too.
     """
 
     def __init__(self, manager, span, logcontext, enter_logcontext, finish_on_close):
@@ -101,8 +118,7 @@ class _LogContextScope(Scope):
             logcontext (LogContext):
                 the logcontext to which this scope is attached.
             enter_logcontext (Boolean):
-                if True the logcontext will be entered and exited when the scope
-                is entered and exited respectively
+                if True the logcontext will be exited when the scope is finished
             finish_on_close (Boolean):
                 if True finish the span when the scope is closed
         """
@@ -111,26 +127,28 @@ class _LogContextScope(Scope):
         self._finish_on_close = finish_on_close
         self._enter_logcontext = enter_logcontext
 
-    def __enter__(self):
-        if self._enter_logcontext:
-            self.logcontext.__enter__()
+    def __exit__(self, exc_type, value, traceback):
+        if exc_type == twisted.internet.defer._DefGen_Return:
+            # filter out defer.returnValue() calls
+            exc_type = value = traceback = None
+        super().__exit__(exc_type, value, traceback)
 
-        return self
-
-    def __exit__(self, type, value, traceback):
-        if type == twisted.internet.defer._DefGen_Return:
-            super().__exit__(None, None, None)
-        else:
-            super().__exit__(type, value, traceback)
-        if self._enter_logcontext:
-            self.logcontext.__exit__(type, value, traceback)
-        else:  # the logcontext existed before the creation of the scope
-            self.logcontext.scope = None
+    def __str__(self):
+        return f"Scope<{self.span}>"
 
     def close(self):
-        if self.manager.active is not self:
-            logger.error("Tried to close a non-active scope!")
-            return
+        active_scope = self.manager.active
+        if active_scope is not self:
+            logger.error(
+                "Closing scope %s which is not the currently-active one %s",
+                self,
+                active_scope,
+            )
 
         if self._finish_on_close:
             self.span.finish()
+
+        self.logcontext.scope = None
+
+        if self._enter_logcontext:
+            self.logcontext.__exit__(None, None, None)
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 9e6c1b2f3b..cca084c18c 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -30,6 +30,7 @@ from typing import (
     Type,
     TypeVar,
     Union,
+    cast,
 )
 
 import attr
@@ -60,7 +61,7 @@ all_gauges: "Dict[str, Union[LaterGauge, InFlightGauge]]" = {}
 HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
 
 
-class RegistryProxy:
+class _RegistryProxy:
     @staticmethod
     def collect() -> Iterable[Metric]:
         for metric in REGISTRY.collect():
@@ -68,6 +69,13 @@ class RegistryProxy:
                 yield metric
 
 
+# A little bit nasty, but collect() above is static so a Protocol doesn't work.
+# _RegistryProxy matches the signature of a CollectorRegistry instance enough
+# for it to be usable in the contexts in which we use it.
+# TODO Do something nicer about this.
+RegistryProxy = cast(CollectorRegistry, _RegistryProxy)
+
+
 @attr.s(slots=True, hash=True, auto_attribs=True)
 class LaterGauge:
 
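
The `cast` is a type-checker affordance only: it changes what mypy believes about the name without any runtime conversion, which is why the static `collect` keeps working. The same trick in miniature:

    from typing import cast


    class _Proxy:
        @staticmethod
        def collect() -> list:
            return []


    class Registry:
        def collect(self) -> list:
            return [1, 2, 3]


    # mypy now treats proxy as a Registry instance; at runtime it is still
    # _Proxy, so the static method is what actually runs.
    proxy = cast(Registry, _Proxy)
    print(proxy.collect())  # []
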
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 662e60bc33..29fbc73c97 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -71,6 +71,7 @@ from synapse.handlers.account_validity import (
 from synapse.handlers.auth import (
     CHECK_3PID_AUTH_CALLBACK,
     CHECK_AUTH_CALLBACK,
+    GET_USERNAME_FOR_REGISTRATION_CALLBACK,
     ON_LOGGED_OUT_CALLBACK,
     AuthHandler,
 )
@@ -177,6 +178,7 @@ class ModuleApi:
         self._presence_stream = hs.get_event_sources().sources.presence
         self._state = hs.get_state_handler()
         self._clock: Clock = hs.get_clock()
+        self._registration_handler = hs.get_registration_handler()
         self._send_email_handler = hs.get_send_email_handler()
         self.custom_template_dir = hs.config.server.custom_template_directory
 
@@ -310,6 +312,9 @@ class ModuleApi:
         auth_checkers: Optional[
             Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK]
         ] = None,
+        get_username_for_registration: Optional[
+            GET_USERNAME_FOR_REGISTRATION_CALLBACK
+        ] = None,
     ) -> None:
         """Registers callbacks for password auth provider capabilities.
 
@@ -319,6 +324,7 @@ class ModuleApi:
             check_3pid_auth=check_3pid_auth,
             on_logged_out=on_logged_out,
             auth_checkers=auth_checkers,
+            get_username_for_registration=get_username_for_registration,
         )
 
     def register_background_update_controller_callbacks(
@@ -395,6 +401,32 @@ class ModuleApi:
         """
         return self._hs.config.email.email_app_name
 
+    @property
+    def server_name(self) -> str:
+        """The server name for the local homeserver.
+
+        Added in Synapse v1.53.0.
+        """
+        return self._server_name
+
+    @property
+    def worker_name(self) -> Optional[str]:
+        """The name of the worker this specific instance is running as per the
+        "worker_name" configuration setting, or None if it's the main process.
+
+        Added in Synapse v1.53.0.
+        """
+        return self._hs.config.worker.worker_name
+
+    @property
+    def worker_app(self) -> Optional[str]:
+        """The name of the worker app this specific instance is running as per the
+        "worker_app" configuration setting, or None if it's the main process.
+
+        Added in Synapse v1.53.0.
+        """
+        return self._hs.config.worker.worker_app
+
     async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]:
         """Get user info by user_id
 
@@ -1202,6 +1234,22 @@ class ModuleApi:
         """
         return await defer_to_thread(self._hs.get_reactor(), f, *args, **kwargs)
 
+    async def check_username(self, username: str) -> None:
+        """Checks if the provided username uses the grammar defined in the Matrix
+        specification, and is already being used by an existing user.
+
+        Added in Synapse v1.52.0.
+
+        Args:
+            username: The username to check. This is the local part of the user's full
+                Matrix user ID, i.e. it's "alice" if the full user ID is "@alice:foo.com".
+
+        Raises:
+            SynapseError with the errcode "M_USER_IN_USE" if the username is already in
+            use.
+        """
+        await self._registration_handler.check_username(username)
+
 
 class PublicRoomListManager:
     """Contains methods for adding to, removing from and querying whether a room
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 632b2245ef..5988c67d90 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -461,7 +461,9 @@ class Notifier:
                     users,
                 )
             except Exception:
-                logger.exception("Error notifying application services of event")
+                logger.exception(
+                    "Error notifying application services of ephemeral events"
+                )
 
     def on_new_replication_data(self) -> None:
         """Used to inform replication listeners that something has happened
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index 6211506990..910b05c0da 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -20,15 +20,11 @@ from typing import Any, Dict, List
 from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP
 
 
-def list_with_base_rules(
-    rawrules: List[Dict[str, Any]], use_new_defaults: bool = False
-) -> List[Dict[str, Any]]:
+def list_with_base_rules(rawrules: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
     """Combine the list of rules set by the user with the default push rules
 
     Args:
         rawrules: The rules the user has modified or set.
-        use_new_defaults: Whether to use the new experimental default rules when
-            appending or prepending default rules.
 
     Returns:
         A new list with the rules set by the user combined with the defaults.
@@ -48,9 +44,7 @@ def list_with_base_rules(
 
     ruleslist.extend(
         make_base_prepend_rules(
-            PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
-            modified_base_rules,
-            use_new_defaults,
+            PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules
         )
     )
 
@@ -61,7 +55,6 @@ def list_with_base_rules(
                     make_base_append_rules(
                         PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
                         modified_base_rules,
-                        use_new_defaults,
                     )
                 )
                 current_prio_class -= 1
@@ -70,7 +63,6 @@ def list_with_base_rules(
                         make_base_prepend_rules(
                             PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
                             modified_base_rules,
-                            use_new_defaults,
                         )
                     )
 
@@ -79,18 +71,14 @@ def list_with_base_rules(
     while current_prio_class > 0:
         ruleslist.extend(
             make_base_append_rules(
-                PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
-                modified_base_rules,
-                use_new_defaults,
+                PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules
             )
         )
         current_prio_class -= 1
         if current_prio_class > 0:
             ruleslist.extend(
                 make_base_prepend_rules(
-                    PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
-                    modified_base_rules,
-                    use_new_defaults,
+                    PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules
                 )
             )
 
@@ -98,24 +86,14 @@ def list_with_base_rules(
 
 
 def make_base_append_rules(
-    kind: str,
-    modified_base_rules: Dict[str, Dict[str, Any]],
-    use_new_defaults: bool = False,
+    kind: str, modified_base_rules: Dict[str, Dict[str, Any]]
 ) -> List[Dict[str, Any]]:
     rules = []
 
     if kind == "override":
-        rules = (
-            NEW_APPEND_OVERRIDE_RULES
-            if use_new_defaults
-            else BASE_APPEND_OVERRIDE_RULES
-        )
+        rules = BASE_APPEND_OVERRIDE_RULES
     elif kind == "underride":
-        rules = (
-            NEW_APPEND_UNDERRIDE_RULES
-            if use_new_defaults
-            else BASE_APPEND_UNDERRIDE_RULES
-        )
+        rules = BASE_APPEND_UNDERRIDE_RULES
     elif kind == "content":
         rules = BASE_APPEND_CONTENT_RULES
 
@@ -134,7 +112,6 @@ def make_base_append_rules(
 def make_base_prepend_rules(
     kind: str,
     modified_base_rules: Dict[str, Dict[str, Any]],
-    use_new_defaults: bool = False,
 ) -> List[Dict[str, Any]]:
     rules = []
 
@@ -301,135 +278,6 @@ BASE_APPEND_OVERRIDE_RULES = [
 ]
 
 
-NEW_APPEND_OVERRIDE_RULES = [
-    {
-        "rule_id": "global/override/.m.rule.encrypted",
-        "conditions": [
-            {
-                "kind": "event_match",
-                "key": "type",
-                "pattern": "m.room.encrypted",
-                "_id": "_encrypted",
-            }
-        ],
-        "actions": ["notify"],
-    },
-    {
-        "rule_id": "global/override/.m.rule.suppress_notices",
-        "conditions": [
-            {
-                "kind": "event_match",
-                "key": "type",
-                "pattern": "m.room.message",
-                "_id": "_suppress_notices_type",
-            },
-            {
-                "kind": "event_match",
-                "key": "content.msgtype",
-                "pattern": "m.notice",
-                "_id": "_suppress_notices",
-            },
-        ],
-        "actions": [],
-    },
-    {
-        "rule_id": "global/underride/.m.rule.suppress_edits",
-        "conditions": [
-            {
-                "kind": "event_match",
-                "key": "m.relates_to.m.rel_type",
-                "pattern": "m.replace",
-                "_id": "_suppress_edits",
-            }
-        ],
-        "actions": [],
-    },
-    {
-        "rule_id": "global/override/.m.rule.invite_for_me",
-        "conditions": [
-            {
-                "kind": "event_match",
-                "key": "type",
-                "pattern": "m.room.member",
-                "_id": "_member",
-            },
-            {
-                "kind": "event_match",
-                "key": "content.membership",
-                "pattern": "invite",
-                "_id": "_invite_member",
-            },
-            {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"},
-        ],
-        "actions": ["notify", {"set_tweak": "sound", "value": "default"}],
-    },
-    {
-        "rule_id": "global/override/.m.rule.contains_display_name",
-        "conditions": [{"kind": "contains_display_name"}],
-        "actions": [
-            "notify",
-            {"set_tweak": "sound", "value": "default"},
-            {"set_tweak": "highlight"},
-        ],
-    },
-    {
-        "rule_id": "global/override/.m.rule.tombstone",
-        "conditions": [
-            {
-                "kind": "event_match",
-                "key": "type",
-                "pattern": "m.room.tombstone",
-                "_id": "_tombstone",
-            },
-            {
-                "kind": "event_match",
-                "key": "state_key",
-                "pattern": "",
-                "_id": "_tombstone_statekey",
-            },
-        ],
-        "actions": [
-            "notify",
-            {"set_tweak": "sound", "value": "default"},
-            {"set_tweak": "highlight"},
-        ],
-    },
-    {
-        "rule_id": "global/override/.m.rule.roomnotif",
-        "conditions": [
-            {
-                "kind": "event_match",
-                "key": "content.body",
-                "pattern": "@room",
-                "_id": "_roomnotif_content",
-            },
-            {
-                "kind": "sender_notification_permission",
-                "key": "room",
-                "_id": "_roomnotif_pl",
-            },
-        ],
-        "actions": [
-            "notify",
-            {"set_tweak": "highlight"},
-            {"set_tweak": "sound", "value": "default"},
-        ],
-    },
-    {
-        "rule_id": "global/override/.m.rule.call",
-        "conditions": [
-            {
-                "kind": "event_match",
-                "key": "type",
-                "pattern": "m.call.invite",
-                "_id": "_call",
-            }
-        ],
-        "actions": ["notify", {"set_tweak": "sound", "value": "ring"}],
-    },
-]
-
-
 BASE_APPEND_UNDERRIDE_RULES = [
     {
         "rule_id": "global/underride/.m.rule.call",
@@ -538,36 +386,6 @@ BASE_APPEND_UNDERRIDE_RULES = [
 ]
 
 
-NEW_APPEND_UNDERRIDE_RULES = [
-    {
-        "rule_id": "global/underride/.m.rule.room_one_to_one",
-        "conditions": [
-            {"kind": "room_member_count", "is": "2", "_id": "member_count"},
-            {
-                "kind": "event_match",
-                "key": "content.body",
-                "pattern": "*",
-                "_id": "body",
-            },
-        ],
-        "actions": ["notify", {"set_tweak": "sound", "value": "default"}],
-    },
-    {
-        "rule_id": "global/underride/.m.rule.message",
-        "conditions": [
-            {
-                "kind": "event_match",
-                "key": "content.body",
-                "pattern": "*",
-                "_id": "body",
-            },
-        ],
-        "actions": ["notify"],
-        "enabled": False,
-    },
-]
-
-
 BASE_RULE_IDS = set()
 
 for r in BASE_APPEND_CONTENT_RULES:
@@ -589,26 +407,3 @@ for r in BASE_APPEND_UNDERRIDE_RULES:
     r["priority_class"] = PRIORITY_CLASS_MAP["underride"]
     r["default"] = True
     BASE_RULE_IDS.add(r["rule_id"])
-
-
-NEW_RULE_IDS = set()
-
-for r in BASE_APPEND_CONTENT_RULES:
-    r["priority_class"] = PRIORITY_CLASS_MAP["content"]
-    r["default"] = True
-    NEW_RULE_IDS.add(r["rule_id"])
-
-for r in BASE_PREPEND_OVERRIDE_RULES:
-    r["priority_class"] = PRIORITY_CLASS_MAP["override"]
-    r["default"] = True
-    NEW_RULE_IDS.add(r["rule_id"])
-
-for r in NEW_APPEND_OVERRIDE_RULES:
-    r["priority_class"] = PRIORITY_CLASS_MAP["override"]
-    r["default"] = True
-    NEW_RULE_IDS.add(r["rule_id"])
-
-for r in NEW_APPEND_UNDERRIDE_RULES:
-    r["priority_class"] = PRIORITY_CLASS_MAP["underride"]
-    r["default"] = True
-    NEW_RULE_IDS.add(r["rule_id"])
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index dadfc57413..3df8452eec 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -455,7 +455,7 @@ class Mailer:
         }
 
         the_events = await filter_events_for_client(
-            self.storage, user_id, results["events_before"]
+            self.storage, user_id, results.events_before
         )
         the_events.append(notif_event)
 
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index d844fbb3b3..22b4606ae0 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -70,7 +70,7 @@ REQUIREMENTS = [
     "pyasn1>=0.1.9",
     "pyasn1-modules>=0.0.7",
     "bcrypt>=3.1.0",
-    "pillow>=4.3.0",
+    "pillow>=5.4.0",
     "sortedcontainers>=1.4.4",
     "pymacaroons>=0.13.0",
     "msgpack>=0.5.2",
@@ -107,7 +107,7 @@ CONDITIONAL_REQUIREMENTS = {
     # `systemd.journal.JournalHandler`, as is documented in
     # `contrib/systemd/log_config.yaml`.
     "systemd": ["systemd-python>=231"],
-    "url_preview": ["lxml>=3.5.0"],
+    "url_preview": ["lxml>=4.2.0"],
     "sentry": ["sentry-sdk>=0.7.2"],
     "opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"],
     "jwt": ["pyjwt>=1.6.4"],
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index 0f08372694..a72dad7464 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -52,8 +52,8 @@ class SlavedEventStore(
     EventPushActionsWorkerStore,
     StreamWorkerStore,
     StateGroupWorkerStore,
-    EventsWorkerStore,
     SignatureWorkerStore,
+    EventsWorkerStore,
     UserErasureWorkerStore,
     RelationsWorkerStore,
     BaseSlavedStore,
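
Reordering SignatureWorkerStore and EventsWorkerStore changes the class's method resolution order, which decides whose implementation wins when several mixin stores define the same method. The effect in miniature:

    class A:
        def get(self) -> str:
            return "A"


    class B:
        def get(self) -> str:
            return "B"


    class Before(A, B):  # A listed first, so A.get wins
        pass


    class After(B, A):  # swapping the bases flips the winner
        pass


    print(Before().get(), After().get())  # A B
    print([c.__name__ for c in After.__mro__])  # ['After', 'B', 'A', 'object']
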
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 465e06772b..9be9e33c8e 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -41,7 +41,9 @@ from synapse.rest.admin.event_reports import (
     EventReportsRestServlet,
 )
 from synapse.rest.admin.federation import (
-    DestinationsRestServlet,
+    DestinationMembershipRestServlet,
+    DestinationResetConnectionRestServlet,
+    DestinationRestServlet,
     ListDestinationsRestServlet,
 )
 from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
@@ -267,7 +269,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ListRegistrationTokensRestServlet(hs).register(http_server)
     NewRegistrationTokenRestServlet(hs).register(http_server)
     RegistrationTokenRestServlet(hs).register(http_server)
-    DestinationsRestServlet(hs).register(http_server)
+    DestinationMembershipRestServlet(hs).register(http_server)
+    DestinationResetConnectionRestServlet(hs).register(http_server)
+    DestinationRestServlet(hs).register(http_server)
     ListDestinationsRestServlet(hs).register(http_server)
 
     # Some servlets only get registered for the main process.
diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py
index 8cd3fa189e..d162e0081e 100644
--- a/synapse/rest/admin/federation.py
+++ b/synapse/rest/admin/federation.py
@@ -16,6 +16,7 @@ from http import HTTPStatus
 from typing import TYPE_CHECKING, Tuple
 
 from synapse.api.errors import Codes, NotFoundError, SynapseError
+from synapse.federation.transport.server import Authenticator
 from synapse.http.servlet import RestServlet, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
 from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
@@ -90,7 +91,7 @@ class ListDestinationsRestServlet(RestServlet):
         return HTTPStatus.OK, response
 
 
-class DestinationsRestServlet(RestServlet):
+class DestinationRestServlet(RestServlet):
     """Get details of a destination.
     This needs user to have administrator access in Synapse.
 
@@ -145,3 +146,100 @@ class DestinationsRestServlet(RestServlet):
             }
 
         return HTTPStatus.OK, response
+
+
+class DestinationMembershipRestServlet(RestServlet):
+    """Get list of rooms of a destination.
+    This needs user to have administrator access in Synapse.
+
+    GET /_synapse/admin/v1/federation/destinations/<destination>/rooms?from=0&limit=10
+
+    returns:
+        200 OK with a list of rooms on success, otherwise an error.
+
+    The parameters `from` and `limit` are optional and only used for pagination.
+    By default, a `limit` of 100 is used.
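+
+    An illustrative request (the hostname and access token are placeholders):
+
+        curl --header "Authorization: Bearer <admin_access_token>" \
+            "http://localhost:8008/_synapse/admin/v1/federation/destinations/matrix.org/rooms?from=0&limit=10"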
+    """
+
+    PATTERNS = admin_patterns("/federation/destinations/(?P<destination>[^/]*)/rooms$")
+
+    def __init__(self, hs: "HomeServer"):
+        self._auth = hs.get_auth()
+        self._store = hs.get_datastore()
+
+    async def on_GET(
+        self, request: SynapseRequest, destination: str
+    ) -> Tuple[int, JsonDict]:
+        await assert_requester_is_admin(self._auth, request)
+
+        if not await self._store.is_destination_known(destination):
+            raise NotFoundError("Unknown destination")
+
+        start = parse_integer(request, "from", default=0)
+        limit = parse_integer(request, "limit", default=100)
+
+        if start < 0:
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "Query parameter from must be a string representing a positive integer.",
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        if limit < 0:
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "Query parameter limit must be a string representing a positive integer.",
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        direction = parse_string(request, "dir", default="f", allowed_values=("f", "b"))
+
+        rooms, total = await self._store.get_destination_rooms_paginate(
+            destination, start, limit, direction
+        )
+        response = {"rooms": rooms, "total": total}
+        if (start + limit) < total:
+            response["next_token"] = str(start + len(rooms))
+
+        return HTTPStatus.OK, response
+
+
+class DestinationResetConnectionRestServlet(RestServlet):
+    """Reset destinations' connection timeouts and wake it up.
+    This needs user to have administrator access in Synapse.
+
+    POST /_synapse/admin/v1/federation/destinations/<destination>/reset_connection
+    {}
+
+    returns:
+        200 OK on success, otherwise an error.
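+
+    An illustrative request (the hostname and access token are placeholders):
+
+        curl --request POST --header "Authorization: Bearer <admin_access_token>" \
+            --data "{}" \
+            "http://localhost:8008/_synapse/admin/v1/federation/destinations/matrix.org/reset_connection"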
+    """
+
+    PATTERNS = admin_patterns(
+        "/federation/destinations/(?P<destination>[^/]+)/reset_connection$"
+    )
+
+    def __init__(self, hs: "HomeServer"):
+        self._auth = hs.get_auth()
+        self._store = hs.get_datastore()
+        self._authenticator = Authenticator(hs)
+
+    async def on_POST(
+        self, request: SynapseRequest, destination: str
+    ) -> Tuple[int, JsonDict]:
+        await assert_requester_is_admin(self._auth, request)
+
+        if not await self._store.is_destination_known(destination):
+            raise NotFoundError("Unknown destination")
+
+        retry_timings = await self._store.get_destination_retry_timings(destination)
+        if not (retry_timings and retry_timings.retry_last_ts):
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "The retry timing does not need to be reset for this destination.",
+            )
+
+        # reset timings and wake up
+        await self._authenticator.reset_retry_timings(destination)
+
+        return HTTPStatus.OK, {}
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index efe25fe7eb..5b706efbcf 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -729,7 +729,7 @@ class RoomEventContextServlet(RestServlet):
         else:
             event_filter = None
 
-        results = await self.room_context_handler.get_event_context(
+        event_context = await self.room_context_handler.get_event_context(
             requester,
             room_id,
             event_id,
@@ -738,25 +738,34 @@ class RoomEventContextServlet(RestServlet):
             use_admin_priviledge=True,
         )
 
-        if not results:
+        if not event_context:
             raise SynapseError(
                 HTTPStatus.NOT_FOUND, "Event not found.", errcode=Codes.NOT_FOUND
             )
 
         time_now = self.clock.time_msec()
-        aggregations = results.pop("aggregations", None)
-        results["events_before"] = self._event_serializer.serialize_events(
-            results["events_before"], time_now, bundle_aggregations=aggregations
-        )
-        results["event"] = self._event_serializer.serialize_event(
-            results["event"], time_now, bundle_aggregations=aggregations
-        )
-        results["events_after"] = self._event_serializer.serialize_events(
-            results["events_after"], time_now, bundle_aggregations=aggregations
-        )
-        results["state"] = self._event_serializer.serialize_events(
-            results["state"], time_now
-        )
+        results = {
+            "events_before": self._event_serializer.serialize_events(
+                event_context.events_before,
+                time_now,
+                bundle_aggregations=event_context.aggregations,
+            ),
+            "event": self._event_serializer.serialize_event(
+                event_context.event,
+                time_now,
+                bundle_aggregations=event_context.aggregations,
+            ),
+            "events_after": self._event_serializer.serialize_events(
+                event_context.events_after,
+                time_now,
+                bundle_aggregations=event_context.aggregations,
+            ),
+            "state": self._event_serializer.serialize_events(
+                event_context.state, time_now
+            ),
+            "start": event_context.start,
+            "end": event_context.end,
+        }
 
         return HTTPStatus.OK, results
 
diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py
index d1badbdf3b..58b8adbd32 100644
--- a/synapse/rest/client/account_data.py
+++ b/synapse/rest/client/account_data.py
@@ -66,7 +66,7 @@ class AccountDataServlet(RestServlet):
             raise AuthError(403, "Cannot get account data for other users.")
 
         event = await self.store.get_global_account_data_by_type_for_user(
-            account_data_type, user_id
+            user_id, account_data_type
         )
 
         if event is None:
diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py
index 6f796d5e50..8fe75bd750 100644
--- a/synapse/rest/client/push_rule.py
+++ b/synapse/rest/client/push_rule.py
@@ -29,7 +29,7 @@ from synapse.http.servlet import (
     parse_string,
 )
 from synapse.http.site import SynapseRequest
-from synapse.push.baserules import BASE_RULE_IDS, NEW_RULE_IDS
+from synapse.push.baserules import BASE_RULE_IDS
 from synapse.push.clientformat import format_push_rules_for_user
 from synapse.push.rulekinds import PRIORITY_CLASS_MAP
 from synapse.rest.client._base import client_patterns
@@ -61,10 +61,6 @@ class PushRuleRestServlet(RestServlet):
         self.notifier = hs.get_notifier()
         self._is_worker = hs.config.worker.worker_app is not None
 
-        self._users_new_default_push_rules = (
-            hs.config.server.users_new_default_push_rules
-        )
-
     async def on_PUT(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]:
         if self._is_worker:
             raise Exception("Cannot handle PUT /push_rules on worker")
@@ -217,12 +213,7 @@ class PushRuleRestServlet(RestServlet):
             rule_id = spec.rule_id
             is_default_rule = rule_id.startswith(".")
             if is_default_rule:
-                if user_id in self._users_new_default_push_rules:
-                    rule_ids = NEW_RULE_IDS
-                else:
-                    rule_ids = BASE_RULE_IDS
-
-                if namespaced_rule_id not in rule_ids:
+                if namespaced_rule_id not in BASE_RULE_IDS:
                     raise SynapseError(404, "Unknown rule %r" % (namespaced_rule_id,))
             await self.store.set_push_rule_actions(
                 user_id, namespaced_rule_id, actions, is_default_rule
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index 8b56c76aed..e3492f9f93 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -339,12 +339,19 @@ class UsernameAvailabilityRestServlet(RestServlet):
             ),
         )
 
+        self.inhibit_user_in_use_error = (
+            hs.config.registration.inhibit_user_in_use_error
+        )
+
     async def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
         if not self.hs.config.registration.enable_registration:
             raise SynapseError(
                 403, "Registration has been disabled", errcode=Codes.FORBIDDEN
             )
 
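+        # If configured to inhibit user-in-use errors, report every username
+        # as available rather than revealing which usernames are taken.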
+        if self.inhibit_user_in_use_error:
+            return 200, {"available": True}
+
         ip = request.getClientIP()
         with self.ratelimiter.ratelimit(ip) as wait_deferred:
             await wait_deferred
@@ -418,10 +425,14 @@ class RegisterRestServlet(RestServlet):
         self.ratelimiter = hs.get_registration_ratelimiter()
         self.password_policy_handler = hs.get_password_policy_handler()
         self.clock = hs.get_clock()
+        self.password_auth_provider = hs.get_password_auth_provider()
         self._registration_enabled = self.hs.config.registration.enable_registration
         self._refresh_tokens_enabled = (
             hs.config.registration.refreshable_access_token_lifetime is not None
         )
+        self._inhibit_user_in_use_error = (
+            hs.config.registration.inhibit_user_in_use_error
+        )
 
         self._registration_flows = _calculate_registration_flows(
             hs.config, self.auth_handler
@@ -564,6 +575,7 @@ class RegisterRestServlet(RestServlet):
                 desired_username,
                 guest_access_token=guest_access_token,
                 assigned_user_id=registered_user_id,
+                inhibit_user_in_use_error=self._inhibit_user_in_use_error,
             )
 
         # Check if the user-interactive authentication flows are complete, if
@@ -627,7 +639,16 @@ class RegisterRestServlet(RestServlet):
             if not password_hash:
                 raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM)
 
-            desired_username = params.get("username", None)
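+            # Give password auth provider modules first say on the username;
+            # fall back to the client-supplied one if no module provides one.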
+            desired_username = (
+                await self.password_auth_provider.get_username_for_registration(
+                    auth_result,
+                    params,
+                )
+            )
+
+            if desired_username is None:
+                desired_username = params.get("username", None)
+
             guest_access_token = params.get("guest_access_token", None)
 
             if desired_username is not None:
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 90bb9142a0..90355e44b2 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -706,27 +706,36 @@ class RoomEventContextServlet(RestServlet):
         else:
             event_filter = None
 
-        results = await self.room_context_handler.get_event_context(
+        event_context = await self.room_context_handler.get_event_context(
             requester, room_id, event_id, limit, event_filter
         )
 
-        if not results:
+        if not event_context:
             raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
 
         time_now = self.clock.time_msec()
-        aggregations = results.pop("aggregations", None)
-        results["events_before"] = self._event_serializer.serialize_events(
-            results["events_before"], time_now, bundle_aggregations=aggregations
-        )
-        results["event"] = self._event_serializer.serialize_event(
-            results["event"], time_now, bundle_aggregations=aggregations
-        )
-        results["events_after"] = self._event_serializer.serialize_events(
-            results["events_after"], time_now, bundle_aggregations=aggregations
-        )
-        results["state"] = self._event_serializer.serialize_events(
-            results["state"], time_now
-        )
+        results = {
+            "events_before": self._event_serializer.serialize_events(
+                event_context.events_before,
+                time_now,
+                bundle_aggregations=event_context.aggregations,
+            ),
+            "event": self._event_serializer.serialize_event(
+                event_context.event,
+                time_now,
+                bundle_aggregations=event_context.aggregations,
+            ),
+            "events_after": self._event_serializer.serialize_events(
+                event_context.events_after,
+                time_now,
+                bundle_aggregations=event_context.aggregations,
+            ),
+            "state": self._event_serializer.serialize_events(
+                event_context.state, time_now
+            ),
+            "start": event_context.start,
+            "end": event_context.end,
+        }
 
         return 200, results
 
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index d20ae1421e..f9615da525 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -48,6 +48,7 @@ from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
 from synapse.logging.opentracing import trace
+from synapse.storage.databases.main.relations import BundledAggregations
 from synapse.types import JsonDict, StreamToken
 from synapse.util import json_decoder
 
@@ -526,7 +527,7 @@ class SyncRestServlet(RestServlet):
 
         def serialize(
             events: Iterable[EventBase],
-            aggregations: Optional[Dict[str, Dict[str, Any]]] = None,
+            aggregations: Optional[Dict[str, BundledAggregations]] = None,
         ) -> List[JsonDict]:
             return self._event_serializer.serialize_events(
                 events,
diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py
index 30b067dd42..872a9e72e8 100644
--- a/synapse/rest/media/v1/preview_html.py
+++ b/synapse/rest/media/v1/preview_html.py
@@ -321,14 +321,33 @@ def _iterate_over_text(
 
 
 def rebase_url(url: str, base: str) -> str:
-    base_parts = list(urlparse.urlparse(base))
+    """
+    Resolves a potentially relative `url` against an absolute `base` URL.
+
+    For example:
+
+        >>> rebase_url("subpage", "https://example.com/foo/")
+        'https://example.com/foo/subpage'
+        >>> rebase_url("sibling", "https://example.com/foo")
+        'https://example.com/sibling'
+        >>> rebase_url("/bar", "https://example.com/foo/")
+        'https://example.com/bar'
+        >>> rebase_url("https://alice.com/a/", "https://example.com/foo/")
+        'https://alice.com/a/'
+    """
+    base_parts = urlparse.urlparse(base)
+    # Convert the parsed URL to a list for (potential) modification.
     url_parts = list(urlparse.urlparse(url))
-    if not url_parts[0]:  # fix up schema
-        url_parts[0] = base_parts[0] or "http"
-    if not url_parts[1]:  # fix up hostname
-        url_parts[1] = base_parts[1]
+    # Add a scheme, if one does not exist.
+    if not url_parts[0]:
+        url_parts[0] = base_parts.scheme or "http"
+    # Fix up the hostname, if this is not a data URL.
+    if url_parts[0] != "data" and not url_parts[1]:
+        url_parts[1] = base_parts.netloc
+        # If the path does not start with a /, nest it under the base path's last
+        # directory.
         if not url_parts[2].startswith("/"):
-            url_parts[2] = re.sub(r"/[^/]+$", "/", base_parts[2]) + url_parts[2]
+            url_parts[2] = re.sub(r"/[^/]+$", "/", base_parts.path) + url_parts[2]
     return urlparse.urlunparse(url_parts)
 
 
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index e8881bc870..efd84ced8f 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -21,8 +21,9 @@ import re
 import shutil
 import sys
 import traceback
-from typing import TYPE_CHECKING, Iterable, Optional, Tuple
+from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple
 from urllib import parse as urlparse
+from urllib.request import urlopen
 
 import attr
 
@@ -71,6 +72,17 @@ IMAGE_CACHE_EXPIRY_MS = 2 * ONE_DAY
 
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
+class DownloadResult:
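+    """
+    The result of fetching a URL (or parsing a data: URL) for a preview:
+    transport-level metadata about the content, captured before the content
+    is stored as local media.
+    """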
+    length: int
+    uri: str
+    response_code: int
+    media_type: str
+    download_name: Optional[str]
+    expires: int
+    etag: Optional[str]
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
 class MediaInfo:
     """
     Information parsed from downloading media being previewed.
@@ -256,7 +268,7 @@ class PreviewUrlResource(DirectServeJsonResource):
         if oembed_url:
             url_to_download = oembed_url
 
-        media_info = await self._download_url(url_to_download, user)
+        media_info = await self._handle_url(url_to_download, user)
 
         logger.debug("got media_info of '%s'", media_info)
 
@@ -297,7 +309,9 @@ class PreviewUrlResource(DirectServeJsonResource):
                 oembed_url = self._oembed.autodiscover_from_html(tree)
                 og_from_oembed: JsonDict = {}
                 if oembed_url:
-                    oembed_info = await self._download_url(oembed_url, user)
+                    oembed_info = await self._handle_url(
+                        oembed_url, user, allow_data_urls=True
+                    )
                     (
                         og_from_oembed,
                         author_name,
@@ -367,7 +381,135 @@ class PreviewUrlResource(DirectServeJsonResource):
 
         return jsonog.encode("utf8")
 
-    async def _download_url(self, url: str, user: UserID) -> MediaInfo:
+    async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResult:
+        """
+        Fetches a remote URL and parses the headers.
+
+        Args:
+             url: The URL to fetch.
+             output_stream: The stream to write the content to.
+
+        Returns:
+            A DownloadResult holding the media length, the URL downloaded,
+            the HTTP response code, the media type, the downloaded file name,
+            the number of milliseconds the result is valid for, and the etag
+            header.
+        """
+
+        try:
+            logger.debug("Trying to get preview for url '%s'", url)
+            length, headers, uri, code = await self.client.get_file(
+                url,
+                output_stream=output_stream,
+                max_size=self.max_spider_size,
+                headers={"Accept-Language": self.url_preview_accept_language},
+            )
+        except SynapseError:
+            # Pass SynapseErrors through directly, so that the servlet
+            # handler will return a SynapseError to the client instead of
+            # blank data or a 500.
+            raise
+        except DNSLookupError:
+            # DNS lookup returned no results
+            # Note: This will also be the case if one of the resolved IP
+            # addresses is blacklisted
+            raise SynapseError(
+                502,
+                "DNS resolution failure during URL preview generation",
+                Codes.UNKNOWN,
+            )
+        except Exception as e:
+            # FIXME: pass through 404s and other error messages nicely
+            logger.warning("Error downloading %s: %r", url, e)
+
+            raise SynapseError(
+                500,
+                "Failed to download content: %s"
+                % (traceback.format_exception_only(sys.exc_info()[0], e),),
+                Codes.UNKNOWN,
+            )
+
+        if b"Content-Type" in headers:
+            media_type = headers[b"Content-Type"][0].decode("ascii")
+        else:
+            media_type = "application/octet-stream"
+
+        download_name = get_filename_from_headers(headers)
+
+        # FIXME: we should calculate a proper expiration based on the
+        # Cache-Control and Expire headers.  But for now, assume 1 hour.
+        expires = ONE_HOUR
+        etag = headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None
+
+        return DownloadResult(
+            length, uri, code, media_type, download_name, expires, etag
+        )
+
+    async def _parse_data_url(
+        self, url: str, output_stream: BinaryIO
+    ) -> DownloadResult:
+        """
+        Parses a data: URL.
+
+        Args:
+             url: The URL to parse.
+             output_stream: The stream to write the content to.
+
+        Returns:
+            A DownloadResult holding the media length, the URL downloaded,
+            the HTTP response code, the media type, the downloaded file name,
+            the number of milliseconds the result is valid for, and the etag
+            header.
+        """
+
+        try:
+            logger.debug("Trying to parse data url '%s'", url)
+            with urlopen(url) as url_info:
+                # TODO Can this be more efficient?
+                output_stream.write(url_info.read())
+        except Exception as e:
+            logger.warning("Error parsing data: URL %s: %r", url, e)
+
+            raise SynapseError(
+                500,
+                "Failed to parse data URL: %s"
+                % (traceback.format_exception_only(sys.exc_info()[0], e),),
+                Codes.UNKNOWN,
+            )
+
+        return DownloadResult(
+            # Read back the length that has been written.
+            length=output_stream.tell(),
+            uri=url,
+            # If it was parsed, consider this a 200 OK.
+            response_code=200,
+            # urlopen shoves the media-type from the data URL into the content type
+            # header object.
+            media_type=url_info.headers.get_content_type(),
+            # Some features are not supported by data: URLs.
+            download_name=None,
+            expires=ONE_HOUR,
+            etag=None,
+        )
+
+    async def _handle_url(
+        self, url: str, user: UserID, allow_data_urls: bool = False
+    ) -> MediaInfo:
+        """
+        Fetches content from a URL and parses the result to generate a MediaInfo.
+
+        It uses the media storage provider to persist the fetched content and
+        stores the mapping into the database.
+
+        Args:
+             url: The URL to fetch.
+             user: The user who has requested this URL.
+             allow_data_urls: True if data URLs should be allowed.
+
+        Returns:
+            A MediaInfo object describing the fetched content.
+        """
+
         # TODO: we should probably honour robots.txt... except in practice
         # we're most likely being explicitly triggered by a human rather than a
         # bot, so are we really a robot?
@@ -377,61 +519,27 @@ class PreviewUrlResource(DirectServeJsonResource):
         file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)
 
         with self.media_storage.store_into_file(file_info) as (f, fname, finish):
-            try:
-                logger.debug("Trying to get preview for url '%s'", url)
-                length, headers, uri, code = await self.client.get_file(
-                    url,
-                    output_stream=f,
-                    max_size=self.max_spider_size,
-                    headers={"Accept-Language": self.url_preview_accept_language},
-                )
-            except SynapseError:
-                # Pass SynapseErrors through directly, so that the servlet
-                # handler will return a SynapseError to the client instead of
-                # blank data or a 500.
-                raise
-            except DNSLookupError:
-                # DNS lookup returned no results
-                # Note: This will also be the case if one of the resolved IP
-                # addresses is blacklisted
-                raise SynapseError(
-                    502,
-                    "DNS resolution failure during URL preview generation",
-                    Codes.UNKNOWN,
-                )
-            except Exception as e:
-                # FIXME: pass through 404s and other error messages nicely
-                logger.warning("Error downloading %s: %r", url, e)
-
-                raise SynapseError(
-                    500,
-                    "Failed to download content: %s"
-                    % (traceback.format_exception_only(sys.exc_info()[0], e),),
-                    Codes.UNKNOWN,
-                )
-            await finish()
+            if url.startswith("data:"):
+                if not allow_data_urls:
+                    raise SynapseError(
+                        500, "Previewing of data: URLs is forbidden", Codes.UNKNOWN
+                    )
 
-            if b"Content-Type" in headers:
-                media_type = headers[b"Content-Type"][0].decode("ascii")
+                download_result = await self._parse_data_url(url, f)
             else:
-                media_type = "application/octet-stream"
+                download_result = await self._download_url(url, f)
 
-            download_name = get_filename_from_headers(headers)
-
-            # FIXME: we should calculate a proper expiration based on the
-            # Cache-Control and Expire headers.  But for now, assume 1 hour.
-            expires = ONE_HOUR
-            etag = headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None
+            await finish()
 
         try:
             time_now_ms = self.clock.time_msec()
 
             await self.store.store_local_media(
                 media_id=file_id,
-                media_type=media_type,
+                media_type=download_result.media_type,
                 time_now_ms=time_now_ms,
-                upload_name=download_name,
-                media_length=length,
+                upload_name=download_result.download_name,
+                media_length=download_result.length,
                 user_id=user,
                 url_cache=url,
             )
@@ -444,16 +552,16 @@ class PreviewUrlResource(DirectServeJsonResource):
             raise
 
         return MediaInfo(
-            media_type=media_type,
-            media_length=length,
-            download_name=download_name,
+            media_type=download_result.media_type,
+            media_length=download_result.length,
+            download_name=download_result.download_name,
             created_ts_ms=time_now_ms,
             filesystem_id=file_id,
             filename=fname,
-            uri=uri,
-            response_code=code,
-            expires=expires,
-            etag=etag,
+            uri=download_result.uri,
+            response_code=download_result.response_code,
+            expires=download_result.expires,
+            etag=download_result.etag,
         )
 
     async def _precache_image_url(
@@ -474,8 +582,8 @@ class PreviewUrlResource(DirectServeJsonResource):
         # FIXME: it might be cleaner to use the same flow as the main /preview_url
         # request itself and benefit from the same caching etc.  But for now we
         # just rely on the caching on the master request to speed things up.
-        image_info = await self._download_url(
-            rebase_url(og["og:image"], media_info.uri), user
+        image_info = await self._handle_url(
+            rebase_url(og["og:image"], media_info.uri), user, allow_data_urls=True
         )
 
         if _is_media(image_info.media_type):
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 7967011afd..8df80664a2 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -57,7 +57,7 @@ class SQLBaseStore(metaclass=ABCMeta):
         pass
 
     def _invalidate_state_caches(
-        self, room_id: str, members_changed: Iterable[str]
+        self, room_id: str, members_changed: Collection[str]
     ) -> None:
         """Invalidates caches that are based on the current state, but does
         not stream invalidations down replication.
@@ -66,11 +66,16 @@ class SQLBaseStore(metaclass=ABCMeta):
             room_id: Room where state changed
             members_changed: The user_ids of members that have changed
         """
+        # If there were any membership changes, purge the appropriate caches.
         for host in {get_domain_from_id(u) for u in members_changed}:
             self._attempt_to_invalidate_cache("is_host_joined", (room_id, host))
+        if members_changed:
+            self._attempt_to_invalidate_cache("get_users_in_room", (room_id,))
+            self._attempt_to_invalidate_cache(
+                "get_users_in_room_with_profiles", (room_id,)
+            )
 
-        self._attempt_to_invalidate_cache("get_users_in_room", (room_id,))
-        self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,))
+        # Purge other caches based on room state.
         self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
         self._attempt_to_invalidate_cache("get_current_state_ids", (room_id,))
 
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 57cc1d76e0..99802228c9 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -702,6 +702,7 @@ class DatabasePool:
         func: Callable[..., R],
         *args: Any,
         db_autocommit: bool = False,
+        isolation_level: Optional[int] = None,
         **kwargs: Any,
     ) -> R:
         """Starts a transaction on the database and runs a given function
@@ -724,6 +725,7 @@ class DatabasePool:
                 called multiple times if the transaction is retried, so must
                 correctly handle that case.
 
+            isolation_level: Set the server isolation level for this transaction.
             args: positional args to pass to `func`
             kwargs: named args to pass to `func`
 
@@ -746,6 +748,7 @@ class DatabasePool:
                     func,
                     *args,
                     db_autocommit=db_autocommit,
+                    isolation_level=isolation_level,
                     **kwargs,
                 )
 
@@ -763,6 +766,7 @@ class DatabasePool:
         func: Callable[..., R],
         *args: Any,
         db_autocommit: bool = False,
+        isolation_level: Optional[int] = None,
         **kwargs: Any,
     ) -> R:
         """Wraps the .runWithConnection() method on the underlying db_pool.
@@ -775,6 +779,7 @@ class DatabasePool:
             db_autocommit: Whether to run the function in "autocommit" mode,
                 i.e. outside of a transaction. This is useful for transaction
                 that are only a single query. Currently only affects postgres.
+            isolation_level: Set the server isolation level for this transaction.
             kwargs: named args to pass to `func`
 
         Returns:
@@ -834,6 +839,10 @@ class DatabasePool:
                     try:
                         if db_autocommit:
                             self.engine.attempt_to_set_autocommit(conn, True)
+                        if isolation_level is not None:
+                            self.engine.attempt_to_set_isolation_level(
+                                conn, isolation_level
+                            )
 
                         db_conn = LoggingDatabaseConnection(
                             conn, self.engine, "runWithConnection"
@@ -842,6 +851,8 @@ class DatabasePool:
                     finally:
                         if db_autocommit:
                             self.engine.attempt_to_set_autocommit(conn, False)
+                        if isolation_level is not None:
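+                            # Restore the connection's default isolation level
+                            # before it is returned to the pool.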
+                            self.engine.attempt_to_set_isolation_level(conn, None)
 
         return await make_deferred_yieldable(
             self._db_pool.runWithConnection(inner_func, *args, **kwargs)
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index ef475e18c7..52146aacc8 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -26,6 +26,7 @@ from synapse.storage.database import (
     LoggingTransaction,
 )
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
+from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import (
     AbstractStreamIdGenerator,
@@ -44,7 +45,7 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
-class AccountDataWorkerStore(CacheInvalidationWorkerStore):
+class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore):
     def __init__(
         self,
         database: DatabasePool,
@@ -105,6 +106,11 @@ class AccountDataWorkerStore(CacheInvalidationWorkerStore):
             "AccountDataAndTagsChangeCache", account_max
         )
 
+        self.db_pool.updates.register_background_update_handler(
+            "delete_account_data_for_deactivated_users",
+            self._delete_account_data_for_deactivated_users,
+        )
+
     def get_max_account_data_stream_id(self) -> int:
         """Get the current max stream ID for account data stream
 
@@ -158,9 +164,9 @@ class AccountDataWorkerStore(CacheInvalidationWorkerStore):
             "get_account_data_for_user", get_account_data_for_user_txn
         )
 
-    @cached(num_args=2, max_entries=5000)
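+    # The cache is keyed on (user_id, data_type); tree=True lets all of a
+    # user's entries be invalidated with the (user_id,) prefix (as
+    # purge_account_data_for_user does).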
+    @cached(num_args=2, max_entries=5000, tree=True)
     async def get_global_account_data_by_type_for_user(
-        self, data_type: str, user_id: str
+        self, user_id: str, data_type: str
     ) -> Optional[JsonDict]:
         """
         Returns:
@@ -179,7 +185,7 @@ class AccountDataWorkerStore(CacheInvalidationWorkerStore):
         else:
             return None
 
-    @cached(num_args=2)
+    @cached(num_args=2, tree=True)
     async def get_account_data_for_room(
         self, user_id: str, room_id: str
     ) -> Dict[str, JsonDict]:
@@ -210,7 +216,7 @@ class AccountDataWorkerStore(CacheInvalidationWorkerStore):
             "get_account_data_for_room", get_account_data_for_room_txn
         )
 
-    @cached(num_args=3, max_entries=5000)
+    @cached(num_args=3, max_entries=5000, tree=True)
     async def get_account_data_for_room_and_type(
         self, user_id: str, room_id: str, account_data_type: str
     ) -> Optional[JsonDict]:
@@ -392,7 +398,7 @@ class AccountDataWorkerStore(CacheInvalidationWorkerStore):
             for row in rows:
                 if not row.room_id:
                     self.get_global_account_data_by_type_for_user.invalidate(
-                        (row.data_type, row.user_id)
+                        (row.user_id, row.data_type)
                     )
                 self.get_account_data_for_user.invalidate((row.user_id,))
                 self.get_account_data_for_room.invalidate((row.user_id, row.room_id))
@@ -476,7 +482,7 @@ class AccountDataWorkerStore(CacheInvalidationWorkerStore):
             self._account_data_stream_cache.entity_has_changed(user_id, next_id)
             self.get_account_data_for_user.invalidate((user_id,))
             self.get_global_account_data_by_type_for_user.invalidate(
-                (account_data_type, user_id)
+                (user_id, account_data_type)
             )
 
         return self._account_data_id_gen.get_current_token()
@@ -546,6 +552,123 @@ class AccountDataWorkerStore(CacheInvalidationWorkerStore):
         for ignored_user_id in previously_ignored_users ^ currently_ignored_users:
             self._invalidate_cache_and_stream(txn, self.ignored_by, (ignored_user_id,))
 
+    async def purge_account_data_for_user(self, user_id: str) -> None:
+        """
+        Removes ALL the account data for a user.
+        Intended to be used upon user deactivation.
+
+        Also purges the user from the ignored_users cache table
+        and the push_rules cache tables.
+        """
+
+        await self.db_pool.runInteraction(
+            "purge_account_data_for_user_txn",
+            self._purge_account_data_for_user_txn,
+            user_id,
+        )
+
+    def _purge_account_data_for_user_txn(
+        self, txn: LoggingTransaction, user_id: str
+    ) -> None:
+        """
+        See `purge_account_data_for_user`.
+        """
+        # Purge from the primary account_data tables.
+        self.db_pool.simple_delete_txn(
+            txn, table="account_data", keyvalues={"user_id": user_id}
+        )
+
+        self.db_pool.simple_delete_txn(
+            txn, table="room_account_data", keyvalues={"user_id": user_id}
+        )
+
+        # Purge from ignored_users where this user is the ignorer.
+        # N.B. We don't purge where this user is the ignoree, because that
+        #      interferes with other users' account data.
+        #      It's also not this user's data to delete!
+        self.db_pool.simple_delete_txn(
+            txn, table="ignored_users", keyvalues={"ignorer_user_id": user_id}
+        )
+
+        # Remove the push rules
+        self.db_pool.simple_delete_txn(
+            txn, table="push_rules", keyvalues={"user_name": user_id}
+        )
+        self.db_pool.simple_delete_txn(
+            txn, table="push_rules_enable", keyvalues={"user_name": user_id}
+        )
+        self.db_pool.simple_delete_txn(
+            txn, table="push_rules_stream", keyvalues={"user_id": user_id}
+        )
+
+        # Invalidate caches as appropriate
+        self._invalidate_cache_and_stream(
+            txn, self.get_account_data_for_room_and_type, (user_id,)
+        )
+        self._invalidate_cache_and_stream(
+            txn, self.get_account_data_for_user, (user_id,)
+        )
+        self._invalidate_cache_and_stream(
+            txn, self.get_global_account_data_by_type_for_user, (user_id,)
+        )
+        self._invalidate_cache_and_stream(
+            txn, self.get_account_data_for_room, (user_id,)
+        )
+        self._invalidate_cache_and_stream(txn, self.get_push_rules_for_user, (user_id,))
+        self._invalidate_cache_and_stream(
+            txn, self.get_push_rules_enabled_for_user, (user_id,)
+        )
+        # This user might be contained in the ignored_by cache for other users,
+        # so we have to invalidate it all.
+        self._invalidate_all_cache_and_stream(txn, self.ignored_by)
+
+    async def _delete_account_data_for_deactivated_users(
+        self, progress: dict, batch_size: int
+    ) -> int:
+        """
+        Retroactively purges account data for users that have already been deactivated.
+        Gets run as a background update caused by a schema delta.
+        """
+
+        last_user: str = progress.get("last_user", "")
+
+        def _delete_account_data_for_deactivated_users_txn(
+            txn: LoggingTransaction,
+        ) -> int:
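+            # Keyset-paginate through deactivated users by name so that each
+            # batch resumes where the previous batch stopped.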
+            sql = """
+                SELECT name FROM users
+                WHERE deactivated = ? AND name > ?
+                ORDER BY name ASC
+                LIMIT ?
+            """
+
+            txn.execute(sql, (1, last_user, batch_size))
+            users = [row[0] for row in txn]
+
+            for user in users:
+                self._purge_account_data_for_user_txn(txn, user_id=user)
+
+            if users:
+                self.db_pool.updates._background_update_progress_txn(
+                    txn,
+                    "delete_account_data_for_deactivated_users",
+                    {"last_user": users[-1]},
+                )
+
+            return len(users)
+
+        number_deleted = await self.db_pool.runInteraction(
+            "_delete_account_data_for_deactivated_users",
+            _delete_account_data_for_deactivated_users_txn,
+        )
+
+        if number_deleted < batch_size:
+            await self.db_pool.updates._end_background_update(
+                "delete_account_data_for_deactivated_users"
+            )
+
+        return number_deleted
+
 
 class AccountDataStore(AccountDataWorkerStore):
     pass
diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py
index 92c95a41d7..304814af5d 100644
--- a/synapse/storage/databases/main/appservice.py
+++ b/synapse/storage/databases/main/appservice.py
@@ -198,6 +198,7 @@ class ApplicationServiceTransactionWorkerStore(
         service: ApplicationService,
         events: List[EventBase],
         ephemeral: List[JsonDict],
+        to_device_messages: List[JsonDict],
     ) -> AppServiceTransaction:
         """Atomically creates a new transaction for this application service
         with the given list of events. Ephemeral events are NOT persisted to the
@@ -207,6 +208,7 @@ class ApplicationServiceTransactionWorkerStore(
             service: The service who the transaction is for.
             events: A list of persistent events to put in the transaction.
             ephemeral: A list of ephemeral events to put in the transaction.
+            to_device_messages: A list of to-device messages to put in the transaction.
 
         Returns:
             A new transaction.
@@ -237,7 +239,11 @@ class ApplicationServiceTransactionWorkerStore(
                 (service.id, new_txn_id, event_ids),
             )
             return AppServiceTransaction(
-                service=service, id=new_txn_id, events=events, ephemeral=ephemeral
+                service=service,
+                id=new_txn_id,
+                events=events,
+                ephemeral=ephemeral,
+                to_device_messages=to_device_messages,
             )
 
         return await self.db_pool.runInteraction(
@@ -330,7 +336,11 @@ class ApplicationServiceTransactionWorkerStore(
         events = await self.get_events_as_list(event_ids)
 
         return AppServiceTransaction(
-            service=service, id=entry["txn_id"], events=events, ephemeral=[]
+            service=service,
+            id=entry["txn_id"],
+            events=events,
+            ephemeral=[],
+            to_device_messages=[],
         )
 
     def _get_last_txn(self, txn, service_id: Optional[str]) -> int:
@@ -384,14 +394,14 @@ class ApplicationServiceTransactionWorkerStore(
             "get_new_events_for_appservice", get_new_events_for_appservice_txn
         )
 
-        events = await self.get_events_as_list(event_ids)
+        events = await self.get_events_as_list(event_ids, get_prev_content=True)
 
         return upper_bound, events
 
     async def get_type_stream_id_for_appservice(
         self, service: ApplicationService, type: str
     ) -> int:
-        if type not in ("read_receipt", "presence"):
+        if type not in ("read_receipt", "presence", "to_device"):
             raise ValueError(
                 "Expected type to be a valid application stream id type, got %s"
                 % (type,)
@@ -415,16 +425,16 @@ class ApplicationServiceTransactionWorkerStore(
             "get_type_stream_id_for_appservice", get_type_stream_id_for_appservice_txn
         )
 
-    async def set_type_stream_id_for_appservice(
+    async def set_appservice_stream_type_pos(
         self, service: ApplicationService, stream_type: str, pos: Optional[int]
     ) -> None:
-        if stream_type not in ("read_receipt", "presence"):
+        if stream_type not in ("read_receipt", "presence", "to_device"):
             raise ValueError(
                 "Expected type to be a valid application stream id type, got %s"
                 % (stream_type,)
             )
 
-        def set_type_stream_id_for_appservice_txn(txn):
+        def set_appservice_stream_type_pos_txn(txn):
             stream_id_type = "%s_stream_id" % stream_type
             txn.execute(
                 "UPDATE application_services_state SET %s = ? WHERE as_id=?"
@@ -433,7 +443,7 @@ class ApplicationServiceTransactionWorkerStore(
             )
 
         await self.db_pool.runInteraction(
-            "set_type_stream_id_for_appservice", set_type_stream_id_for_appservice_txn
+            "set_appservice_stream_type_pos", set_appservice_stream_type_pos_txn
         )
 
 
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index 0024348067..c428dd5596 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -15,7 +15,7 @@
 
 import itertools
 import logging
-from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Collection, Iterable, List, Optional, Tuple
 
 from synapse.api.constants import EventTypes
 from synapse.replication.tcp.streams import BackfillStream, CachesStream
@@ -25,7 +25,11 @@ from synapse.replication.tcp.streams.events import (
     EventsStreamEventRow,
 )
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import (
+    DatabasePool,
+    LoggingDatabaseConnection,
+    LoggingTransaction,
+)
 from synapse.storage.engines import PostgresEngine
 from synapse.util.iterutils import batch_iter
 
@@ -236,7 +240,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         txn.call_after(cache_func.invalidate_all)
         self._send_invalidation_to_replication(txn, cache_func.__name__, None)
 
-    def _invalidate_state_caches_and_stream(self, txn, room_id, members_changed):
+    def _invalidate_state_caches_and_stream(
+        self, txn: LoggingTransaction, room_id: str, members_changed: Collection[str]
+    ) -> None:
         """Special case invalidation of caches based on current state.
 
         We special case this so that we can batch the cache invalidations into a
@@ -244,8 +250,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
 
         Args:
             txn
-            room_id (str): Room where state changed
-            members_changed (iterable[str]): The user_ids of members that have changed
+            room_id: Room where state changed
+            members_changed: The user_ids of members that have changed
         """
         txn.call_after(self._invalidate_state_caches, room_id, members_changed)
 
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 4eca97189b..8801b7b2dd 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 import logging
-from typing import TYPE_CHECKING, List, Optional, Tuple, cast
+from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set, Tuple, cast
 
 from synapse.logging import issue9533_logger
 from synapse.logging.opentracing import log_kv, set_tag, trace
@@ -24,6 +24,7 @@ from synapse.storage.database import (
     DatabasePool,
     LoggingDatabaseConnection,
     LoggingTransaction,
+    make_in_list_sql_clause,
 )
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import (
@@ -136,63 +137,260 @@ class DeviceInboxWorkerStore(SQLBaseStore):
     def get_to_device_stream_token(self):
         return self._device_inbox_id_gen.get_current_token()
 
-    async def get_new_messages_for_device(
+    async def get_messages_for_user_devices(
+        self,
+        user_ids: Collection[str],
+        from_stream_id: int,
+        to_stream_id: int,
+    ) -> Dict[Tuple[str, str], List[JsonDict]]:
+        """
+        Retrieve to-device messages for a given set of users.
+
+        Only to-device messages with stream ids between the given boundaries
+        (from < X <= to) are returned.
+
+        Args:
+            user_ids: The users to retrieve to-device messages for.
+            from_stream_id: The lower boundary of stream id to filter with (exclusive).
+            to_stream_id: The upper boundary of stream id to filter with (inclusive).
+
+        Returns:
+            A dictionary of (user id, device id) -> list of to-device messages.
+        """
+        # We expect the stream ID returned by _get_device_messages to always
+        # be to_stream_id. So, no need to return it from this function.
+        (
+            user_id_device_id_to_messages,
+            last_processed_stream_id,
+        ) = await self._get_device_messages(
+            user_ids=user_ids,
+            from_stream_id=from_stream_id,
+            to_stream_id=to_stream_id,
+        )
+
+        assert (
+            last_processed_stream_id == to_stream_id
+        ), "Expected _get_device_messages to process all to-device messages up to `to_stream_id`"
+
+        return user_id_device_id_to_messages
+
+    async def get_messages_for_device(
         self,
         user_id: str,
-        device_id: Optional[str],
-        last_stream_id: int,
-        current_stream_id: int,
+        device_id: str,
+        from_stream_id: int,
+        to_stream_id: int,
         limit: int = 100,
-    ) -> Tuple[List[dict], int]:
+    ) -> Tuple[List[JsonDict], int]:
         """
+        Retrieve to-device messages for a single user device.
+
+        Only to-device messages with stream ids between the given boundaries
+        (from < X <= to) are returned.
+
         Args:
-            user_id: The recipient user_id.
-            device_id: The recipient device_id.
-            last_stream_id: The last stream ID checked.
-            current_stream_id: The current position of the to device
-                message stream.
-            limit: The maximum number of messages to retrieve.
+            user_id: The ID of the user to retrieve messages for.
+            device_id: The ID of the device to retrieve to-device messages for.
+            from_stream_id: The lower boundary of stream id to filter with (exclusive).
+            to_stream_id: The upper boundary of stream id to filter with (inclusive).
+            limit: A limit on the number of to-device messages returned.
 
         Returns:
             A tuple containing:
-                * A list of messages for the device.
-                * The max stream token of these messages. There may be more to retrieve
-                  if the given limit was reached.
+                * A list of to-device messages within the given stream id range intended for
+                  the given user / device combo.
+                * The last-processed stream ID. Subsequent calls of this function with the
+                  same device should pass this value as 'from_stream_id'.
         """
-        has_changed = self._device_inbox_stream_cache.has_entity_changed(
-            user_id, last_stream_id
+        (
+            user_id_device_id_to_messages,
+            last_processed_stream_id,
+        ) = await self._get_device_messages(
+            user_ids=[user_id],
+            device_id=device_id,
+            from_stream_id=from_stream_id,
+            to_stream_id=to_stream_id,
+            limit=limit,
         )
-        if not has_changed:
-            return [], current_stream_id
 
-        def get_new_messages_for_device_txn(txn):
-            sql = (
-                "SELECT stream_id, message_json FROM device_inbox"
-                " WHERE user_id = ? AND device_id = ?"
-                " AND ? < stream_id AND stream_id <= ?"
-                " ORDER BY stream_id ASC"
-                " LIMIT ?"
+        if not user_id_device_id_to_messages:
+            # There were no messages!
+            return [], to_stream_id
+
+        # Extract the messages, no need to return the user and device ID again
+        to_device_messages = user_id_device_id_to_messages.get((user_id, device_id), [])
+
+        return to_device_messages, last_processed_stream_id
+
+    async def _get_device_messages(
+        self,
+        user_ids: Collection[str],
+        from_stream_id: int,
+        to_stream_id: int,
+        device_id: Optional[str] = None,
+        limit: Optional[int] = None,
+    ) -> Tuple[Dict[Tuple[str, str], List[JsonDict]], int]:
+        """
+        Retrieve pending to-device messages for a collection of user devices.
+
+        Only to-device messages with stream ids between the given boundaries
+        (from < X <= to) are returned.
+
+        Note that a stream ID can be shared by multiple copies of the same message with
+        different recipient devices. Stream IDs are only unique in the context of a single
+        user ID / device ID pair. Thus, applying a limit (of messages to return) when working
+        with a sliding window of stream IDs is only possible when querying messages of a
+        single user device.
+
+        Finally, note that device IDs are not unique across users.
+
+        Args:
+            user_ids: The user IDs to filter device messages by.
+            from_stream_id: The lower boundary of stream id to filter with (exclusive).
+            to_stream_id: The upper boundary of stream id to filter with (inclusive).
+            device_id: A device ID to query to-device messages for. If not provided, to-device
+                messages from all device IDs for the given user IDs will be queried. May not be
+                provided if `user_ids` contains more than one entry.
+            limit: The maximum number of to-device messages to return. Can only be used when
+                passing a single user ID / device ID tuple.
+
+        Returns:
+            A tuple containing:
+                * A dict of (user_id, device_id) -> list of to-device messages
+                * The last-processed stream ID. If this is less than `to_stream_id`, then
+                    there may be more messages to retrieve. If `limit` is not set, then this
+                    is always equal to 'to_stream_id'.
+        """
+        if not user_ids:
+            logger.warning("No users provided upon querying for device IDs")
+            return {}, to_stream_id
+
+        # Prevent a query for one user's device also retrieving another user's device with
+        # the same device ID (device IDs are not unique across users).
+        if len(user_ids) > 1 and device_id is not None:
+            raise AssertionError(
+                "Programming error: 'device_id' cannot be supplied to "
+                "_get_device_messages when >1 user_id has been provided"
             )
-            txn.execute(
-                sql, (user_id, device_id, last_stream_id, current_stream_id, limit)
+
+        # A limit can only be applied when querying for a single user ID / device ID tuple.
+        # See the docstring of this function for more details.
+        if limit is not None and device_id is None:
+            raise AssertionError(
+                "Programming error: _get_device_messages was passed 'limit' "
+                "without a specific user_id/device_id"
             )
 
-            messages = []
-            stream_pos = current_stream_id
+        user_ids_to_query: Set[str] = set()
+        device_ids_to_query: Set[str] = set()
+
+        # Note that a device ID could be an empty string.
+        if device_id is not None:
+            # If a device ID was passed, use it to filter results.
+            # Otherwise, device IDs will be derived from the given collection of user IDs.
+            device_ids_to_query.add(device_id)
+
+        # Determine which users have devices with pending messages
+        for user_id in user_ids:
+            if self._device_inbox_stream_cache.has_entity_changed(
+                user_id, from_stream_id
+            ):
+                # This user has new messages sent to them. Query messages for them
+                user_ids_to_query.add(user_id)
+
+        def get_device_messages_txn(
+            txn: LoggingTransaction,
+        ) -> Tuple[Dict[Tuple[str, str], List[JsonDict]], int]:
+            # Build a query to select messages from any of the given devices that
+            # are between the given stream id bounds.
+
+            # If a list of device IDs was not provided, retrieve all device IDs
+            # for the given users. We explicitly do not query hidden devices, as
+            # hidden devices should not receive to-device messages.
+            # Note that this is more efficient than just dropping `device_id` from the query,
+            # since device_inbox has an index on `(user_id, device_id, stream_id)`
+            if not device_ids_to_query:
+                user_device_dicts = self.db_pool.simple_select_many_txn(
+                    txn,
+                    table="devices",
+                    column="user_id",
+                    iterable=user_ids_to_query,
+                    keyvalues={"user_id": user_id, "hidden": False},
+                    retcols=("device_id",),
+                )
 
-            for row in txn:
-                stream_pos = row[0]
-                messages.append(db_to_json(row[1]))
+                device_ids_to_query.update(
+                    {row["device_id"] for row in user_device_dicts}
+                )
 
-            # If the limit was not reached we know that there's no more data for this
-            # user/device pair up to current_stream_id.
-            if len(messages) < limit:
-                stream_pos = current_stream_id
+            if not device_ids_to_query:
+                # We've ended up with no devices to query.
+                return {}, to_stream_id
 
-            return messages, stream_pos
+            # We include both user IDs and device IDs in this query, as we have an index
+            # (device_inbox_user_stream_id) for them.
+            user_id_many_clause_sql, user_id_many_clause_args = make_in_list_sql_clause(
+                self.database_engine, "user_id", user_ids_to_query
+            )
+            (
+                device_id_many_clause_sql,
+                device_id_many_clause_args,
+            ) = make_in_list_sql_clause(
+                self.database_engine, "device_id", device_ids_to_query
+            )
+
+            sql = f"""
+                SELECT stream_id, user_id, device_id, message_json FROM device_inbox
+                WHERE {user_id_many_clause_sql}
+                AND {device_id_many_clause_sql}
+                AND ? < stream_id AND stream_id <= ?
+                ORDER BY stream_id ASC
+            """
+            sql_args = (
+                *user_id_many_clause_args,
+                *device_id_many_clause_args,
+                from_stream_id,
+                to_stream_id,
+            )
+
+            # If a limit was provided, limit the data retrieved from the database
+            if limit is not None:
+                sql += "LIMIT ?"
+                sql_args += (limit,)
+
+            txn.execute(sql, sql_args)
+
+            # Create and fill a dictionary of (user ID, device ID) -> list of messages
+            # intended for each device.
+            last_processed_stream_pos = to_stream_id
+            recipient_device_to_messages: Dict[Tuple[str, str], List[JsonDict]] = {}
+            for row in txn:
+                last_processed_stream_pos = row[0]
+                recipient_user_id = row[1]
+                recipient_device_id = row[2]
+                message_dict = db_to_json(row[3])
+
+                # File the message under its recipient (user ID, device ID) pair
+                recipient_device_to_messages.setdefault(
+                    (recipient_user_id, recipient_device_id), []
+                ).append(message_dict)
+
+            if limit is not None and txn.rowcount == limit:
+                # We ended up bumping up against the message limit. There may be more messages
+                # to retrieve. Return what we have, as well as the last stream position that
+                # was processed.
+                #
+                # The caller is expected to set this as the lower (exclusive) bound
+                # for the next query of this device.
+                return recipient_device_to_messages, last_processed_stream_pos
+
+            # The limit was not reached, thus we know that recipient_device_to_messages
+            # contains all to-device messages for the given device and stream id range.
+            #
+            # We return to_stream_id, which the caller should then provide as the lower
+            # (exclusive) bound on the next query of this device.
+            return recipient_device_to_messages, to_stream_id
 
         return await self.db_pool.runInteraction(
-            "get_new_messages_for_device", get_new_messages_for_device_txn
+            "get_device_messages", get_device_messages_txn
         )
 
     @trace
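Review note: a minimal sketch of how a caller can page through a single device's inbox with the new return shape. `store`, the stream id bounds and the page size are illustrative assumptions, not part of the change.

    # Sketch: drain one device's inbox in pages, assuming `store` is a
    # DeviceInboxWorkerStore and the stream id bounds are already known.
    async def drain_inbox(store, user_id, device_id, from_id, to_id):
        collected = []
        while True:
            batch, last_id = await store._get_device_messages(
                user_ids=[user_id],
                from_stream_id=from_id,
                to_stream_id=to_id,
                device_id=device_id,
                limit=100,
            )
            collected.extend(batch.get((user_id, device_id), []))
            if last_id == to_id:
                # The whole requested range has been processed.
                return collected
            # The limit was hit; continue from the last processed stream id,
            # which becomes the new exclusive lower bound.
            from_id = last_id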
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index a556f17dac..ca71f073fc 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -65,7 +65,7 @@ class _NoChainCoverIndex(Exception):
         super().__init__("Unexpectedly no chain cover for events in %s" % (room_id,))
 
 
-class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBaseStore):
+class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBaseStore):
     def __init__(
         self,
         database: DatabasePool,
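Review note: this base-class reordering is forced by Python's C3 linearization. Later in this diff `SignatureWorkerStore` gains `EventsWorkerStore` as a base class, and a subclass must always be listed before its own base in a derived class's bases. A standalone illustration:

    class EventsWorker:
        pass

    class SignatureWorker(EventsWorker):
        pass

    # Subclass listed before its base: a consistent MRO exists.
    class GoodStore(SignatureWorker, EventsWorker):
        pass

    # Base listed before its subclass: class creation fails.
    try:
        class BadStore(EventsWorker, SignatureWorker):
            pass
    except TypeError as e:
        print(e)  # "Cannot create a consistent method resolution order (MRO) ..."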
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 1ae1ebe108..b7554154ac 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1389,6 +1389,8 @@ class PersistEventsStore:
                 "received_ts",
                 "sender",
                 "contains_url",
+                "state_key",
+                "rejection_reason",
             ),
             values=(
                 (
@@ -1405,8 +1407,10 @@ class PersistEventsStore:
                     self._clock.time_msec(),
                     event.sender,
                     "url" in event.content and isinstance(event.content["url"], str),
+                    event.get_state_key(),
+                    context.rejected or None,
                 )
-                for event, _ in events_and_contexts
+                for event, context in events_and_contexts
             ),
         )
 
@@ -1456,6 +1460,7 @@ class PersistEventsStore:
         for event, context in events_and_contexts:
             if context.rejected:
                 # Insert the event_id into the rejections table
+                # (events.rejection_reason has already been done)
                 self._store_rejections_txn(txn, event.event_id, context.rejected)
                 to_remove.add(event)
 
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index 91b0576b85..e87a8fb85d 100644
--- a/synapse/storage/databases/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -390,7 +390,6 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
             "event_search",
             "events",
             "group_rooms",
-            "public_room_list_stream",
             "receipts_graph",
             "receipts_linearized",
             "room_aliases",
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index e01c94930a..92539f5d41 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -42,7 +42,7 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
-def _load_rules(rawrules, enabled_map, use_new_defaults=False):
+def _load_rules(rawrules, enabled_map):
     ruleslist = []
     for rawrule in rawrules:
         rule = dict(rawrule)
@@ -52,7 +52,7 @@ def _load_rules(rawrules, enabled_map, use_new_defaults=False):
         ruleslist.append(rule)
 
     # We're going to be mutating this a lot, so do a deep copy
-    rules = list(list_with_base_rules(ruleslist, use_new_defaults))
+    rules = list(list_with_base_rules(ruleslist))
 
     for i, rule in enumerate(rules):
         rule_id = rule["rule_id"]
@@ -112,10 +112,6 @@ class PushRulesWorkerStore(
             prefilled_cache=push_rules_prefill,
         )
 
-        self._users_new_default_push_rules = (
-            hs.config.server.users_new_default_push_rules
-        )
-
     @abc.abstractmethod
     def get_max_push_rules_stream_id(self):
         """Get the position of the push rules stream.
@@ -145,9 +141,7 @@ class PushRulesWorkerStore(
 
         enabled_map = await self.get_push_rules_enabled_for_user(user_id)
 
-        use_new_defaults = user_id in self._users_new_default_push_rules
-
-        return _load_rules(rows, enabled_map, use_new_defaults)
+        return _load_rules(rows, enabled_map)
 
     @cached(max_entries=5000)
     async def get_push_rules_enabled_for_user(self, user_id) -> Dict[str, bool]:
@@ -206,13 +200,7 @@ class PushRulesWorkerStore(
         enabled_map_by_user = await self.bulk_get_push_rules_enabled(user_ids)
 
         for user_id, rules in results.items():
-            use_new_defaults = user_id in self._users_new_default_push_rules
-
-            results[user_id] = _load_rules(
-                rules,
-                enabled_map_by_user.get(user_id, {}),
-                use_new_defaults,
-            )
+            results[user_id] = _load_rules(rules, enabled_map_by_user.get(user_id, {}))
 
         return results
 
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index 2cb5d06c13..37468a5183 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -13,17 +13,7 @@
 # limitations under the License.
 
 import logging
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Dict,
-    Iterable,
-    List,
-    Optional,
-    Tuple,
-    Union,
-    cast,
-)
+from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union, cast
 
 import attr
 from frozendict import frozendict
@@ -43,6 +33,7 @@ from synapse.storage.relations import (
     PaginationChunk,
     RelationPaginationToken,
 )
+from synapse.types import JsonDict
 from synapse.util.caches.descriptors import cached
 
 if TYPE_CHECKING:
@@ -51,6 +42,30 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _ThreadAggregation:
+    latest_event: EventBase
+    count: int
+    current_user_participated: bool
+
+
+@attr.s(slots=True, auto_attribs=True)
+class BundledAggregations:
+    """
+    The bundled aggregations for an event.
+
+    Some values require additional processing during serialization.
+    """
+
+    annotations: Optional[JsonDict] = None
+    references: Optional[JsonDict] = None
+    replace: Optional[EventBase] = None
+    thread: Optional[_ThreadAggregation] = None
+
+    def __bool__(self) -> bool:
+        return bool(self.annotations or self.references or self.replace or self.thread)
+
+
 class RelationsWorkerStore(SQLBaseStore):
     def __init__(
         self,
@@ -60,7 +75,6 @@ class RelationsWorkerStore(SQLBaseStore):
     ):
         super().__init__(database, db_conn, hs)
 
-        self._msc1849_enabled = hs.config.experimental.msc1849_enabled
         self._msc3440_enabled = hs.config.experimental.msc3440_enabled
 
     @cached(tree=True)
@@ -585,7 +599,7 @@ class RelationsWorkerStore(SQLBaseStore):
 
     async def _get_bundled_aggregation_for_event(
         self, event: EventBase, user_id: str
-    ) -> Optional[Dict[str, Any]]:
+    ) -> Optional[BundledAggregations]:
         """Generate bundled aggregations for an event.
 
         Note that this does not use a cache, but depends on cached methods.
@@ -616,24 +630,24 @@ class RelationsWorkerStore(SQLBaseStore):
         # The bundled aggregations to include, a mapping of relation type to a
         # type-specific value. Some types include the direct return type here
         # while others need more processing during serialization.
-        aggregations: Dict[str, Any] = {}
+        aggregations = BundledAggregations()
 
         annotations = await self.get_aggregation_groups_for_event(event_id, room_id)
         if annotations.chunk:
-            aggregations[RelationTypes.ANNOTATION] = annotations.to_dict()
+            aggregations.annotations = annotations.to_dict()
 
         references = await self.get_relations_for_event(
             event_id, room_id, RelationTypes.REFERENCE, direction="f"
         )
         if references.chunk:
-            aggregations[RelationTypes.REFERENCE] = references.to_dict()
+            aggregations.references = references.to_dict()
 
         edit = None
         if event.type == EventTypes.Message:
             edit = await self.get_applicable_edit(event_id, room_id)
 
         if edit:
-            aggregations[RelationTypes.REPLACE] = edit
+            aggregations.replace = edit
 
         # If this event is the start of a thread, include a summary of the replies.
         if self._msc3440_enabled:
@@ -644,11 +658,11 @@ class RelationsWorkerStore(SQLBaseStore):
                 event_id, room_id, user_id
             )
             if latest_thread_event:
-                aggregations[RelationTypes.THREAD] = {
-                    "latest_event": latest_thread_event,
-                    "count": thread_count,
-                    "current_user_participated": participated,
-                }
+                aggregations.thread = _ThreadAggregation(
+                    latest_event=latest_thread_event,
+                    count=thread_count,
+                    current_user_participated=participated,
+                )
 
         # Store the bundled aggregations in the event metadata for later use.
         return aggregations
@@ -657,7 +671,7 @@ class RelationsWorkerStore(SQLBaseStore):
         self,
         events: Iterable[EventBase],
         user_id: str,
-    ) -> Dict[str, Dict[str, Any]]:
+    ) -> Dict[str, BundledAggregations]:
         """Generate bundled aggregations for events.
 
         Args:
@@ -668,15 +682,12 @@ class RelationsWorkerStore(SQLBaseStore):
             A map of event ID to the bundled aggregation for the event. Not all
             events may have bundled aggregations in the results.
         """
-        # If bundled aggregations are disabled, nothing to do.
-        if not self._msc1849_enabled:
-            return {}
 
         # TODO Parallelize.
         results = {}
         for event in events:
             event_result = await self._get_bundled_aggregation_for_event(event, user_id)
-            if event_result is not None:
+            if event_result:
                 results[event.event_id] = event_result
 
         return results
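Review note: the `__bool__` override on the attrs class above is what lets the `if event_result:` check skip events with nothing bundled (the old code compared against `None`). A quick sketch of the semantics; the annotation payload shown is illustrative:

    from synapse.storage.databases.main.relations import BundledAggregations

    aggs = BundledAggregations()
    assert not aggs  # no fields populated, so falsy: the caller skips it

    aggs.annotations = {"chunk": [{"type": "m.reaction", "key": "👍", "count": 2}]}
    assert aggs  # any populated field makes the object truthy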
diff --git a/synapse/storage/databases/main/signatures.py b/synapse/storage/databases/main/signatures.py
index 3201623fe4..0518b8b910 100644
--- a/synapse/storage/databases/main/signatures.py
+++ b/synapse/storage/databases/main/signatures.py
@@ -12,16 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict, Iterable, List, Tuple
+from typing import Collection, Dict, List, Tuple
 
 from unpaddedbase64 import encode_base64
 
-from synapse.storage._base import SQLBaseStore
-from synapse.storage.types import Cursor
+from synapse.crypto.event_signing import compute_event_reference_hash
+from synapse.storage.databases.main.events_worker import (
+    EventRedactBehaviour,
+    EventsWorkerStore,
+)
 from synapse.util.caches.descriptors import cached, cachedList
 
 
-class SignatureWorkerStore(SQLBaseStore):
+class SignatureWorkerStore(EventsWorkerStore):
     @cached()
     def get_event_reference_hash(self, event_id):
         # This is a dummy function to allow get_event_reference_hashes
@@ -32,7 +35,7 @@ class SignatureWorkerStore(SQLBaseStore):
         cached_method_name="get_event_reference_hash", list_name="event_ids", num_args=1
     )
     async def get_event_reference_hashes(
-        self, event_ids: Iterable[str]
+        self, event_ids: Collection[str]
     ) -> Dict[str, Dict[str, bytes]]:
         """Get all hashes for given events.
 
@@ -41,18 +44,27 @@ class SignatureWorkerStore(SQLBaseStore):
 
         Returns:
              A mapping of event ID to a mapping of algorithm to hash.
+             Returns an empty dict for a given event id if that event is unknown.
         """
+        events = await self.get_events(
+            event_ids,
+            redact_behaviour=EventRedactBehaviour.AS_IS,
+            allow_rejected=True,
+        )
 
-        def f(txn):
-            return {
-                event_id: self._get_event_reference_hashes_txn(txn, event_id)
-                for event_id in event_ids
-            }
+        hashes: Dict[str, Dict[str, bytes]] = {}
+        for event_id in event_ids:
+            event = events.get(event_id)
+            if event is None:
+                hashes[event_id] = {}
+            else:
+                ref_alg, ref_hash_bytes = compute_event_reference_hash(event)
+                hashes[event_id] = {ref_alg: ref_hash_bytes}
 
-        return await self.db_pool.runInteraction("get_event_reference_hashes", f)
+        return hashes
 
     async def add_event_hashes(
-        self, event_ids: Iterable[str]
+        self, event_ids: Collection[str]
     ) -> List[Tuple[str, Dict[str, str]]]:
         """
 
@@ -70,24 +82,6 @@ class SignatureWorkerStore(SQLBaseStore):
 
         return list(encoded_hashes.items())
 
-    def _get_event_reference_hashes_txn(
-        self, txn: Cursor, event_id: str
-    ) -> Dict[str, bytes]:
-        """Get all the hashes for a given PDU.
-        Args:
-            txn:
-            event_id: Id for the Event.
-        Returns:
-            A mapping of algorithm -> hash.
-        """
-        query = (
-            "SELECT algorithm, hash"
-            " FROM event_reference_hashes"
-            " WHERE event_id = ?"
-        )
-        txn.execute(query, (event_id,))
-        return {k: v for k, v in txn}
-
 
 class SignatureStore(SignatureWorkerStore):
     """Persistence for event signatures and hashes"""
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 319464b1fa..a898f847e7 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -81,6 +81,14 @@ class _EventDictReturn:
     stream_ordering: int
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _EventsAround:
+    events_before: List[EventBase]
+    events_after: List[EventBase]
+    start: RoomStreamToken
+    end: RoomStreamToken
+
+
 def generate_pagination_where_clause(
     direction: str,
     column_names: Tuple[str, str],
@@ -846,7 +854,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         before_limit: int,
         after_limit: int,
         event_filter: Optional[Filter] = None,
-    ) -> dict:
+    ) -> _EventsAround:
         """Retrieve events and pagination tokens around a given event in a
         room.
         """
@@ -869,12 +877,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             list(results["after"]["event_ids"]), get_prev_content=True
         )
 
-        return {
-            "events_before": events_before,
-            "events_after": events_after,
-            "start": results["before"]["token"],
-            "end": results["after"]["token"],
-        }
+        return _EventsAround(
+            events_before=events_before,
+            events_after=events_after,
+            start=results["before"]["token"],
+            end=results["after"]["token"],
+        )
 
     def _get_events_around_txn(
         self,
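Review note: with `_EventsAround`, callers use typed attributes instead of string keys, so a misspelled field fails loudly and mypy can check the result. A sketch, assuming `store` exposes `get_events_around` as above:

    around = await store.get_events_around(
        room_id, event_id, before_limit=10, after_limit=10
    )
    # Attribute access replaces results["events_before"] and friends.
    for ev in around.events_before:
        print(ev.event_id)
    start_token, end_token = around.start, around.end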
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index 4b78b4d098..ba79e19f7f 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -561,6 +561,54 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
             "get_destinations_paginate_txn", get_destinations_paginate_txn
         )
 
+    async def get_destination_rooms_paginate(
+        self, destination: str, start: int, limit: int, direction: str = "f"
+    ) -> Tuple[List[JsonDict], int]:
+        """Function to retrieve a paginated list of destination's rooms.
+        This will return a json list of rooms and the
+        total number of rooms.
+
+        Args:
+            destination: the destination to query
+            start: start number to begin the query from
+            limit: number of rows to retrieve
+            direction: sort ascending or descending by room_id
+        Returns:
+            A tuple of a list of room dicts and a count of total rooms.
+        """
+
+        def get_destination_rooms_paginate_txn(
+            txn: LoggingTransaction,
+        ) -> Tuple[List[JsonDict], int]:
+
+            if direction == "b":
+                order = "DESC"
+            else:
+                order = "ASC"
+
+            sql = """
+                SELECT COUNT(*) as total_rooms
+                FROM destination_rooms
+                WHERE destination = ?
+                """
+            txn.execute(sql, [destination])
+            count = cast(Tuple[int], txn.fetchone())[0]
+
+            rooms = self.db_pool.simple_select_list_paginate_txn(
+                txn=txn,
+                table="destination_rooms",
+                orderby="room_id",
+                start=start,
+                limit=limit,
+                retcols=("room_id", "stream_ordering"),
+                order_direction=order,
+            )
+            return rooms, count
+
+        return await self.db_pool.runInteraction(
+            "get_destination_rooms_paginate_txn", get_destination_rooms_paginate_txn
+        )
+
     async def is_destination_known(self, destination: str) -> bool:
         """Check if a destination is known to the server."""
         result = await self.db_pool.simple_select_one_onecol(
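Review note: a short usage sketch for the new pagination helper; `store` is assumed to be a `TransactionWorkerStore` and the destination name is illustrative.

    rooms, total = await store.get_destination_rooms_paginate(
        destination="remote.example.org",
        start=0,
        limit=10,
        direction="f",  # "b" would sort room_id descending instead
    )
    for room in rooms:
        print(room["room_id"], room["stream_ordering"])
    print(f"{total} rooms in total for this destination")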
diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
index 20cd63c330..143cd98ca2 100644
--- a/synapse/storage/engines/_base.py
+++ b/synapse/storage/engines/_base.py
@@ -12,11 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import abc
-from typing import Generic, TypeVar
+from enum import IntEnum
+from typing import Generic, Optional, TypeVar
 
 from synapse.storage.types import Connection
 
 
+class IsolationLevel(IntEnum):
+    READ_COMMITTED: int = 1
+    REPEATABLE_READ: int = 2
+    SERIALIZABLE: int = 3
+
+
 class IncorrectDatabaseSetup(RuntimeError):
     pass
 
@@ -109,3 +116,13 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
         commit/rollback the connections.
         """
         ...
+
+    @abc.abstractmethod
+    def attempt_to_set_isolation_level(
+        self, conn: Connection, isolation_level: Optional[int]
+    ):
+        """Attempt to set the connections isolation level.
+
+        Note: This has no effect on SQLite3, as transactions are SERIALIZABLE by default.
+        """
+        ...
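Review note: a sketch of how the new hook is meant to be driven. On PostgreSQL the enum value is mapped to the psycopg2 constant (see the engine implementation below); on SQLite it is a no-op. `engine` and `conn` are assumed to exist.

    from synapse.storage.engines._base import IsolationLevel

    # Request weaker isolation for a particular connection ...
    engine.attempt_to_set_isolation_level(conn, IsolationLevel.READ_COMMITTED)

    # ... and pass None to restore the engine default
    # (REPEATABLE READ on PostgreSQL).
    engine.attempt_to_set_isolation_level(conn, None)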
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 30f948a0f7..808342fafb 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -13,8 +13,13 @@
 # limitations under the License.
 
 import logging
+from typing import Mapping, Optional
 
-from synapse.storage.engines._base import BaseDatabaseEngine, IncorrectDatabaseSetup
+from synapse.storage.engines._base import (
+    BaseDatabaseEngine,
+    IncorrectDatabaseSetup,
+    IsolationLevel,
+)
 from synapse.storage.types import Connection
 
 logger = logging.getLogger(__name__)
@@ -34,6 +39,15 @@ class PostgresEngine(BaseDatabaseEngine):
         self.synchronous_commit = database_config.get("synchronous_commit", True)
         self._version = None  # unknown as yet
 
+        self.isolation_level_map: Mapping[int, int] = {
+            IsolationLevel.READ_COMMITTED: self.module.extensions.ISOLATION_LEVEL_READ_COMMITTED,
+            IsolationLevel.REPEATABLE_READ: self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ,
+            IsolationLevel.SERIALIZABLE: self.module.extensions.ISOLATION_LEVEL_SERIALIZABLE,
+        }
+        self.default_isolation_level = (
+            self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
+        )
+
     @property
     def single_threaded(self) -> bool:
         return False
@@ -46,8 +60,8 @@ class PostgresEngine(BaseDatabaseEngine):
         self._version = db_conn.server_version
 
         # Are we on a supported PostgreSQL version?
-        if not allow_outdated_version and self._version < 90600:
-            raise RuntimeError("Synapse requires PostgreSQL 9.6 or above.")
+        if not allow_outdated_version and self._version < 100000:
+            raise RuntimeError("Synapse requires PostgreSQL 10 or above.")
 
         with db_conn.cursor() as txn:
             txn.execute("SHOW SERVER_ENCODING")
@@ -104,9 +118,7 @@ class PostgresEngine(BaseDatabaseEngine):
         return sql.replace("?", "%s")
 
     def on_new_connection(self, db_conn):
-        db_conn.set_isolation_level(
-            self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
-        )
+        db_conn.set_isolation_level(self.default_isolation_level)
 
         # Set the bytea output to escape, vs the default of hex
         cursor = db_conn.cursor()
@@ -175,3 +187,12 @@ class PostgresEngine(BaseDatabaseEngine):
 
     def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool):
         return conn.set_session(autocommit=autocommit)  # type: ignore
+
+    def attempt_to_set_isolation_level(
+        self, conn: Connection, isolation_level: Optional[int]
+    ):
+        if isolation_level is None:
+            isolation_level = self.default_isolation_level
+        else:
+            isolation_level = self.isolation_level_map[isolation_level]
+        return conn.set_isolation_level(isolation_level)  # type: ignore
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index 70d17d4f2c..6c19e55999 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -15,6 +15,7 @@ import platform
 import struct
 import threading
 import typing
+from typing import Optional
 
 from synapse.storage.engines import BaseDatabaseEngine
 from synapse.storage.types import Connection
@@ -122,6 +123,12 @@ class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]):
         # set the connection to autocommit mode.
         pass
 
+    def attempt_to_set_isolation_level(
+        self, conn: Connection, isolation_level: Optional[int]
+    ):
+        # All transactions are SERIALIZABLE by default in SQLite
+        pass
+
 
 # Following functions taken from: https://github.com/coleifer/peewee
 
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 1823e18720..e3153d1a4a 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -499,9 +499,12 @@ def _upgrade_existing_database(
                 module = importlib.util.module_from_spec(spec)
                 spec.loader.exec_module(module)  # type: ignore
 
-                logger.info("Running script %s", relative_path)
-                module.run_create(cur, database_engine)  # type: ignore
-                if not is_empty:
+                if hasattr(module, "run_create"):
+                    logger.info("Running %s:run_create", relative_path)
+                    module.run_create(cur, database_engine)  # type: ignore
+
+                if not is_empty and hasattr(module, "run_upgrade"):
+                    logger.info("Running %s:run_upgrade", relative_path)
                     module.run_upgrade(cur, database_engine, config=config)  # type: ignore
             elif ext == ".pyc" or file_name == "__pycache__":
                 # Sometimes .pyc files turn up anyway even though we've
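Review note: with the `hasattr` checks above, a Python schema delta may now define either hook (or both), rather than always providing `run_create`. A minimal, hypothetical delta module under these rules:

    # e.g. synapse/storage/schema/main/delta/68/99_example.py (hypothetical)

    def run_create(cur, database_engine):
        """Runs whenever the delta is applied, on empty and existing databases."""
        cur.execute("CREATE TABLE IF NOT EXISTS example_table (id BIGINT)")

    def run_upgrade(cur, database_engine, config):
        """Runs only when upgrading an existing (non-empty) database."""
        cur.execute("INSERT INTO example_table (id) VALUES (1)")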
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 2a3d47185a..7b21c1b96d 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-SCHEMA_VERSION = 67  # remember to update the list below when updating
+SCHEMA_VERSION = 68  # remember to update the list below when updating
 """Represents the expectations made by the codebase about the database schema
 
 This should be incremented whenever the codebase changes its requirements on the
@@ -53,11 +53,18 @@ Changes in SCHEMA_VERSION = 66:
 
 Changes in SCHEMA_VERSION = 67:
     - state_events.prev_state is no longer written to.
+
+Changes in SCHEMA_VERSION = 68:
+    - event_reference_hashes is no longer read.
+    - `events` has `state_key` and `rejection_reason` columns, which are populated for
+      new events.
 """
 
 
 SCHEMA_COMPAT_VERSION = (
-    61  # 61: Remove unused tables `user_stats_historical` and `room_stats_historical`
+    # we now have `state_key` columns in both `events` and `state_events`, so
+    # we are now incompatible with Synapse versions whose SCHEMA_VERSION is < 66.
+    66
 )
 """Limit on how far the synapse codebase can be rolled back without breaking db compat
 
diff --git a/synapse/storage/schema/main/delta/67/01drop_public_room_list_stream.sql b/synapse/storage/schema/main/delta/67/01drop_public_room_list_stream.sql
new file mode 100644
index 0000000000..1eb8de9907
--- /dev/null
+++ b/synapse/storage/schema/main/delta/67/01drop_public_room_list_stream.sql
@@ -0,0 +1,18 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- this table is unused as of Synapse 1.41
+DROP TABLE public_room_list_stream;
+
diff --git a/synapse/storage/schema/main/delta/68/01event_columns.sql b/synapse/storage/schema/main/delta/68/01event_columns.sql
new file mode 100644
index 0000000000..7c072f972e
--- /dev/null
+++ b/synapse/storage/schema/main/delta/68/01event_columns.sql
@@ -0,0 +1,26 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add new columns to the `events` table which will (one day) make the `state_events`
+-- and `rejections` tables redundant.
+
+ALTER TABLE events
+  -- if this event is a state event, its state key
+  ADD COLUMN state_key TEXT DEFAULT NULL;
+
+
+ALTER TABLE events
+  -- if this event was rejected, the reason it was rejected.
+  ADD COLUMN rejection_reason TEXT DEFAULT NULL;
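Review note: once populated (for new events only), these columns let state and rejection questions be answered from `events` alone. A hedged sketch of such a read, using a transaction cursor; not part of this change:

    txn.execute(
        "SELECT event_id, state_key, rejection_reason"
        " FROM events"
        " WHERE room_id = ? AND state_key IS NOT NULL",
        (room_id,),
    )
    for event_id, state_key, rejection_reason in txn:
        # rejection_reason is NULL (None) for accepted events.
        print(event_id, state_key, rejection_reason)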
diff --git a/synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql b/synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql
new file mode 100644
index 0000000000..bbf0af5311
--- /dev/null
+++ b/synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql
@@ -0,0 +1,21 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a column to track the to_device stream id that this application
+-- service has been caught up to.
+
+-- NULL indicates that this appservice has never received any to_device messages. This
+-- can be used, for example, to avoid sending a huge dump of messages at startup.
+ALTER TABLE application_services_state ADD COLUMN to_device_stream_id BIGINT;
\ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/68/03_delete_account_data_for_deactivated_accounts.sql b/synapse/storage/schema/main/delta/68/03_delete_account_data_for_deactivated_accounts.sql
new file mode 100644
index 0000000000..e124933843
--- /dev/null
+++ b/synapse/storage/schema/main/delta/68/03_delete_account_data_for_deactivated_accounts.sql
@@ -0,0 +1,20 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- We want to retroactively delete account data for users that were already
+-- deactivated.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (6803, 'delete_account_data_for_deactivated_users', '{}');
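Review note: the INSERT above only queues the job; a handler registered under the same name must exist in Python for it to run. The registration pattern (handler method elided) looks like:

    self.db_pool.updates.register_background_update_handler(
        "delete_account_data_for_deactivated_users",
        self._delete_account_data_for_deactivated_users,
    )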
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index df8b2f1088..913448f0f9 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -74,21 +74,21 @@ class StateFilter:
 
     @staticmethod
     def all() -> "StateFilter":
-        """Creates a filter that fetches everything.
+        """Returns a filter that fetches everything.
 
         Returns:
-            The new state filter.
+            The state filter.
         """
-        return StateFilter(types=frozendict(), include_others=True)
+        return _ALL_STATE_FILTER
 
     @staticmethod
     def none() -> "StateFilter":
-        """Creates a filter that fetches nothing.
+        """Returns a filter that fetches nothing.
 
         Returns:
             The new state filter.
         """
-        return StateFilter(types=frozendict(), include_others=False)
+        return _NONE_STATE_FILTER
 
     @staticmethod
     def from_types(types: Iterable[Tuple[str, Optional[str]]]) -> "StateFilter":
@@ -527,6 +527,10 @@ class StateFilter:
         )
 
 
+_ALL_STATE_FILTER = StateFilter(types=frozendict(), include_others=True)
+_NONE_STATE_FILTER = StateFilter(types=frozendict(), include_others=False)
+
+
 class StateGroupStorage:
     """High level interface to fetching state for event."""
 
diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py
index 377c9a282a..1d6ec22191 100644
--- a/synapse/util/caches/deferred_cache.py
+++ b/synapse/util/caches/deferred_cache.py
@@ -81,13 +81,14 @@ class DeferredCache(Generic[KT, VT]):
         Args:
             name: The name of the cache
             max_entries: Maximum amount of entries that the cache will hold
-            keylen: The length of the tuple used as the cache key. Ignored unless
-               `tree` is True.
             tree: Use a TreeCache instead of a dict as the underlying cache type
             iterable: If True, count each item in the cached object as an entry,
                 rather than each cached object
             apply_cache_factor_from_config: Whether cache factors specified in the
                 config file affect `max_entries`
+            prune_unread_entries: If True, cache entries that haven't been read recently
+                will be evicted from the cache in the background. Set to False to
+                opt-out of this behaviour.
         """
         cache_type = TreeCache if tree else dict
 
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 375cd443f1..df4fb156c2 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -254,9 +254,17 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
             return r1 + r2
 
     Args:
+        orig: the original function being wrapped and cached
+        max_entries: the maximum number of entries the cache will hold
         num_args: number of positional arguments (excluding ``self`` and
             ``cache_context``) to use as cache keys. Defaults to all named
             args of the function.
+        tree: Use a TreeCache instead of a dict as the underlying cache type
+        cache_context: if True, pass a `cache_context` kwarg to the wrapped
+            function, which can be used to invalidate this cache entry from
+            within the function itself
+        iterable: If True, count each item in the cached object as an entry,
+            rather than each cached object
+        prune_unread_entries: If True, cache entries that haven't been read recently
+            will be evicted from the cache in the background. Set to False to opt-out
+            of this behaviour.
     """
 
     def __init__(
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index 3f11a2f9dd..7548b38548 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -340,6 +340,12 @@ class LruCache(Generic[KT, VT]):
 
             apply_cache_factor_from_config (bool): If true, `max_size` will be
                 multiplied by a cache factor derived from the homeserver config
+
+            clock: the clock to use; a sensible default is created if not provided
+
+            prune_unread_entries: If True, cache entries that haven't been read recently
+                will be evicted from the cache in the background. Set to False to
+                opt-out of this behaviour.
         """
         # Default `clock` to something sensible. Note that we rename it to
         # `real_clock` so that mypy doesn't think its still `Optional`.
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 17532059e9..1b970ce479 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -87,7 +87,7 @@ async def filter_events_for_client(
     )
 
     ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user(
-        AccountDataTypes.IGNORED_USER_LIST, user_id
+        user_id, AccountDataTypes.IGNORED_USER_LIST
     )
 
     ignore_list: FrozenSet[str] = frozenset()
diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py
index ba2a2bfd64..07d8105f41 100644
--- a/tests/appservice/test_appservice.py
+++ b/tests/appservice/test_appservice.py
@@ -19,6 +19,7 @@ from twisted.internet import defer
 from synapse.appservice import ApplicationService, Namespace
 
 from tests import unittest
+from tests.test_utils import simple_async_mock
 
 
 def _regex(regex: str, exclusive: bool = True) -> Namespace:
@@ -91,10 +92,10 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.service.namespaces[ApplicationService.NS_ALIASES].append(
             _regex("#irc_.*:matrix.org")
         )
-        self.store.get_aliases_for_room.return_value = defer.succeed(
+        self.store.get_aliases_for_room = simple_async_mock(
             ["#irc_foobar:matrix.org", "#athing:matrix.org"]
         )
-        self.store.get_users_in_room.return_value = defer.succeed([])
+        self.store.get_users_in_room = simple_async_mock([])
         self.assertTrue(
             (
                 yield defer.ensureDeferred(
@@ -144,10 +145,10 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.service.namespaces[ApplicationService.NS_ALIASES].append(
             _regex("#irc_.*:matrix.org")
         )
-        self.store.get_aliases_for_room.return_value = defer.succeed(
+        self.store.get_aliases_for_room = simple_async_mock(
             ["#xmpp_foobar:matrix.org", "#athing:matrix.org"]
         )
-        self.store.get_users_in_room.return_value = defer.succeed([])
+        self.store.get_users_in_room = simple_async_mock([])
         self.assertFalse(
             (
                 yield defer.ensureDeferred(
@@ -163,10 +164,8 @@ class ApplicationServiceTestCase(unittest.TestCase):
         )
         self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
         self.event.sender = "@irc_foobar:matrix.org"
-        self.store.get_aliases_for_room.return_value = defer.succeed(
-            ["#irc_barfoo:matrix.org"]
-        )
-        self.store.get_users_in_room.return_value = defer.succeed([])
+        self.store.get_aliases_for_room = simple_async_mock(["#irc_barfoo:matrix.org"])
+        self.store.get_users_in_room = simple_async_mock([])
         self.assertTrue(
             (
                 yield defer.ensureDeferred(
@@ -191,10 +190,10 @@ class ApplicationServiceTestCase(unittest.TestCase):
     def test_member_list_match(self):
         self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
         # Note that @irc_fo:here is the AS user.
-        self.store.get_users_in_room.return_value = defer.succeed(
+        self.store.get_users_in_room = simple_async_mock(
             ["@alice:here", "@irc_fo:here", "@bob:here"]
         )
-        self.store.get_aliases_for_room.return_value = defer.succeed([])
+        self.store.get_aliases_for_room = simple_async_mock([])
 
         self.event.sender = "@xmpp_foobar:matrix.org"
         self.assertTrue(
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
index 55f0899bae..8fb6687f89 100644
--- a/tests/appservice/test_scheduler.py
+++ b/tests/appservice/test_scheduler.py
@@ -11,23 +11,29 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import TYPE_CHECKING
 from unittest.mock import Mock
 
 from twisted.internet import defer
 
 from synapse.appservice import ApplicationServiceState
 from synapse.appservice.scheduler import (
+    ApplicationServiceScheduler,
     _Recoverer,
-    _ServiceQueuer,
     _TransactionController,
 )
 from synapse.logging.context import make_deferred_yieldable
+from synapse.server import HomeServer
+from synapse.util import Clock
 
 from tests import unittest
 from tests.test_utils import simple_async_mock
 
 from ..utils import MockClock
 
+if TYPE_CHECKING:
+    from twisted.internet.testing import MemoryReactor
+
 
 class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
     def setUp(self):
@@ -58,7 +64,10 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
         self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events)))
 
         self.store.create_appservice_txn.assert_called_once_with(
-            service=service, events=events, ephemeral=[]  # txn made and saved
+            service=service,
+            events=events,
+            ephemeral=[],
+            to_device_messages=[],  # txn made and saved
         )
         self.assertEquals(0, len(self.txnctrl.recoverers))  # no recoverer made
         txn.complete.assert_called_once_with(self.store)  # txn completed
@@ -79,7 +88,10 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
         self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events)))
 
         self.store.create_appservice_txn.assert_called_once_with(
-            service=service, events=events, ephemeral=[]  # txn made and saved
+            service=service,
+            events=events,
+            ephemeral=[],
+            to_device_messages=[],  # txn made and saved
         )
         self.assertEquals(0, txn.send.call_count)  # txn not sent though
         self.assertEquals(0, txn.complete.call_count)  # or completed
@@ -102,7 +114,7 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
         self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events)))
 
         self.store.create_appservice_txn.assert_called_once_with(
-            service=service, events=events, ephemeral=[]
+            service=service, events=events, ephemeral=[], to_device_messages=[]
         )
         self.assertEquals(1, self.recoverer_fn.call_count)  # recoverer made
         self.assertEquals(1, self.recoverer.recover.call_count)  # and invoked
@@ -189,38 +201,41 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
         self.callback.assert_called_once_with(self.recoverer)
 
 
-class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase):
-    def setUp(self):
+class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase):
+    def prepare(self, reactor: "MemoryReactor", clock: Clock, hs: HomeServer):
+        self.scheduler = ApplicationServiceScheduler(hs)
         self.txn_ctrl = Mock()
         self.txn_ctrl.send = simple_async_mock()
-        self.queuer = _ServiceQueuer(self.txn_ctrl, MockClock())
+
+        # Replace instantiated _TransactionController instances with our Mock
+        self.scheduler.txn_ctrl = self.txn_ctrl
+        self.scheduler.queuer.txn_ctrl = self.txn_ctrl
 
     def test_send_single_event_no_queue(self):
         # Expect the event to be sent immediately.
         service = Mock(id=4)
         event = Mock()
-        self.queuer.enqueue_event(service, event)
-        self.txn_ctrl.send.assert_called_once_with(service, [event], [])
+        self.scheduler.enqueue_for_appservice(service, events=[event])
+        self.txn_ctrl.send.assert_called_once_with(service, [event], [], [])
 
     def test_send_single_event_with_queue(self):
         d = defer.Deferred()
-        self.txn_ctrl.send = Mock(
-            side_effect=lambda x, y, z: make_deferred_yieldable(d)
-        )
+        self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d))
         service = Mock(id=4)
         event = Mock(event_id="first")
         event2 = Mock(event_id="second")
         event3 = Mock(event_id="third")
         # Send an event and don't resolve it just yet.
-        self.queuer.enqueue_event(service, event)
+        self.scheduler.enqueue_for_appservice(service, events=[event])
         # Send more events: expect send() to NOT be called multiple times.
-        self.queuer.enqueue_event(service, event2)
-        self.queuer.enqueue_event(service, event3)
-        self.txn_ctrl.send.assert_called_with(service, [event], [])
+        # (call enqueue_for_appservice multiple times deliberately)
+        self.scheduler.enqueue_for_appservice(service, events=[event2])
+        self.scheduler.enqueue_for_appservice(service, events=[event3])
+        self.txn_ctrl.send.assert_called_with(service, [event], [], [])
         self.assertEquals(1, self.txn_ctrl.send.call_count)
         # Resolve the send event: expect the queued events to be sent
         d.callback(service)
-        self.txn_ctrl.send.assert_called_with(service, [event2, event3], [])
+        self.txn_ctrl.send.assert_called_with(service, [event2, event3], [], [])
         self.assertEquals(2, self.txn_ctrl.send.call_count)
 
     def test_multiple_service_queues(self):
@@ -238,23 +253,23 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase):
 
         send_return_list = [srv_1_defer, srv_2_defer]
 
-        def do_send(x, y, z):
+        def do_send(*args, **kwargs):
             return make_deferred_yieldable(send_return_list.pop(0))
 
         self.txn_ctrl.send = Mock(side_effect=do_send)
 
         # send events for different ASes and make sure they are sent
-        self.queuer.enqueue_event(srv1, srv_1_event)
-        self.queuer.enqueue_event(srv1, srv_1_event2)
-        self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event], [])
-        self.queuer.enqueue_event(srv2, srv_2_event)
-        self.queuer.enqueue_event(srv2, srv_2_event2)
-        self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event], [])
+        self.scheduler.enqueue_for_appservice(srv1, events=[srv_1_event])
+        self.scheduler.enqueue_for_appservice(srv1, events=[srv_1_event2])
+        self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event], [], [])
+        self.scheduler.enqueue_for_appservice(srv2, events=[srv_2_event])
+        self.scheduler.enqueue_for_appservice(srv2, events=[srv_2_event2])
+        self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event], [], [])
 
         # make sure callbacks for a service only send queued events for THAT
         # service
         srv_2_defer.callback(srv2)
-        self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], [])
+        self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], [], [])
         self.assertEquals(3, self.txn_ctrl.send.call_count)
 
     def test_send_large_txns(self):
@@ -262,7 +277,7 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase):
         srv_2_defer = defer.Deferred()
         send_return_list = [srv_1_defer, srv_2_defer]
 
-        def do_send(x, y, z):
+        def do_send(*args, **kwargs):
             return make_deferred_yieldable(send_return_list.pop(0))
 
         self.txn_ctrl.send = Mock(side_effect=do_send)
@@ -270,67 +285,65 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase):
         service = Mock(id=4, name="service")
         event_list = [Mock(name="event%i" % (i + 1)) for i in range(200)]
         for event in event_list:
-            self.queuer.enqueue_event(service, event)
+            self.scheduler.enqueue_for_appservice(service, [event], [])
 
         # Expect the first event to be sent immediately.
-        self.txn_ctrl.send.assert_called_with(service, [event_list[0]], [])
+        self.txn_ctrl.send.assert_called_with(service, [event_list[0]], [], [])
         srv_1_defer.callback(service)
         # Then send the next 100 events
-        self.txn_ctrl.send.assert_called_with(service, event_list[1:101], [])
+        self.txn_ctrl.send.assert_called_with(service, event_list[1:101], [], [])
         srv_2_defer.callback(service)
         # Then the final 99 events
-        self.txn_ctrl.send.assert_called_with(service, event_list[101:], [])
+        self.txn_ctrl.send.assert_called_with(service, event_list[101:], [], [])
         self.assertEquals(3, self.txn_ctrl.send.call_count)
 
     def test_send_single_ephemeral_no_queue(self):
         # Expect the event to be sent immediately.
         service = Mock(id=4, name="service")
         event_list = [Mock(name="event")]
-        self.queuer.enqueue_ephemeral(service, event_list)
-        self.txn_ctrl.send.assert_called_once_with(service, [], event_list)
+        self.scheduler.enqueue_for_appservice(service, ephemeral=event_list)
+        self.txn_ctrl.send.assert_called_once_with(service, [], event_list, [])
 
     def test_send_multiple_ephemeral_no_queue(self):
         # Expect the event to be sent immediately.
         service = Mock(id=4, name="service")
         event_list = [Mock(name="event1"), Mock(name="event2"), Mock(name="event3")]
-        self.queuer.enqueue_ephemeral(service, event_list)
-        self.txn_ctrl.send.assert_called_once_with(service, [], event_list)
+        self.scheduler.enqueue_for_appservice(service, ephemeral=event_list)
+        self.txn_ctrl.send.assert_called_once_with(service, [], event_list, [])
 
     def test_send_single_ephemeral_with_queue(self):
         d = defer.Deferred()
-        self.txn_ctrl.send = Mock(
-            side_effect=lambda x, y, z: make_deferred_yieldable(d)
-        )
+        self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d))
         service = Mock(id=4)
         event_list_1 = [Mock(event_id="event1"), Mock(event_id="event2")]
         event_list_2 = [Mock(event_id="event3"), Mock(event_id="event4")]
         event_list_3 = [Mock(event_id="event5"), Mock(event_id="event6")]
 
         # Send an event and don't resolve it just yet.
-        self.queuer.enqueue_ephemeral(service, event_list_1)
+        self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_1)
         # Send more events: expect send() to NOT be called multiple times.
-        self.queuer.enqueue_ephemeral(service, event_list_2)
-        self.queuer.enqueue_ephemeral(service, event_list_3)
-        self.txn_ctrl.send.assert_called_with(service, [], event_list_1)
+        self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_2)
+        self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_3)
+        self.txn_ctrl.send.assert_called_with(service, [], event_list_1, [])
         self.assertEquals(1, self.txn_ctrl.send.call_count)
         # Resolve txn_ctrl.send
         d.callback(service)
         # Expect the queued events to be sent
-        self.txn_ctrl.send.assert_called_with(service, [], event_list_2 + event_list_3)
+        self.txn_ctrl.send.assert_called_with(
+            service, [], event_list_2 + event_list_3, []
+        )
         self.assertEquals(2, self.txn_ctrl.send.call_count)
 
     def test_send_large_txns_ephemeral(self):
         d = defer.Deferred()
-        self.txn_ctrl.send = Mock(
-            side_effect=lambda x, y, z: make_deferred_yieldable(d)
-        )
+        self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d))
         # Expect the event to be sent immediately.
         service = Mock(id=4, name="service")
         first_chunk = [Mock(name="event%i" % (i + 1)) for i in range(100)]
         second_chunk = [Mock(name="event%i" % (i + 101)) for i in range(50)]
         event_list = first_chunk + second_chunk
-        self.queuer.enqueue_ephemeral(service, event_list)
-        self.txn_ctrl.send.assert_called_once_with(service, [], first_chunk)
+        self.scheduler.enqueue_for_appservice(service, ephemeral=event_list)
+        self.txn_ctrl.send.assert_called_once_with(service, [], first_chunk, [])
         d.callback(service)
-        self.txn_ctrl.send.assert_called_with(service, [], second_chunk)
+        self.txn_ctrl.send.assert_called_with(service, [], second_chunk, [])
         self.assertEquals(2, self.txn_ctrl.send.call_count)
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index d6f14e2dba..fe57ff2671 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2015-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,18 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import Dict, Iterable, List, Optional
 from unittest.mock import Mock
 
 from twisted.internet import defer
 
+import synapse.rest.admin
+import synapse.storage
+from synapse.appservice import ApplicationService
 from synapse.handlers.appservice import ApplicationServicesHandler
+from synapse.rest.client import login, receipts, room, sendtodevice
 from synapse.types import RoomStreamToken
+from synapse.util.stringutils import random_string
 
-from tests.test_utils import make_awaitable
+from tests import unittest
+from tests.test_utils import make_awaitable, simple_async_mock
 from tests.utils import MockClock
 
-from .. import unittest
-
 
 class AppServiceHandlerTestCase(unittest.TestCase):
     """Tests the ApplicationServicesHandler."""
@@ -36,6 +41,9 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         hs.get_datastore.return_value = self.mock_store
         self.mock_store.get_received_ts.return_value = make_awaitable(0)
         self.mock_store.set_appservice_last_pos.return_value = make_awaitable(None)
+        self.mock_store.set_appservice_stream_type_pos.return_value = make_awaitable(
+            None
+        )
         hs.get_application_service_api.return_value = self.mock_as_api
         hs.get_application_service_scheduler.return_value = self.mock_scheduler
         hs.get_clock.return_value = MockClock()
@@ -63,8 +71,8 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         ]
         self.handler.notify_interested_services(RoomStreamToken(None, 1))
 
-        self.mock_scheduler.submit_event_for_as.assert_called_once_with(
-            interested_service, event
+        self.mock_scheduler.enqueue_for_appservice.assert_called_once_with(
+            interested_service, events=[event]
         )
 
     def test_query_user_exists_unknown_user(self):
@@ -261,7 +269,6 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         """
         interested_service = self._mkservice(is_interested=True)
         services = [interested_service]
-
         self.mock_store.get_app_services.return_value = services
         self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable(
             579
@@ -275,10 +282,10 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         self.handler.notify_interested_services_ephemeral(
             "receipt_key", 580, ["@fakerecipient:example.com"]
         )
-        self.mock_scheduler.submit_ephemeral_events_for_as.assert_called_once_with(
-            interested_service, [event]
+        self.mock_scheduler.enqueue_for_appservice.assert_called_once_with(
+            interested_service, ephemeral=[event]
         )
-        self.mock_store.set_type_stream_id_for_appservice.assert_called_once_with(
+        self.mock_store.set_appservice_stream_type_pos.assert_called_once_with(
             interested_service,
             "read_receipt",
             580,
@@ -305,7 +312,10 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         self.handler.notify_interested_services_ephemeral(
             "receipt_key", 580, ["@fakerecipient:example.com"]
         )
-        self.mock_scheduler.submit_ephemeral_events_for_as.assert_not_called()
+        # This method will be called, but with an empty list of events
+        self.mock_scheduler.enqueue_for_appservice.assert_called_once_with(
+            interested_service, ephemeral=[]
+        )
 
     def _mkservice(self, is_interested, protocols=None):
         service = Mock()
@@ -321,3 +331,252 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         service.token = "mock_service_token"
         service.url = "mock_service_url"
         return service
+
+
+class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
+    """
+    Tests that the ApplicationServicesHandler sends events to application
+    services correctly.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+        room.register_servlets,
+        sendtodevice.register_servlets,
+        receipts.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        # Mock the ApplicationServiceScheduler's _TransactionController's send method
+        # so that we can track any outgoing to-device messages and ephemeral events
+        self.send_mock = simple_async_mock()
+        hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock
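+        # The mock records calls of the form
+        # send(service, events, ephemeral, to_device_messages), which the
+        # tests below unpack from send_mock.call_args.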
+
+        # Mock out application services, and allow defining our own in tests
+        self._services: List[ApplicationService] = []
+        self.hs.get_datastore().get_app_services = Mock(return_value=self._services)
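+        # (the mock returns the live list object, so services appended later by
+        # _register_application_service are picked up automatically)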
+
+        # A user on the homeserver.
+        self.local_user_device_id = "local_device"
+        self.local_user = self.register_user("local_user", "password")
+        self.local_user_token = self.login(
+            "local_user", "password", self.local_user_device_id
+        )
+
+        # A user on the homeserver whose user ID lies within an application
+        # service's exclusive user namespace.
+        self.exclusive_as_user_device_id = "exclusive_as_device"
+        self.exclusive_as_user = self.register_user("exclusive_as_user", "password")
+        self.exclusive_as_user_token = self.login(
+            "exclusive_as_user", "password", self.exclusive_as_user_device_id
+        )
+
+    @unittest.override_config(
+        {"experimental_features": {"msc2409_to_device_messages_enabled": True}}
+    )
+    def test_application_services_receive_local_to_device(self):
+        """
+        Test that when a user sends a to-device message to another user whose
+        ID lies within an application service's user namespace, the
+        application service will receive it.
+        """
+        interested_appservice = self._register_application_service(
+            namespaces={
+                ApplicationService.NS_USERS: [
+                    {
+                        "regex": "@exclusive_as_user:.+",
+                        "exclusive": True,
+                    }
+                ],
+            },
+        )
+
+        # Have local_user send a to-device message to exclusive_as_user
+        message_content = {"some_key": "some really interesting value"}
+        chan = self.make_request(
+            "PUT",
+            "/_matrix/client/r0/sendToDevice/m.room_key_request/3",
+            content={
+                "messages": {
+                    self.exclusive_as_user: {
+                        self.exclusive_as_user_device_id: message_content
+                    }
+                }
+            },
+            access_token=self.local_user_token,
+        )
+        self.assertEqual(chan.code, 200, chan.result)
+
+        # Have exclusive_as_user send a to-device message to local_user
+        chan = self.make_request(
+            "PUT",
+            "/_matrix/client/r0/sendToDevice/m.room_key_request/4",
+            content={
+                "messages": {
+                    self.local_user: {self.local_user_device_id: message_content}
+                }
+            },
+            access_token=self.exclusive_as_user_token,
+        )
+        self.assertEqual(chan.code, 200, chan.result)
+
+        # Check that our application service - which is interested in
+        # exclusive_as_user - received the to-device message as part of an AS
+        # transaction. Only the local_user -> exclusive_as_user to-device
+        # message should have been forwarded to the AS.
+        self.send_mock.assert_called_once()
+        service, _events, _ephemeral, to_device_messages = self.send_mock.call_args[0]
+
+        # Assert that this was the same to-device message that local_user sent
+        self.assertEqual(service, interested_appservice)
+        self.assertEqual(to_device_messages[0]["type"], "m.room_key_request")
+        self.assertEqual(to_device_messages[0]["sender"], self.local_user)
+
+        # Additional fields 'to_user_id' and 'to_device_id' specifically for
+        # to-device messages via the AS API
+        self.assertEqual(to_device_messages[0]["to_user_id"], self.exclusive_as_user)
+        self.assertEqual(
+            to_device_messages[0]["to_device_id"], self.exclusive_as_user_device_id
+        )
+        self.assertEqual(to_device_messages[0]["content"], message_content)
+
+    @unittest.override_config(
+        {"experimental_features": {"msc2409_to_device_messages_enabled": True}}
+    )
+    def test_application_services_receive_bursts_of_to_device(self):
+        """
+        Test that when a user sends >100 to-device messages at once, any
+        interested application services will receive them split across
+        separate transactions.
+
+        Also tests that uninterested application services do not receive messages.
+        """
+        # Register two application services with exclusive interest in a user
+        interested_appservices = []
+        for _ in range(2):
+            appservice = self._register_application_service(
+                namespaces={
+                    ApplicationService.NS_USERS: [
+                        {
+                            "regex": "@exclusive_as_user:.+",
+                            "exclusive": True,
+                        }
+                    ],
+                },
+            )
+            interested_appservices.append(appservice)
+
+        # ...and an application service which does not have any user interest.
+        self._register_application_service()
+
+        to_device_message_content = {
+            "some key": "some interesting value",
+        }
+
+        # We need to send a large burst of to-device messages. We also would like to
+        # include them all in the same application service transaction so that we can
+        # test large transactions.
+        #
+        # To do this, we can send a single to-device message to many user devices at
+        # once.
+        #
+        # We insert number_of_messages - 1 messages into the database directly. We'll then
+        # send a final to-device message to the real device, which will also kick off
+        # an AS transaction (as just inserting messages into the DB won't).
+        number_of_messages = 150
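+        # (this exceeds the 100-message chunk size exercised by the scheduler
+        # tests above, so the burst should arrive split across multiple
+        # transactions)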
+        fake_device_ids = [f"device_{num}" for num in range(number_of_messages - 1)]
+        messages = {
+            self.exclusive_as_user: {
+                device_id: to_device_message_content for device_id in fake_device_ids
+            }
+        }
+
+        # Create a fake device per message. We can't send to-device messages to
+        # a device that doesn't exist.
+        self.get_success(
+            self.hs.get_datastore().db_pool.simple_insert_many(
+                desc="test_application_services_receive_burst_of_to_device",
+                table="devices",
+                keys=("user_id", "device_id"),
+                values=[
+                    (
+                        self.exclusive_as_user,
+                        device_id,
+                    )
+                    for device_id in fake_device_ids
+                ],
+            )
+        )
+
+        # Seed the device_inbox table with our fake messages
+        self.get_success(
+            self.hs.get_datastore().add_messages_to_device_inbox(messages, {})
+        )
+
+        # Now have local_user send a final to-device message to exclusive_as_user. All unsent
+        # to-device messages should be sent to any application services
+        # interested in exclusive_as_user.
+        chan = self.make_request(
+            "PUT",
+            "/_matrix/client/r0/sendToDevice/m.room_key_request/4",
+            content={
+                "messages": {
+                    self.exclusive_as_user: {
+                        self.exclusive_as_user_device_id: to_device_message_content
+                    }
+                }
+            },
+            access_token=self.local_user_token,
+        )
+        self.assertEqual(chan.code, 200, chan.result)
+
+        self.send_mock.assert_called()
+
+        # Count the total number of to-device messages that were sent out per-service.
+        # Ensure that we only sent to-device messages to interested services, and that
+        # each interested service received the full count of to-device messages.
+        service_id_to_message_count: Dict[str, int] = {}
+
+        for call in self.send_mock.call_args_list:
+            service, _events, _ephemeral, to_device_messages = call[0]
+
+            # Check that this was made to an interested service
+            self.assertIn(service, interested_appservices)
+
+            # Add to the count of messages for this application service
+            service_id_to_message_count.setdefault(service.id, 0)
+            service_id_to_message_count[service.id] += len(to_device_messages)
+
+        # Assert that each interested service received the full count of messages
+        for count in service_id_to_message_count.values():
+            self.assertEqual(count, number_of_messages)
+
+    def _register_application_service(
+        self,
+        namespaces: Optional[Dict[str, Iterable[Dict]]] = None,
+    ) -> ApplicationService:
+        """
+        Register a new application service, with the given namespaces of interest.
+
+        Args:
+            namespaces: A dictionary containing any user, room or alias namespaces that
+                the application service is interested in.
+
+        Returns:
+            The registered application service.
+        """
+        # Create an application service
+        appservice = ApplicationService(
+            token=random_string(10),
+            hostname="example.com",
+            id=random_string(10),
+            sender="@as:example.com",
+            rate_limited=False,
+            namespaces=namespaces,
+            supports_ephemeral=True,
+        )
+
+        # Register the application service
+        self._services.append(appservice)
+
+        return appservice
diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py
new file mode 100644
index 0000000000..01096a1581
--- /dev/null
+++ b/tests/handlers/test_deactivate_account.py
@@ -0,0 +1,325 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from http import HTTPStatus
+from typing import Any, Dict
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import AccountDataTypes
+from synapse.push.rulekinds import PRIORITY_CLASS_MAP
+from synapse.rest import admin
+from synapse.rest.client import account, login
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.unittest import HomeserverTestCase
+
+
+class DeactivateAccountTestCase(HomeserverTestCase):
+    servlets = [
+        login.register_servlets,
+        admin.register_servlets,
+        account.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self._store = hs.get_datastore()
+
+        self.user = self.register_user("user", "pass")
+        self.token = self.login("user", "pass")
+
+    def _deactivate_my_account(self):
+        """
+        Deactivates the account `self.user` using `self.token` and asserts
+        that it returns a 200 success code.
+        """
+        req = self.get_success(
+            self.make_request(
+                "POST",
+                "account/deactivate",
+                {
+                    "auth": {
+                        "type": "m.login.password",
+                        "user": self.user,
+                        "password": "pass",
+                    },
+                    "erase": True,
+                },
+                access_token=self.token,
+            )
+        )
+        self.assertEqual(req.code, HTTPStatus.OK, req)
+
+    def test_global_account_data_deleted_upon_deactivation(self) -> None:
+        """
+        Tests that global account data is removed upon deactivation.
+        """
+        # Add some account data
+        self.get_success(
+            self._store.add_account_data_for_user(
+                self.user,
+                AccountDataTypes.DIRECT,
+                {"@someone:remote": ["!somewhere:remote"]},
+            )
+        )
+
+        # Check that we actually added some.
+        self.assertIsNotNone(
+            self.get_success(
+                self._store.get_global_account_data_by_type_for_user(
+                    self.user, AccountDataTypes.DIRECT
+                )
+            ),
+        )
+
+        # Request the deactivation of our account
+        self._deactivate_my_account()
+
+        # Check that the account data does not persist.
+        self.assertIsNone(
+            self.get_success(
+                self._store.get_global_account_data_by_type_for_user(
+                    self.user, AccountDataTypes.DIRECT
+                )
+            ),
+        )
+
+    def test_room_account_data_deleted_upon_deactivation(self) -> None:
+        """
+        Tests that room account data is removed upon deactivation.
+        """
+        room_id = "!room:test"
+
+        # Add some room account data
+        self.get_success(
+            self._store.add_account_data_to_room(
+                self.user,
+                room_id,
+                "m.fully_read",
+                {"event_id": "$aaaa:test"},
+            )
+        )
+
+        # Check that we actually added some.
+        self.assertIsNotNone(
+            self.get_success(
+                self._store.get_account_data_for_room_and_type(
+                    self.user, room_id, "m.fully_read"
+                )
+            ),
+        )
+
+        # Request the deactivation of our account
+        self._deactivate_my_account()
+
+        # Check that the account data does not persist.
+        self.assertIsNone(
+            self.get_success(
+                self._store.get_account_data_for_room_and_type(
+                    self.user, room_id, "m.fully_read"
+                )
+            ),
+        )
+
+    def _is_custom_rule(self, push_rule: Dict[str, Any]) -> bool:
+        """
+        Default rule IDs have a dot-prefixed final segment, such as .m.rule.*
+        and .im.vector.*. This function returns True iff a rule is custom
+        (not default).
+        """
+        return "/." not in push_rule["rule_id"]
+
+    def test_push_rules_deleted_upon_account_deactivation(self) -> None:
+        """
+        Push rules are a special case of account data.
+        They are stored separately but get sent to the client as account data in /sync.
+        This tests that deactivating a user deletes push rules along with the rest
+        of their account data.
+        """
+
+        # Add a push rule
+        self.get_success(
+            self._store.add_push_rule(
+                self.user,
+                "personal.override.rule1",
+                PRIORITY_CLASS_MAP["override"],
+                [],
+                [],
+            )
+        )
+
+        # Test the rule exists
+        push_rules = self.get_success(self._store.get_push_rules_for_user(self.user))
+        # Filter out default rules; we don't care
+        push_rules = list(filter(self._is_custom_rule, push_rules))
+        # Check our rule made it
+        self.assertEqual(
+            push_rules,
+            [
+                {
+                    "user_name": "@user:test",
+                    "rule_id": "personal.override.rule1",
+                    "priority_class": 5,
+                    "priority": 0,
+                    "conditions": [],
+                    "actions": [],
+                    "default": False,
+                }
+            ],
+            push_rules,
+        )
+
+        # Request the deactivation of our account
+        self._deactivate_my_account()
+
+        push_rules = self.get_success(self._store.get_push_rules_for_user(self.user))
+        # Filter out default rules; we don't care
+        push_rules = list(filter(self._is_custom_rule, push_rules))
+        # Check our rule no longer exists
+        self.assertEqual(push_rules, [], push_rules)
+
+    def test_ignored_users_deleted_upon_deactivation(self) -> None:
+        """
+        Ignored users are a special case of account data.
+        They get denormalised into the `ignored_users` table upon being stored as
+        account data.
+        Test that a user's list of ignored users is deleted upon deactivation.
+        """
+
+        # Add an ignored user
+        self.get_success(
+            self._store.add_account_data_for_user(
+                self.user,
+                AccountDataTypes.IGNORED_USER_LIST,
+                {"ignored_users": {"@sheltie:test": {}}},
+            )
+        )
+
+        # Test the user is ignored
+        self.assertEqual(
+            self.get_success(self._store.ignored_by("@sheltie:test")), {self.user}
+        )
+
+        # Request the deactivation of our account
+        self._deactivate_my_account()
+
+        # Test the user is no longer ignored by the user that was deactivated
+        self.assertEqual(
+            self.get_success(self._store.ignored_by("@sheltie:test")), set()
+        )
+
+    def _rerun_retroactive_account_data_deletion_update(self) -> None:
+        # Reset the 'all done' flag
+        self._store.db_pool.updates._all_done = False
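+        # (setting _all_done back to False makes the background updater pick up
+        # the update we insert below)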
+
+        self.get_success(
+            self._store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": "delete_account_data_for_deactivated_users",
+                    "progress_json": "{}",
+                },
+            )
+        )
+
+        self.wait_for_background_updates()
+
+    def test_account_data_deleted_retroactively_by_background_update_if_deactivated(
+        self,
+    ) -> None:
+        """
+        Tests that a user, who deactivated their account before account data was
+        deleted automatically upon deactivation, has their account data retroactively
+        scrubbed by the background update.
+        """
+
+        # Request the deactivation of our account
+        self._deactivate_my_account()
+
+        # Add some account data
+        # (we do this after the deactivation so that the act of deactivating
+        # doesn't clear it out; this emulates a user who was deactivated before
+        # Synapse started deleting account data upon deactivation.)
+        self.get_success(
+            self._store.add_account_data_for_user(
+                self.user,
+                AccountDataTypes.DIRECT,
+                {"@someone:remote": ["!somewhere:remote"]},
+            )
+        )
+
+        # Check that the account data is there.
+        self.assertIsNotNone(
+            self.get_success(
+                self._store.get_global_account_data_by_type_for_user(
+                    self.user,
+                    AccountDataTypes.DIRECT,
+                )
+            ),
+        )
+
+        # Re-run the retroactive deletion update
+        self._rerun_retroactive_account_data_deletion_update()
+
+        # Check that the account data was cleared.
+        self.assertIsNone(
+            self.get_success(
+                self._store.get_global_account_data_by_type_for_user(
+                    self.user,
+                    AccountDataTypes.DIRECT,
+                )
+            ),
+        )
+
+    def test_account_data_preserved_by_background_update_if_not_deactivated(
+        self,
+    ) -> None:
+        """
+        Tests that the background update does not scrub account data for users that have
+        not been deactivated.
+        """
+
+        # Add some account data
+        # (this user is never deactivated, so the background update should
+        # leave their account data untouched.)
+        self.get_success(
+            self._store.add_account_data_for_user(
+                self.user,
+                AccountDataTypes.DIRECT,
+                {"@someone:remote": ["!somewhere:remote"]},
+            )
+        )
+
+        # Check that the account data is there.
+        self.assertIsNotNone(
+            self.get_success(
+                self._store.get_global_account_data_by_type_for_user(
+                    self.user,
+                    AccountDataTypes.DIRECT,
+                )
+            ),
+        )
+
+        # Re-run the retroactive deletion update
+        self._rerun_retroactive_account_data_deletion_update()
+
+        # Check that the account data was NOT cleared.
+        self.assertIsNotNone(
+            self.get_success(
+                self._store.get_global_account_data_by_type_for_user(
+                    self.user,
+                    AccountDataTypes.DIRECT,
+                )
+            ),
+        )
diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py
index 2add72b28a..94809cb8be 100644
--- a/tests/handlers/test_password_providers.py
+++ b/tests/handlers/test_password_providers.py
@@ -20,10 +20,11 @@ from unittest.mock import Mock
 from twisted.internet import defer
 
 import synapse
+from synapse.api.constants import LoginType
 from synapse.handlers.auth import load_legacy_password_auth_providers
 from synapse.module_api import ModuleApi
-from synapse.rest.client import devices, login, logout
-from synapse.types import JsonDict
+from synapse.rest.client import devices, login, logout, register
+from synapse.types import JsonDict, UserID
 
 from tests import unittest
 from tests.server import FakeChannel
@@ -156,6 +157,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase):
         login.register_servlets,
         devices.register_servlets,
         logout.register_servlets,
+        register.register_servlets,
     ]
 
     def setUp(self):
@@ -745,6 +747,79 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase):
         on_logged_out.assert_called_once()
         self.assertTrue(self.called)
 
+    def test_username(self):
+        """Tests that the get_username_for_registration callback can define the username
+        of a user when registering.
+        """
+        self._setup_get_username_for_registration()
+
+        username = "rin"
+        channel = self.make_request(
+            "POST",
+            "/register",
+            {
+                "username": username,
+                "password": "bar",
+                "auth": {"type": LoginType.DUMMY},
+            },
+        )
+        self.assertEqual(channel.code, 200)
+
+        # Our callback takes the username and appends "-foo" to it; check that
+        # this is what we got back.
+        mxid = channel.json_body["user_id"]
+        self.assertEqual(UserID.from_string(mxid).localpart, username + "-foo")
+
+    def test_username_uia(self):
+        """Tests that the get_username_for_registration callback is only called at the
+        end of the UIA flow.
+        """
+        m = self._setup_get_username_for_registration()
+
+        # Initiate the UIA flow.
+        username = "rin"
+        channel = self.make_request(
+            "POST",
+            "register",
+            {"username": username, "type": "m.login.password", "password": "bar"},
+        )
+        self.assertEqual(channel.code, 401)
+        self.assertIn("session", channel.json_body)
+
+        # Check that the callback hasn't been called yet.
+        m.assert_not_called()
+
+        # Finish the UIA flow.
+        session = channel.json_body["session"]
+        channel = self.make_request(
+            "POST",
+            "register",
+            {"auth": {"session": session, "type": LoginType.DUMMY}},
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+        mxid = channel.json_body["user_id"]
+        self.assertEqual(UserID.from_string(mxid).localpart, username + "-foo")
+
+        # Check that the callback has been called.
+        m.assert_called_once()
+
+    def _setup_get_username_for_registration(self) -> Mock:
+        """Registers a get_username_for_registration callback that appends "-foo" to the
+        username the client is trying to register.
+        """
+
+        async def get_username_for_registration(uia_results, params):
+            self.assertIn(LoginType.DUMMY, uia_results)
+            username = params["username"]
+            return username + "-foo"
+
+        m = Mock(side_effect=get_username_for_registration)
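+        # (a Mock with a side_effect returns whatever the callback returns - here
+        # a coroutine, so it can still be awaited - while letting the tests
+        # assert on how often it was called)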
+
+        password_auth_provider = self.hs.get_password_auth_provider()
+        password_auth_provider.get_username_for_registration_callbacks.append(m)
+
+        return m
+
     def _get_login_flows(self) -> JsonDict:
         channel = self.make_request("GET", "/_matrix/client/r0/login")
         self.assertEqual(channel.code, 200, channel.result)
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index c153018fd8..60235e5699 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -11,12 +11,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+from typing import Any, Dict
 from unittest.mock import Mock
 
 import synapse.types
 from synapse.api.errors import AuthError, SynapseError
 from synapse.rest import admin
+from synapse.server import HomeServer
 from synapse.types import UserID
 
 from tests import unittest
@@ -46,7 +47,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
         )
         return hs
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor, clock, hs: HomeServer):
         self.store = hs.get_datastore()
 
         self.frank = UserID.from_string("@1234abcd:test")
@@ -248,3 +249,92 @@ class ProfileTestCase(unittest.HomeserverTestCase):
             ),
             SynapseError,
         )
+
+    def test_avatar_constraints_no_config(self):
+        """Tests that the method to check an avatar against configured constraints skips
+        all of its check if no constraint is configured.
+        """
+        # The first check that's done by this method is whether the file exists; if we
+        # don't get an error on a non-existing file then it means all of the checks were
+        # successfully skipped.
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/unknown_file")
+        )
+        self.assertTrue(res)
+
+    @unittest.override_config({"max_avatar_size": 50})
+    def test_avatar_constraints_missing(self):
+        """Tests that an avatar isn't allowed if the file at the given MXC URI couldn't
+        be found.
+        """
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/unknown_file")
+        )
+        self.assertFalse(res)
+
+    @unittest.override_config({"max_avatar_size": 50})
+    def test_avatar_constraints_file_size(self):
+        """Tests that a file that's above the allowed file size is forbidden but one
+        that's below it is allowed.
+        """
+        self._setup_local_files(
+            {
+                "small": {"size": 40},
+                "big": {"size": 60},
+            }
+        )
+
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/small")
+        )
+        self.assertTrue(res)
+
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/big")
+        )
+        self.assertFalse(res)
+
+    @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]})
+    def test_avatar_constraint_mime_type(self):
+        """Tests that a file with an unauthorised MIME type is forbidden but one with
+        an authorised content type is allowed.
+        """
+        self._setup_local_files(
+            {
+                "good": {"mimetype": "image/png"},
+                "bad": {"mimetype": "application/octet-stream"},
+            }
+        )
+
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/good")
+        )
+        self.assertTrue(res)
+
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/bad")
+        )
+        self.assertFalse(res)
+
+    def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]):
+        """Stores metadata about files in the database.
+
+        Args:
+            names_and_props: A dictionary with one entry per file, with the key being the
+                file's name, and the value being a dictionary of properties. Supported
+                properties are "mimetype" (for the file's type) and "size" (for the
+                file's size).
+        """
+        store = self.hs.get_datastore()
+
+        for name, props in names_and_props.items():
+            self.get_success(
+                store.store_local_media(
+                    media_id=name,
+                    media_type=props.get("mimetype", "image/png"),
+                    time_now_ms=self.clock.time_msec(),
+                    upload_name=None,
+                    media_length=props.get("size", 50),
+                    user_id=UserID.from_string("@rin:test"),
+                )
+            )
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 70c621b825..482c90ef68 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -169,7 +169,9 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         # Register an AS user.
         user = self.register_user("user", "pass")
         token = self.login(user, "pass")
-        as_user = self.register_appservice_user("as_user_potato", self.appservice.token)
+        as_user, _ = self.register_appservice_user(
+            "as_user_potato", self.appservice.token
+        )
 
         # Join the AS user to rooms owned by the normal user.
         public, private = self._create_rooms_and_inject_memberships(
@@ -388,7 +390,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
 
     def test_handle_local_profile_change_with_appservice_user(self) -> None:
         # create user
-        as_user_id = self.register_appservice_user(
+        as_user_id, _ = self.register_appservice_user(
             "as_user_alice", self.appservice.token
         )
 
diff --git a/tests/http/test_webclient.py b/tests/http/test_webclient.py
deleted file mode 100644
index ee5cf299f6..0000000000
--- a/tests/http/test_webclient.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2022 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from http import HTTPStatus
-from typing import Dict
-
-from twisted.web.resource import Resource
-
-from synapse.app.homeserver import SynapseHomeServer
-from synapse.config.server import HttpListenerConfig, HttpResourceConfig, ListenerConfig
-from synapse.http.site import SynapseSite
-
-from tests.server import make_request
-from tests.unittest import HomeserverTestCase, create_resource_tree, override_config
-
-
-class WebClientTests(HomeserverTestCase):
-    @override_config(
-        {
-            "web_client_location": "https://example.org",
-        }
-    )
-    def test_webclient_resolves_with_client_resource(self):
-        """
-        Tests that both client and webclient resources can be accessed simultaneously.
-
-        This is a regression test created in response to https://github.com/matrix-org/synapse/issues/11763.
-        """
-        for resource_name_order_list in [
-            ["webclient", "client"],
-            ["client", "webclient"],
-        ]:
-            # Create a dictionary from path regex -> resource
-            resource_dict: Dict[str, Resource] = {}
-
-            for resource_name in resource_name_order_list:
-                resource_dict.update(
-                    SynapseHomeServer._configure_named_resource(self.hs, resource_name)
-                )
-
-            # Create a root resource which ties the above resources together into one
-            root_resource = Resource()
-            create_resource_tree(resource_dict, root_resource)
-
-            # Create a site configured with this resource to make HTTP requests against
-            listener_config = ListenerConfig(
-                port=8008,
-                bind_addresses=["127.0.0.1"],
-                type="http",
-                http_options=HttpListenerConfig(
-                    resources=[HttpResourceConfig(names=resource_name_order_list)]
-                ),
-            )
-            test_site = SynapseSite(
-                logger_name="synapse.access.http.fake",
-                site_tag=self.hs.config.server.server_name,
-                config=listener_config,
-                resource=root_resource,
-                server_version_string="1",
-                max_request_body_size=1234,
-                reactor=self.reactor,
-            )
-
-            # Attempt to make requests to endpoints on both the webclient and client resources
-            # on test_site.
-            self._request_client_and_webclient_resources(test_site)
-
-    def _request_client_and_webclient_resources(self, test_site: SynapseSite) -> None:
-        """Make a request to an endpoint on both the webclient and client-server resources
-        of the given SynapseSite.
-
-        Args:
-            test_site: The SynapseSite object to make requests against.
-        """
-
-        # Ensure that the *webclient* resource is behaving as expected (we get redirected to
-        # the configured web_client_location)
-        channel = make_request(
-            self.reactor,
-            site=test_site,
-            method="GET",
-            path="/_matrix/client",
-        )
-        # Check that we are being redirected to the webclient location URI.
-        self.assertEqual(channel.code, HTTPStatus.FOUND)
-        self.assertEqual(
-            channel.headers.getRawHeaders("Location"), ["https://example.org"]
-        )
-
-        # Ensure that a request to the *client* resource works.
-        channel = make_request(
-            self.reactor,
-            site=test_site,
-            method="GET",
-            path="/_matrix/client/v3/login",
-        )
-        self.assertEqual(channel.code, HTTPStatus.OK)
-        self.assertIn("flows", channel.json_body)
diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py
new file mode 100644
index 0000000000..e430941d27
--- /dev/null
+++ b/tests/logging/test_opentracing.py
@@ -0,0 +1,184 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+from twisted.test.proto_helpers import MemoryReactorClock
+
+from synapse.logging.context import (
+    LoggingContext,
+    make_deferred_yieldable,
+    run_in_background,
+)
+from synapse.logging.opentracing import (
+    start_active_span,
+    start_active_span_follows_from,
+)
+from synapse.util import Clock
+
+try:
+    from synapse.logging.scopecontextmanager import LogContextScopeManager
+except ImportError:
+    LogContextScopeManager = None  # type: ignore
+
+try:
+    import jaeger_client
+except ImportError:
+    jaeger_client = None  # type: ignore
+
+from tests.unittest import TestCase
+
+
+class LogContextScopeManagerTestCase(TestCase):
+    if LogContextScopeManager is None:
+        skip = "Requires opentracing"  # type: ignore[unreachable]
+    if jaeger_client is None:
+        skip = "Requires jaeger_client"  # type: ignore[unreachable]
+
+    def setUp(self) -> None:
+        # since this is a unit test, we don't really want to mess around with the
+        # global variables that power opentracing. We create our own tracer instance
+        # and test with it.
+
+        scope_manager = LogContextScopeManager({})
+        config = jaeger_client.config.Config(
+            config={}, service_name="test", scope_manager=scope_manager
+        )
+
+        self._reporter = jaeger_client.reporter.InMemoryReporter()
+
+        self._tracer = config.create_tracer(
+            sampler=jaeger_client.ConstSampler(True),
+            reporter=self._reporter,
+        )
+
+    def test_start_active_span(self) -> None:
+        # the scope manager assumes a logging context of some sort.
+        with LoggingContext("root context"):
+            self.assertIsNone(self._tracer.active_span)
+
+            # start_active_span should start and activate a span.
+            scope = start_active_span("span", tracer=self._tracer)
+            span = scope.span
+            self.assertEqual(self._tracer.active_span, span)
+            self.assertIsNotNone(span.start_time)
+
+            # entering the context doesn't actually do a whole lot.
+            with scope as ctx:
+                self.assertIs(ctx, scope)
+                self.assertEqual(self._tracer.active_span, span)
+
+            # ... but leaving it unsets the active span, and finishes the span.
+            self.assertIsNone(self._tracer.active_span)
+            self.assertIsNotNone(span.end_time)
+
+        # the span should have been reported
+        self.assertEqual(self._reporter.get_spans(), [span])
+
+    def test_nested_spans(self) -> None:
+        """Starting two spans off inside each other should work"""
+
+        with LoggingContext("root context"):
+            with start_active_span("root span", tracer=self._tracer) as root_scope:
+                self.assertEqual(self._tracer.active_span, root_scope.span)
+
+                scope1 = start_active_span(
+                    "child1",
+                    tracer=self._tracer,
+                )
+                self.assertEqual(
+                    self._tracer.active_span, scope1.span, "child1 was not activated"
+                )
+                self.assertEqual(
+                    scope1.span.context.parent_id, root_scope.span.context.span_id
+                )
+
+                scope2 = start_active_span_follows_from(
+                    "child2",
+                    contexts=(scope1,),
+                    tracer=self._tracer,
+                )
+                self.assertEqual(self._tracer.active_span, scope2.span)
+                self.assertEqual(
+                    scope2.span.context.parent_id, scope1.span.context.span_id
+                )
+
+                with scope1, scope2:
+                    pass
+
+                # the root scope should be restored
+                self.assertEqual(self._tracer.active_span, root_scope.span)
+                self.assertIsNotNone(scope2.span.end_time)
+                self.assertIsNotNone(scope1.span.end_time)
+
+            self.assertIsNone(self._tracer.active_span)
+
+        # the spans should be reported in order of their finishing.
+        self.assertEqual(
+            self._reporter.get_spans(), [scope2.span, scope1.span, root_scope.span]
+        )
+
+    def test_overlapping_spans(self) -> None:
+        """Overlapping spans which are not neatly nested should work"""
+        reactor = MemoryReactorClock()
+        clock = Clock(reactor)
+
+        scopes = []
+
+        async def task(i: int):
+            scope = start_active_span(
+                f"task{i}",
+                tracer=self._tracer,
+            )
+            scopes.append(scope)
+
+            self.assertEqual(self._tracer.active_span, scope.span)
+            await clock.sleep(4)
+            self.assertEqual(self._tracer.active_span, scope.span)
+            scope.close()
+
+        async def root():
+            with start_active_span("root span", tracer=self._tracer) as root_scope:
+                self.assertEqual(self._tracer.active_span, root_scope.span)
+                scopes.append(root_scope)
+
+                d1 = run_in_background(task, 1)
+                await clock.sleep(2)
+                d2 = run_in_background(task, 2)
+
+                # because we did run_in_background, the active span should still be the
+                # root.
+                self.assertEqual(self._tracer.active_span, root_scope.span)
+
+                await make_deferred_yieldable(
+                    defer.gatherResults([d1, d2], consumeErrors=True)
+                )
+
+                self.assertEqual(self._tracer.active_span, root_scope.span)
+
+        with LoggingContext("root context"):
+            # start the test off
+            d1 = defer.ensureDeferred(root())
+
+            # let the tasks complete
+            reactor.pump((2,) * 8)
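+            # (each pump advances the clock by 2 seconds, enough for root()'s
+            # 2-second sleep and both tasks' 4-second sleeps to fire)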
+
+            self.successResultOf(d1)
+            self.assertIsNone(self._tracer.active_span)
+
+        # the spans should be reported in order of their finishing: task 1, task 2,
+        # root.
+        self.assertEqual(
+            self._reporter.get_spans(),
+            [scopes[1].span, scopes[2].span, scopes[0].span],
+        )
diff --git a/tests/replication/slave/storage/test_account_data.py b/tests/replication/slave/storage/test_account_data.py
index 43e3248703..1524087c43 100644
--- a/tests/replication/slave/storage/test_account_data.py
+++ b/tests/replication/slave/storage/test_account_data.py
@@ -30,7 +30,7 @@ class SlavedAccountDataStoreTestCase(BaseSlavedStoreTestCase):
         )
         self.replicate()
         self.check(
-            "get_global_account_data_by_type_for_user", [TYPE, USER_ID], {"a": 1}
+            "get_global_account_data_by_type_for_user", [USER_ID, TYPE], {"a": 1}
         )
 
         self.get_success(
@@ -38,5 +38,5 @@ class SlavedAccountDataStoreTestCase(BaseSlavedStoreTestCase):
         )
         self.replicate()
         self.check(
-            "get_global_account_data_by_type_for_user", [TYPE, USER_ID], {"a": 2}
+            "get_global_account_data_by_type_for_user", [USER_ID, TYPE], {"a": 2}
         )
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 3adadcb46b..849d00ab4d 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -12,18 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 import urllib.parse
 from http import HTTPStatus
-from unittest.mock import Mock
+from typing import List
 
-from twisted.internet.defer import Deferred
+from parameterized import parameterized
+
+from twisted.test.proto_helpers import MemoryReactor
 
 import synapse.rest.admin
 from synapse.http.server import JsonResource
-from synapse.logging.context import make_deferred_yieldable
 from synapse.rest.admin import VersionServlet
 from synapse.rest.client import groups, login, room
+from synapse.server import HomeServer
+from synapse.util import Clock
 
 from tests import unittest
 from tests.server import FakeSite, make_request
@@ -33,12 +35,12 @@ from tests.test_utils import SMALL_PNG
 class VersionTestCase(unittest.HomeserverTestCase):
     url = "/_synapse/admin/v1/server_version"
 
-    def create_test_resource(self):
+    def create_test_resource(self) -> JsonResource:
         resource = JsonResource(self.hs)
         VersionServlet(self.hs).register(resource)
         return resource
 
-    def test_version_string(self):
+    def test_version_string(self) -> None:
         channel = self.make_request("GET", self.url, shorthand=False)
 
         self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
@@ -54,14 +56,14 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase):
         groups.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
 
         self.other_user = self.register_user("user", "pass")
         self.other_user_token = self.login("user", "pass")
 
-    def test_delete_group(self):
+    def test_delete_group(self) -> None:
         # Create a new group
         channel = self.make_request(
             "POST",
@@ -112,7 +114,7 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase):
         self.assertNotIn(group_id, self._get_groups_user_is_in(self.admin_user_tok))
         self.assertNotIn(group_id, self._get_groups_user_is_in(self.other_user_token))
 
-    def _check_group(self, group_id, expect_code):
+    def _check_group(self, group_id: str, expect_code: int) -> None:
         """Assert that trying to fetch the given group results in the given
         HTTP status code
         """
@@ -124,7 +126,7 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(expect_code, channel.code, msg=channel.json_body)
 
-    def _get_groups_user_is_in(self, access_token):
+    def _get_groups_user_is_in(self, access_token: str) -> List[str]:
         """Returns the list of groups the user is in (given their access token)"""
         channel = self.make_request("GET", b"/joined_groups", access_token=access_token)
 
@@ -143,59 +145,15 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
         room.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         # Allow for uploading and downloading to/from the media repo
         self.media_repo = hs.get_media_repository_resource()
         self.download_resource = self.media_repo.children[b"download"]
         self.upload_resource = self.media_repo.children[b"upload"]
 
-    def make_homeserver(self, reactor, clock):
-
-        self.fetches = []
-
-        async def get_file(destination, path, output_stream, args=None, max_size=None):
-            """
-            Returns tuple[int,dict,str,int] of file length, response headers,
-            absolute URI, and response code.
-            """
-
-            def write_to(r):
-                data, response = r
-                output_stream.write(data)
-                return response
-
-            d = Deferred()
-            d.addCallback(write_to)
-            self.fetches.append((d, destination, path, args))
-            return await make_deferred_yieldable(d)
-
-        client = Mock()
-        client.get_file = get_file
-
-        self.storage_path = self.mktemp()
-        self.media_store_path = self.mktemp()
-        os.mkdir(self.storage_path)
-        os.mkdir(self.media_store_path)
-
-        config = self.default_config()
-        config["media_store_path"] = self.media_store_path
-        config["thumbnail_requirements"] = {}
-        config["max_image_pixels"] = 2000000
-
-        provider_config = {
-            "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend",
-            "store_local": True,
-            "store_synchronous": False,
-            "store_remote": True,
-            "config": {"directory": self.storage_path},
-        }
-        config["media_storage_providers"] = [provider_config]
-
-        hs = self.setup_test_homeserver(config=config, federation_http_client=client)
-
-        return hs
-
-    def _ensure_quarantined(self, admin_user_tok, server_and_media_id):
+    def _ensure_quarantined(
+        self, admin_user_tok: str, server_and_media_id: str
+    ) -> None:
         """Ensure a piece of media is quarantined when trying to access it."""
         channel = make_request(
             self.reactor,
@@ -216,12 +174,18 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
             ),
         )
 
-    def test_quarantine_media_requires_admin(self):
+    @parameterized.expand(
+        [
+            # Attempt quarantine media APIs as non-admin
+            "/_synapse/admin/v1/media/quarantine/example.org/abcde12345",
+            # And the roomID/userID endpoint
+            "/_synapse/admin/v1/room/!room%3Aexample.com/media/quarantine",
+        ]
+    )
+    def test_quarantine_media_requires_admin(self, url: str) -> None:
         self.register_user("nonadmin", "pass", admin=False)
         non_admin_user_tok = self.login("nonadmin", "pass")
 
-        # Attempt quarantine media APIs as non-admin
-        url = "/_synapse/admin/v1/media/quarantine/example.org/abcde12345"
         channel = self.make_request(
             "POST",
             url.encode("ascii"),
@@ -235,22 +199,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
             msg="Expected forbidden on quarantining media as a non-admin",
         )
 
-        # And the roomID/userID endpoint
-        url = "/_synapse/admin/v1/room/!room%3Aexample.com/media/quarantine"
-        channel = self.make_request(
-            "POST",
-            url.encode("ascii"),
-            access_token=non_admin_user_tok,
-        )
-
-        # Expect a forbidden error
-        self.assertEqual(
-            HTTPStatus.FORBIDDEN,
-            channel.code,
-            msg="Expected forbidden on quarantining media as a non-admin",
-        )
-
-    def test_quarantine_media_by_id(self):
+    def test_quarantine_media_by_id(self) -> None:
         self.register_user("id_admin", "pass", admin=True)
         admin_user_tok = self.login("id_admin", "pass")
 
@@ -295,7 +244,15 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
         # Attempt to access the media
         self._ensure_quarantined(admin_user_tok, server_name_and_media_id)
 
-    def test_quarantine_all_media_in_room(self, override_url_template=None):
+    @parameterized.expand(
+        [
+            # regular API path
+            "/_synapse/admin/v1/room/%s/media/quarantine",
+            # deprecated API path
+            "/_synapse/admin/v1/quarantine_media/%s",
+        ]
+    )
+    def test_quarantine_all_media_in_room(self, url: str) -> None:
         self.register_user("room_admin", "pass", admin=True)
         admin_user_tok = self.login("room_admin", "pass")
 
@@ -333,16 +290,9 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
             tok=non_admin_user_tok,
         )
 
-        # Quarantine all media in the room
-        if override_url_template:
-            url = override_url_template % urllib.parse.quote(room_id)
-        else:
-            url = "/_synapse/admin/v1/room/%s/media/quarantine" % urllib.parse.quote(
-                room_id
-            )
         channel = self.make_request(
             "POST",
-            url,
+            url % urllib.parse.quote(room_id),
             access_token=admin_user_tok,
         )
         self.pump(1.0)
@@ -359,11 +309,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
         self._ensure_quarantined(admin_user_tok, server_and_media_id_1)
         self._ensure_quarantined(admin_user_tok, server_and_media_id_2)
 
-    def test_quarantine_all_media_in_room_deprecated_api_path(self):
-        # Perform the above test with the deprecated API path
-        self.test_quarantine_all_media_in_room("/_synapse/admin/v1/quarantine_media/%s")
-
-    def test_quarantine_all_media_by_user(self):
+    def test_quarantine_all_media_by_user(self) -> None:
         self.register_user("user_admin", "pass", admin=True)
         admin_user_tok = self.login("user_admin", "pass")
 
@@ -401,7 +347,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
         self._ensure_quarantined(admin_user_tok, server_and_media_id_1)
         self._ensure_quarantined(admin_user_tok, server_and_media_id_2)
 
-    def test_cannot_quarantine_safe_media(self):
+    def test_cannot_quarantine_safe_media(self) -> None:
         self.register_user("user_admin", "pass", admin=True)
         admin_user_tok = self.login("user_admin", "pass")
 
@@ -475,7 +421,7 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase):
         room.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
 
@@ -488,7 +434,7 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase):
         self.url = f"/_synapse/admin/v1/purge_history/{self.room_id}"
         self.url_status = "/_synapse/admin/v1/purge_history_status/"
 
-    def test_purge_history(self):
+    def test_purge_history(self) -> None:
         """
         Simple test of purge history API.
         Test only that it is possible to call it, get status HTTPStatus.OK, and a purge_id.
diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py
index b70350b6f1..71068d16cd 100644
--- a/tests/rest/admin/test_federation.py
+++ b/tests/rest/admin/test_federation.py
@@ -20,7 +20,7 @@ from twisted.test.proto_helpers import MemoryReactor
 
 import synapse.rest.admin
 from synapse.api.errors import Codes
-from synapse.rest.client import login
+from synapse.rest.client import login, room
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util import Clock
@@ -43,20 +43,22 @@ class FederationTestCase(unittest.HomeserverTestCase):
 
     @parameterized.expand(
         [
-            ("/_synapse/admin/v1/federation/destinations",),
-            ("/_synapse/admin/v1/federation/destinations/dummy",),
+            ("GET", "/_synapse/admin/v1/federation/destinations"),
+            ("GET", "/_synapse/admin/v1/federation/destinations/dummy"),
+            (
+                "POST",
+                "/_synapse/admin/v1/federation/destinations/dummy/reset_connection",
+            ),
         ]
     )
-    def test_requester_is_no_admin(self, url: str) -> None:
-        """
-        If the user is not a server admin, an error 403 is returned.
-        """
+    def test_requester_is_no_admin(self, method: str, url: str) -> None:
+        """If the user is not a server admin, an error 403 is returned."""
 
         self.register_user("user", "pass", admin=False)
         other_user_tok = self.login("user", "pass")
 
         channel = self.make_request(
-            "GET",
+            method,
             url,
             content={},
             access_token=other_user_tok,
@@ -66,9 +68,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
         self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
 
     def test_invalid_parameter(self) -> None:
-        """
-        If parameters are invalid, an error is returned.
-        """
+        """If parameters are invalid, an error is returned."""
 
         # negative limit
         channel = self.make_request(
@@ -120,10 +120,18 @@ class FederationTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
 
+        # invalid destination
+        channel = self.make_request(
+            "POST",
+            self.url + "/dummy/reset_connection",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+
     def test_limit(self) -> None:
-        """
-        Testing list of destinations with limit
-        """
+        """Testing list of destinations with limit"""
 
         number_destinations = 20
         self._create_destinations(number_destinations)
@@ -141,9 +149,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
         self._check_fields(channel.json_body["destinations"])
 
     def test_from(self) -> None:
-        """
-        Testing list of destinations with a defined starting point (from)
-        """
+        """Testing list of destinations with a defined starting point (from)"""
 
         number_destinations = 20
         self._create_destinations(number_destinations)
@@ -161,9 +167,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
         self._check_fields(channel.json_body["destinations"])
 
     def test_limit_and_from(self) -> None:
-        """
-        Testing list of destinations with a defined starting point and limit
-        """
+        """Testing list of destinations with a defined starting point and limit"""
 
         number_destinations = 20
         self._create_destinations(number_destinations)
@@ -181,9 +185,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
         self._check_fields(channel.json_body["destinations"])
 
     def test_next_token(self) -> None:
-        """
-        Testing that `next_token` appears at the right place
-        """
+        """Testing that `next_token` appears at the right place"""
 
         number_destinations = 20
         self._create_destinations(number_destinations)
@@ -242,9 +244,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
         self.assertNotIn("next_token", channel.json_body)
 
     def test_list_all_destinations(self) -> None:
-        """
-        List all destinations.
-        """
+        """List all destinations."""
         number_destinations = 5
         self._create_destinations(number_destinations)
 
@@ -263,9 +263,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
         self._check_fields(channel.json_body["destinations"])
 
     def test_order_by(self) -> None:
-        """
-        Testing order list with parameter `order_by`
-        """
+        """Testing order list with parameter `order_by`"""
 
         def _order_test(
             expected_destination_list: List[str],
@@ -444,6 +442,39 @@ class FederationTestCase(unittest.HomeserverTestCase):
         self.assertIsNone(channel.json_body["failure_ts"])
         self.assertIsNone(channel.json_body["last_successful_stream_ordering"])
 
+    def test_destination_reset_connection(self) -> None:
+        """Reset timeouts and wake up destination."""
+        self._create_destination("sub0.example.com", 100, 100, 100)
+
+        channel = self.make_request(
+            "POST",
+            self.url + "/sub0.example.com/reset_connection",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+
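+        # A successful reset should clear the stored retry timings entirely.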
+        retry_timings = self.get_success(
+            self.store.get_destination_retry_timings("sub0.example.com")
+        )
+        self.assertIsNone(retry_timings)
+
+    def test_destination_reset_connection_not_required(self) -> None:
+        """Try to reset timeouts of a destination with no timeouts and get an error."""
+        self._create_destination("sub0.example.com", None, 0, 0)
+
+        channel = self.make_request(
+            "POST",
+            self.url + "/sub0.example.com/reset_connection",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+        self.assertEqual(
+            "The retry timing does not need to be reset for this destination.",
+            channel.json_body["error"],
+        )
+
     def _create_destination(
         self,
         destination: str,
@@ -496,3 +527,271 @@ class FederationTestCase(unittest.HomeserverTestCase):
             self.assertIn("retry_interval", c)
             self.assertIn("failure_ts", c)
             self.assertIn("last_successful_stream_ordering", c)
+
+
+class DestinationMembershipTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastore()
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.dest = "sub0.example.com"
+        self.url = f"/_synapse/admin/v1/federation/destinations/{self.dest}/rooms"
+
+        # Record that we successfully contacted a destination in the DB.
+        self.get_success(
+            self.store.set_destination_retry_timings(self.dest, None, 0, 0)
+        )
+
+    def test_requester_is_no_admin(self) -> None:
+        """If the user is not a server admin, an error 403 is returned."""
+
+        self.register_user("user", "pass", admin=False)
+        other_user_tok = self.login("user", "pass")
+
+        channel = self.make_request(
+            "GET",
+            self.url,
+            access_token=other_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+    def test_invalid_parameter(self) -> None:
+        """If parameters are invalid, an error is returned."""
+
+        # negative limit
+        channel = self.make_request(
+            "GET",
+            self.url + "?limit=-5",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+        self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+
+        # negative from
+        channel = self.make_request(
+            "GET",
+            self.url + "?from=-5",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+        self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+
+        # invalid search order
+        channel = self.make_request(
+            "GET",
+            self.url + "?dir=bar",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+        self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+
+        # invalid destination
+        channel = self.make_request(
+            "GET",
+            "/_synapse/admin/v1/federation/destinations/%s/rooms" % ("invalid",),
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+
+    def test_limit(self) -> None:
+        """Testing list of destinations with limit"""
+
+        number_rooms = 5
+        self._create_destination_rooms(number_rooms)
+
+        channel = self.make_request(
+            "GET",
+            self.url + "?limit=3",
+            access_token=self.admin_user_tok,
+        )
+
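+        # With 5 rooms and limit=3, the first page holds 3 entries and
+        # `next_token` points at the fourth entry (offset 3).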
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(channel.json_body["total"], number_rooms)
+        self.assertEqual(len(channel.json_body["rooms"]), 3)
+        self.assertEqual(channel.json_body["next_token"], "3")
+        self._check_fields(channel.json_body["rooms"])
+
+    def test_from(self) -> None:
+        """Testing list of rooms with a defined starting point (from)"""
+
+        number_rooms = 10
+        self._create_destination_rooms(number_rooms)
+
+        channel = self.make_request(
+            "GET",
+            self.url + "?from=5",
+            access_token=self.admin_user_tok,
+        )
+
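+        # Skipping the first 5 of 10 rooms leaves exactly 5, so the list is
+        # exhausted and no `next_token` is returned.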
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(channel.json_body["total"], number_rooms)
+        self.assertEqual(len(channel.json_body["rooms"]), 5)
+        self.assertNotIn("next_token", channel.json_body)
+        self._check_fields(channel.json_body["rooms"])
+
+    def test_limit_and_from(self) -> None:
+        """Testing list of rooms with a defined starting point and limit"""
+
+        number_rooms = 10
+        self._create_destination_rooms(number_rooms)
+
+        channel = self.make_request(
+            "GET",
+            self.url + "?from=3&limit=5",
+            access_token=self.admin_user_tok,
+        )
+
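+        # from=3 with limit=5 returns entries 3..7; `next_token` points at
+        # offset 8.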
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(channel.json_body["total"], number_rooms)
+        self.assertEqual(channel.json_body["next_token"], "8")
+        self.assertEqual(len(channel.json_body["rooms"]), 5)
+        self._check_fields(channel.json_body["rooms"])
+
+    def test_order_direction(self) -> None:
+        """Testing order list with parameter `dir`"""
+        number_rooms = 4
+        self._create_destination_rooms(number_rooms)
+
+        # get list in forward direction
+        channel_asc = self.make_request(
+            "GET",
+            self.url + "?dir=f",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.OK, channel_asc.code, msg=channel_asc.json_body)
+        self.assertEqual(channel_asc.json_body["total"], number_rooms)
+        self.assertEqual(number_rooms, len(channel_asc.json_body["rooms"]))
+        self._check_fields(channel_asc.json_body["rooms"])
+
+        # get list in backward direction
+        channel_desc = self.make_request(
+            "GET",
+            self.url + "?dir=b",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.OK, channel_desc.code, msg=channel_desc.json_body)
+        self.assertEqual(channel_desc.json_body["total"], number_rooms)
+        self.assertEqual(number_rooms, len(channel_desc.json_body["rooms"]))
+        self._check_fields(channel_desc.json_body["rooms"])
+
+        # test that both lists have different directions
+        for i in range(0, number_rooms):
+            self.assertEqual(
+                channel_asc.json_body["rooms"][i]["room_id"],
+                channel_desc.json_body["rooms"][number_rooms - 1 - i]["room_id"],
+            )
+
+    def test_next_token(self) -> None:
+        """Testing that `next_token` appears at the right place"""
+
+        number_rooms = 5
+        self._create_destination_rooms(number_rooms)
+
+        #  `next_token` does not appear
+        # Number of results is the number of entries
+        channel = self.make_request(
+            "GET",
+            self.url + "?limit=5",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(channel.json_body["total"], number_rooms)
+        self.assertEqual(len(channel.json_body["rooms"]), number_rooms)
+        self.assertNotIn("next_token", channel.json_body)
+
+        #  `next_token` does not appear
+        # Number of max results is larger than the number of entries
+        channel = self.make_request(
+            "GET",
+            self.url + "?limit=6",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(channel.json_body["total"], number_rooms)
+        self.assertEqual(len(channel.json_body["rooms"]), number_rooms)
+        self.assertNotIn("next_token", channel.json_body)
+
+        #  `next_token` does appear
+        # Number of max results is smaller than the number of entries
+        channel = self.make_request(
+            "GET",
+            self.url + "?limit=4",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(channel.json_body["total"], number_rooms)
+        self.assertEqual(len(channel.json_body["rooms"]), 4)
+        self.assertEqual(channel.json_body["next_token"], "4")
+
+        # Check that setting `from` to the value of `next_token` returns the
+        # remaining entries, and that `next_token` then does not appear.
+        channel = self.make_request(
+            "GET",
+            self.url + "?from=4",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(channel.json_body["total"], number_rooms)
+        self.assertEqual(len(channel.json_body["rooms"]), 1)
+        self.assertNotIn("next_token", channel.json_body)
+
+    def test_destination_rooms(self) -> None:
+        """Testing that request the list of rooms is successfully."""
+        number_rooms = 3
+        self._create_destination_rooms(number_rooms)
+
+        channel = self.make_request(
+            "GET",
+            self.url,
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(channel.json_body["total"], number_rooms)
+        self.assertEqual(number_rooms, len(channel.json_body["rooms"]))
+        self._check_fields(channel.json_body["rooms"])
+
+    def _create_destination_rooms(self, number_rooms: int) -> None:
+        """Create a number rooms for destination
+
+        Args:
+            number_rooms: Number of rooms to be created
+        """
+        for _ in range(0, number_rooms):
+            room_id = self.helper.create_room_as(
+                self.admin_user, tok=self.admin_user_tok
+            )
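+            # Record an outgoing event (stream_ordering 1234) for this room
+            # and destination; this is what the rooms list endpoint reports.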
+            self.get_success(
+                self.store.store_destination_rooms_entries((self.dest,), room_id, 1234)
+            )
+
+    def _check_fields(self, content: List[JsonDict]) -> None:
+        """Checks that the expected room attributes are present in content
+
+        Args:
+            content: List that is checked for content
+        """
+        for c in content:
+            self.assertIn("room_id", c)
+            self.assertIn("stream_ordering", c)
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index 3495a0366a..23da0ad736 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -2468,7 +2468,6 @@ PURGE_TABLES = [
     "event_search",
     "events",
     "group_rooms",
-    "public_room_list_stream",
     "receipts_graph",
     "receipts_linearized",
     "room_aliases",
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 9711405735..272637e965 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -23,13 +23,17 @@ from unittest.mock import Mock, patch
 
 from parameterized import parameterized, parameterized_class
 
+from twisted.test.proto_helpers import MemoryReactor
+
 import synapse.rest.admin
 from synapse.api.constants import UserTypes
 from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError
 from synapse.api.room_versions import RoomVersions
 from synapse.rest.client import devices, login, logout, profile, room, sync
 from synapse.rest.media.v1.filepath import MediaFilePaths
+from synapse.server import HomeServer
 from synapse.types import JsonDict, UserID
+from synapse.util import Clock
 
 from tests import unittest
 from tests.server import FakeSite, make_request
@@ -44,7 +48,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         profile.register_servlets,
     ]
 
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
 
         self.url = "/_synapse/admin/v1/register"
 
@@ -61,12 +65,12 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
 
         self.hs.config.registration.registration_shared_secret = "shared"
 
-        self.hs.get_media_repository = Mock()
-        self.hs.get_deactivate_account_handler = Mock()
+        self.hs.get_media_repository = Mock()  # type: ignore[assignment]
+        self.hs.get_deactivate_account_handler = Mock()  # type: ignore[assignment]
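+        # Mock() does not match the typed return values of these accessors,
+        # hence the `type: ignore[assignment]` annotations above.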
 
         return self.hs
 
-    def test_disabled(self):
+    def test_disabled(self) -> None:
         """
         If there is no shared secret, registration through this method will be
         prevented.
@@ -80,7 +84,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
             "Shared secret registration is not enabled", channel.json_body["error"]
         )
 
-    def test_get_nonce(self):
+    def test_get_nonce(self) -> None:
         """
         Calling GET on the endpoint will return a randomised nonce, using the
         homeserver's secrets provider.
@@ -93,7 +97,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
 
             self.assertEqual(channel.json_body, {"nonce": "abcd"})
 
-    def test_expired_nonce(self):
+    def test_expired_nonce(self) -> None:
         """
         Calling GET on the endpoint will return a randomised nonce, which will
         only last for SALT_TIMEOUT (60s).
@@ -118,7 +122,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual("unrecognised nonce", channel.json_body["error"])
 
-    def test_register_incorrect_nonce(self):
+    def test_register_incorrect_nonce(self) -> None:
         """
         Only the provided nonce can be used, as it's checked in the MAC.
         """
@@ -141,7 +145,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
         self.assertEqual("HMAC incorrect", channel.json_body["error"])
 
-    def test_register_correct_nonce(self):
+    def test_register_correct_nonce(self) -> None:
         """
         When the correct nonce is provided, and the right key is provided, the
         user is registered.
@@ -168,7 +172,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
         self.assertEqual("@bob:test", channel.json_body["user_id"])
 
-    def test_nonce_reuse(self):
+    def test_nonce_reuse(self) -> None:
         """
         A nonce can only be used once; reuse is rejected as unrecognised.
         """
@@ -197,14 +201,14 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual("unrecognised nonce", channel.json_body["error"])
 
-    def test_missing_parts(self):
+    def test_missing_parts(self) -> None:
         """
         Synapse will complain if you don't give nonce, username, password, and
         mac.  Admin and user_types are optional.  Additional checks are done for length
         and type.
         """
 
-        def nonce():
+        def nonce() -> str:
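+            # Fetch a fresh nonce from the endpoint for each attempt.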
             channel = self.make_request("GET", self.url)
             return channel.json_body["nonce"]
 
@@ -297,7 +301,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual("Invalid user type", channel.json_body["error"])
 
-    def test_displayname(self):
+    def test_displayname(self) -> None:
         """
         Test that displayname of new user is set
         """
@@ -400,7 +404,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
     @override_config(
         {"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0}
     )
-    def test_register_mau_limit_reached(self):
+    def test_register_mau_limit_reached(self) -> None:
         """
         Check we can register a user via the shared secret registration API
         even if the MAU limit is reached.
@@ -450,13 +454,13 @@ class UsersListTestCase(unittest.HomeserverTestCase):
     ]
     url = "/_synapse/admin/v2/users"
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastore()
 
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
 
-    def test_no_auth(self):
+    def test_no_auth(self) -> None:
         """
         Try to list users without authentication.
         """
@@ -465,7 +469,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
 
-    def test_requester_is_no_admin(self):
+    def test_requester_is_no_admin(self) -> None:
         """
         If the user is not a server admin, an error is returned.
         """
@@ -477,7 +481,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
 
-    def test_all_users(self):
+    def test_all_users(self) -> None:
         """
         List all users, including deactivated users.
         """
@@ -497,7 +501,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         # Check that all fields are available
         self._check_fields(channel.json_body["users"])
 
-    def test_search_term(self):
+    def test_search_term(self) -> None:
         """Test that searching for a users works correctly"""
 
         def _search_test(
@@ -505,7 +509,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             search_term: str,
             search_field: Optional[str] = "name",
             expected_http_code: Optional[int] = HTTPStatus.OK,
-        ):
+        ) -> None:
             """Search for a user and check that the returned user's id is a match
 
             Args:
@@ -575,7 +579,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         _search_test(None, "foo", "user_id")
         _search_test(None, "bar", "user_id")
 
-    def test_invalid_parameter(self):
+    def test_invalid_parameter(self) -> None:
         """
         If parameters are invalid, an error is returned.
         """
@@ -640,7 +644,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
 
-    def test_limit(self):
+    def test_limit(self) -> None:
         """
         Testing list of users with limit
         """
@@ -661,7 +665,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         self.assertEqual(channel.json_body["next_token"], "5")
         self._check_fields(channel.json_body["users"])
 
-    def test_from(self):
+    def test_from(self) -> None:
         """
         Testing list of users with a defined starting point (from)
         """
@@ -682,7 +686,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         self.assertNotIn("next_token", channel.json_body)
         self._check_fields(channel.json_body["users"])
 
-    def test_limit_and_from(self):
+    def test_limit_and_from(self) -> None:
         """
         Testing list of users with a defined starting point and limit
         """
@@ -703,7 +707,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         self.assertEqual(len(channel.json_body["users"]), 10)
         self._check_fields(channel.json_body["users"])
 
-    def test_next_token(self):
+    def test_next_token(self) -> None:
         """
         Testing that `next_token` appears at the right place
         """
@@ -765,7 +769,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         self.assertEqual(len(channel.json_body["users"]), 1)
         self.assertNotIn("next_token", channel.json_body)
 
-    def test_order_by(self):
+    def test_order_by(self) -> None:
         """
         Testing ordering of the list with parameter `order_by`
         """
@@ -843,7 +847,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         expected_user_list: List[str],
         order_by: Optional[str],
         dir: Optional[str] = None,
-    ):
+    ) -> None:
         """Request the list of users in a certain order. Assert that order is what
         we expect
         Args:
@@ -870,7 +874,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         self.assertEqual(expected_user_list, returned_order)
         self._check_fields(channel.json_body["users"])
 
-    def _check_fields(self, content: List[JsonDict]):
+    def _check_fields(self, content: List[JsonDict]) -> None:
         """Checks that the expected user attributes are present in content
         Args:
             content: List that is checked for content
@@ -886,7 +890,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
             self.assertIn("avatar_url", u)
             self.assertIn("creation_ts", u)
 
-    def _create_users(self, number_users: int):
+    def _create_users(self, number_users: int) -> None:
         """
         Create a number of users
         Args:
@@ -908,7 +912,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
         login.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastore()
 
         self.admin_user = self.register_user("admin", "pass", admin=True)
@@ -931,7 +935,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
             self.store.user_add_threepid("@user:test", "email", "foo@bar.com", 0, 0)
         )
 
-    def test_no_auth(self):
+    def test_no_auth(self) -> None:
         """
         Try to deactivate users without authentication.
         """
@@ -940,7 +944,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
 
-    def test_requester_is_not_admin(self):
+    def test_requester_is_not_admin(self) -> None:
         """
         If the user is not a server admin, an error is returned.
         """
@@ -961,7 +965,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
         self.assertEqual("You are not a server admin", channel.json_body["error"])
 
-    def test_user_does_not_exist(self):
+    def test_user_does_not_exist(self) -> None:
         """
         Tests that deactivation for a user that does not exist returns a HTTPStatus.NOT_FOUND
         """
@@ -975,7 +979,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
 
-    def test_erase_is_not_bool(self):
+    def test_erase_is_not_bool(self) -> None:
         """
         If parameter `erase` is not boolean, return an error
         """
@@ -990,7 +994,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
 
-    def test_user_is_not_local(self):
+    def test_user_is_not_local(self) -> None:
         """
         Tests that deactivation for a user that is not local returns a HTTPStatus.BAD_REQUEST
         """
@@ -1001,7 +1005,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual("Can only deactivate local users", channel.json_body["error"])
 
-    def test_deactivate_user_erase_true(self):
+    def test_deactivate_user_erase_true(self) -> None:
         """
         Test deactivating a user and set `erase` to `true`
         """
@@ -1046,7 +1050,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
 
         self._is_erased("@user:test", True)
 
-    def test_deactivate_user_erase_false(self):
+    def test_deactivate_user_erase_false(self) -> None:
         """
         Test deactivating a user and set `erase` to `false`
         """
@@ -1091,7 +1095,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
 
         self._is_erased("@user:test", False)
 
-    def test_deactivate_user_erase_true_no_profile(self):
+    def test_deactivate_user_erase_true_no_profile(self) -> None:
         """
         Test deactivating a user and set `erase` to `true`
         if user has no profile information (stored in the database table `profiles`).
@@ -1162,7 +1166,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         sync.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastore()
         self.auth_handler = hs.get_auth_handler()
 
@@ -1185,7 +1189,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.url_prefix = "/_synapse/admin/v2/users/%s"
         self.url_other_user = self.url_prefix % self.other_user
 
-    def test_requester_is_no_admin(self):
+    def test_requester_is_no_admin(self) -> None:
         """
         If the user is not a server admin, an error is returned.
         """
@@ -1210,7 +1214,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
         self.assertEqual("You are not a server admin", channel.json_body["error"])
 
-    def test_user_does_not_exist(self):
+    def test_user_does_not_exist(self) -> None:
         """
         Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
         """
@@ -1224,7 +1228,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
         self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"])
 
-    def test_invalid_parameter(self):
+    def test_invalid_parameter(self) -> None:
         """
         If parameters are invalid, an error is returned.
         """
@@ -1319,7 +1323,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
 
-    def test_get_user(self):
+    def test_get_user(self) -> None:
         """
         Test a simple get of a user.
         """
@@ -1334,7 +1338,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual("User", channel.json_body["displayname"])
         self._check_fields(channel.json_body)
 
-    def test_create_server_admin(self):
+    def test_create_server_admin(self) -> None:
         """
         Check that a new admin user is created successfully.
         """
@@ -1383,7 +1387,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"])
         self._check_fields(channel.json_body)
 
-    def test_create_user(self):
+    def test_create_user(self) -> None:
         """
         Check that a new regular user is created successfully.
         """
@@ -1450,7 +1454,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
     @override_config(
         {"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0}
     )
-    def test_create_user_mau_limit_reached_active_admin(self):
+    def test_create_user_mau_limit_reached_active_admin(self) -> None:
         """
         Check that an admin can register a new user via the admin API
         even if the MAU limit is reached.
@@ -1496,7 +1500,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
     @override_config(
         {"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0}
     )
-    def test_create_user_mau_limit_reached_passive_admin(self):
+    def test_create_user_mau_limit_reached_passive_admin(self) -> None:
         """
         Check that an admin can register a new user via the admin API
         even if the MAU limit is reached.
@@ -1541,7 +1545,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             "public_baseurl": "https://example.com",
         }
     )
-    def test_create_user_email_notif_for_new_users(self):
+    def test_create_user_email_notif_for_new_users(self) -> None:
         """
         Check that a new regular user is created successfully and
         gets an email pusher.
@@ -1584,7 +1588,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
             "public_baseurl": "https://example.com",
         }
     )
-    def test_create_user_email_no_notif_for_new_users(self):
+    def test_create_user_email_no_notif_for_new_users(self) -> None:
         """
         Check that a new regular user is created successfully and
         does not get an email pusher.
@@ -1615,7 +1619,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         pushers = list(pushers)
         self.assertEqual(len(pushers), 0)
 
-    def test_set_password(self):
+    def test_set_password(self) -> None:
         """
         Test setting a new password for another user.
         """
@@ -1631,7 +1635,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
         self._check_fields(channel.json_body)
 
-    def test_set_displayname(self):
+    def test_set_displayname(self) -> None:
         """
         Test setting the displayname of another user.
         """
@@ -1659,7 +1663,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual("foobar", channel.json_body["displayname"])
 
-    def test_set_threepid(self):
+    def test_set_threepid(self) -> None:
         """
         Test setting a threepid for another user.
         """
@@ -1740,7 +1744,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(0, len(channel.json_body["threepids"]))
         self._check_fields(channel.json_body)
 
-    def test_set_duplicate_threepid(self):
+    def test_set_duplicate_threepid(self) -> None:
         """
         Test setting the same threepid for a second user.
         The first user loses the mapping of this threepid and the second user gains it.
@@ -1827,7 +1831,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(0, len(channel.json_body["threepids"]))
         self._check_fields(channel.json_body)
 
-    def test_set_external_id(self):
+    def test_set_external_id(self) -> None:
         """
         Test setting an external id for another user.
         """
@@ -1925,7 +1929,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(0, len(channel.json_body["external_ids"]))
 
-    def test_set_duplicate_external_id(self):
+    def test_set_duplicate_external_id(self) -> None:
         """
         Test that setting the same external id for a second user fails and
         that the first user's external id is not changed.
@@ -2048,7 +2052,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         )
         self._check_fields(channel.json_body)
 
-    def test_deactivate_user(self):
+    def test_deactivate_user(self) -> None:
         """
         Test deactivating another user.
         """
@@ -2113,7 +2117,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertNotIn("password_hash", channel.json_body)
 
     @override_config({"user_directory": {"enabled": True, "search_all_users": True}})
-    def test_change_name_deactivate_user_user_directory(self):
+    def test_change_name_deactivate_user_user_directory(self) -> None:
         """
         Test change profile information of a deactivated user and
         check that it does not appear in user directory
@@ -2156,7 +2160,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         profile = self.get_success(self.store.get_user_in_directory(self.other_user))
         self.assertIsNone(profile)
 
-    def test_reactivate_user(self):
+    def test_reactivate_user(self) -> None:
         """
         Test reactivating another user.
         """
@@ -2189,7 +2193,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertNotIn("password_hash", channel.json_body)
 
     @override_config({"password_config": {"localdb_enabled": False}})
-    def test_reactivate_user_localdb_disabled(self):
+    def test_reactivate_user_localdb_disabled(self) -> None:
         """
         Test reactivating another user when using SSO.
         """
@@ -2223,7 +2227,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertNotIn("password_hash", channel.json_body)
 
     @override_config({"password_config": {"enabled": False}})
-    def test_reactivate_user_password_disabled(self):
+    def test_reactivate_user_password_disabled(self) -> None:
         """
         Test reactivating another user when using SSO.
         """
@@ -2256,7 +2260,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         # This key was removed intentionally. Ensure it is not accidentally re-included.
         self.assertNotIn("password_hash", channel.json_body)
 
-    def test_set_user_as_admin(self):
+    def test_set_user_as_admin(self) -> None:
         """
         Test setting the admin flag on a user.
         """
@@ -2284,7 +2288,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertTrue(channel.json_body["admin"])
 
-    def test_set_user_type(self):
+    def test_set_user_type(self) -> None:
         """
         Test changing user type.
         """
@@ -2335,7 +2339,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertIsNone(channel.json_body["user_type"])
 
-    def test_accidental_deactivation_prevention(self):
+    def test_accidental_deactivation_prevention(self) -> None:
         """
         Ensure an account can't accidentally be deactivated by using a str value
         for the deactivated body parameter
@@ -2418,7 +2422,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         # This key was removed intentionally. Ensure it is not accidentally re-included.
         self.assertNotIn("password_hash", channel.json_body)
 
-    def _check_fields(self, content: JsonDict):
+    def _check_fields(self, content: JsonDict) -> None:
         """Checks that the expected user attributes are present in content
 
         Args:
@@ -2448,7 +2452,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
         room.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
 
@@ -2457,7 +2461,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
             self.other_user
         )
 
-    def test_no_auth(self):
+    def test_no_auth(self) -> None:
         """
         Try to list rooms of a user without authentication.
         """
@@ -2466,7 +2470,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
 
-    def test_requester_is_no_admin(self):
+    def test_requester_is_no_admin(self) -> None:
         """
         If the user is not a server admin, an error is returned.
         """
@@ -2481,7 +2485,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
 
-    def test_user_does_not_exist(self):
+    def test_user_does_not_exist(self) -> None:
         """
         Tests that a lookup for a user that does not exist returns an empty list
         """
@@ -2496,7 +2500,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(0, channel.json_body["total"])
         self.assertEqual(0, len(channel.json_body["joined_rooms"]))
 
-    def test_user_is_not_local(self):
+    def test_user_is_not_local(self) -> None:
         """
         Tests that a lookup for a user that is not local and participates in no conversation returns an empty list
         """
@@ -2512,7 +2516,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(0, channel.json_body["total"])
         self.assertEqual(0, len(channel.json_body["joined_rooms"]))
 
-    def test_no_memberships(self):
+    def test_no_memberships(self) -> None:
         """
         Tests that a normal lookup for rooms succeeds
         if the user has no memberships
@@ -2528,7 +2532,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(0, channel.json_body["total"])
         self.assertEqual(0, len(channel.json_body["joined_rooms"]))
 
-    def test_get_rooms(self):
+    def test_get_rooms(self) -> None:
         """
         Tests that a normal lookup for rooms is successful
         """
@@ -2549,7 +2553,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(number_rooms, channel.json_body["total"])
         self.assertEqual(number_rooms, len(channel.json_body["joined_rooms"]))
 
-    def test_get_rooms_with_nonlocal_user(self):
+    def test_get_rooms_with_nonlocal_user(self) -> None:
         """
         Tests that a normal lookup for rooms is successful with a non-local user
         """
@@ -2604,7 +2608,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
         login.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastore()
 
         self.admin_user = self.register_user("admin", "pass", admin=True)
@@ -2615,7 +2619,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
             self.other_user
         )
 
-    def test_no_auth(self):
+    def test_no_auth(self) -> None:
         """
         Try to list pushers of a user without authentication.
         """
@@ -2624,7 +2628,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
 
-    def test_requester_is_no_admin(self):
+    def test_requester_is_no_admin(self) -> None:
         """
         If the user is not a server admin, an error is returned.
         """
@@ -2639,7 +2643,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
 
-    def test_user_does_not_exist(self):
+    def test_user_does_not_exist(self) -> None:
         """
         Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
         """
@@ -2653,7 +2657,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
 
-    def test_user_is_not_local(self):
+    def test_user_is_not_local(self) -> None:
         """
         Tests that a lookup for a user that is not local returns a HTTPStatus.BAD_REQUEST
         """
@@ -2668,7 +2672,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual("Can only look up local users", channel.json_body["error"])
 
-    def test_get_pushers(self):
+    def test_get_pushers(self) -> None:
         """
         Tests that a normal lookup for pushers is successful
         """
@@ -2732,7 +2736,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         login.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastore()
         self.media_repo = hs.get_media_repository_resource()
         self.filepaths = MediaFilePaths(hs.config.media.media_store_path)
@@ -2746,7 +2750,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         )
 
     @parameterized.expand(["GET", "DELETE"])
-    def test_no_auth(self, method: str):
+    def test_no_auth(self, method: str) -> None:
         """Try to list media of an user without authentication."""
         channel = self.make_request(method, self.url, {})
 
@@ -2754,7 +2758,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
 
     @parameterized.expand(["GET", "DELETE"])
-    def test_requester_is_no_admin(self, method: str):
+    def test_requester_is_no_admin(self, method: str) -> None:
         """If the user is not a server admin, an error is returned."""
         other_user_token = self.login("user", "pass")
 
@@ -2768,7 +2772,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
 
     @parameterized.expand(["GET", "DELETE"])
-    def test_user_does_not_exist(self, method: str):
+    def test_user_does_not_exist(self, method: str) -> None:
         """Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND"""
         url = "/_synapse/admin/v1/users/@unknown_person:test/media"
         channel = self.make_request(
@@ -2781,7 +2785,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
 
     @parameterized.expand(["GET", "DELETE"])
-    def test_user_is_not_local(self, method: str):
+    def test_user_is_not_local(self, method: str) -> None:
         """Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST"""
         url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/media"
 
@@ -2794,7 +2798,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual("Can only look up local users", channel.json_body["error"])
 
-    def test_limit_GET(self):
+    def test_limit_GET(self) -> None:
         """Testing list of media with limit"""
 
         number_media = 20
@@ -2813,7 +2817,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(channel.json_body["next_token"], 5)
         self._check_fields(channel.json_body["media"])
 
-    def test_limit_DELETE(self):
+    def test_limit_DELETE(self) -> None:
         """Testing delete of media with limit"""
 
         number_media = 20
@@ -2830,7 +2834,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(channel.json_body["total"], 5)
         self.assertEqual(len(channel.json_body["deleted_media"]), 5)
 
-    def test_from_GET(self):
+    def test_from_GET(self) -> None:
         """Testing list of media with a defined starting point (from)"""
 
         number_media = 20
@@ -2849,7 +2853,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertNotIn("next_token", channel.json_body)
         self._check_fields(channel.json_body["media"])
 
-    def test_from_DELETE(self):
+    def test_from_DELETE(self) -> None:
         """Testing delete of media with a defined starting point (from)"""
 
         number_media = 20
@@ -2866,7 +2870,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(channel.json_body["total"], 15)
         self.assertEqual(len(channel.json_body["deleted_media"]), 15)
 
-    def test_limit_and_from_GET(self):
+    def test_limit_and_from_GET(self) -> None:
         """Testing list of media with a defined starting point and limit"""
 
         number_media = 20
@@ -2885,7 +2889,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(len(channel.json_body["media"]), 10)
         self._check_fields(channel.json_body["media"])
 
-    def test_limit_and_from_DELETE(self):
+    def test_limit_and_from_DELETE(self) -> None:
         """Testing delete of media with a defined starting point and limit"""
 
         number_media = 20
@@ -2903,7 +2907,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(len(channel.json_body["deleted_media"]), 10)
 
     @parameterized.expand(["GET", "DELETE"])
-    def test_invalid_parameter(self, method: str):
+    def test_invalid_parameter(self, method: str) -> None:
         """If parameters are invalid, an error is returned."""
         # unknown order_by
         channel = self.make_request(
@@ -2945,7 +2949,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
 
-    def test_next_token(self):
+    def test_next_token(self) -> None:
         """
         Testing that `next_token` appears at the right place
 
@@ -3010,7 +3014,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(len(channel.json_body["media"]), 1)
         self.assertNotIn("next_token", channel.json_body)
 
-    def test_user_has_no_media_GET(self):
+    def test_user_has_no_media_GET(self) -> None:
         """
         Tests that a normal lookup for media succeeds
         if the user has no media created
@@ -3026,7 +3030,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(0, channel.json_body["total"])
         self.assertEqual(0, len(channel.json_body["media"]))
 
-    def test_user_has_no_media_DELETE(self):
+    def test_user_has_no_media_DELETE(self) -> None:
         """
         Tests that a delete is successful if the user has no media
         """
@@ -3041,7 +3045,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(0, channel.json_body["total"])
         self.assertEqual(0, len(channel.json_body["deleted_media"]))
 
-    def test_get_media(self):
+    def test_get_media(self) -> None:
         """Tests that a normal lookup for media is successful"""
 
         number_media = 5
@@ -3060,7 +3064,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         self.assertNotIn("next_token", channel.json_body)
         self._check_fields(channel.json_body["media"])
 
-    def test_delete_media(self):
+    def test_delete_media(self) -> None:
         """Tests that a normal delete of media is successful"""
 
         number_media = 5
@@ -3089,7 +3093,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         for local_path in local_paths:
             self.assertFalse(os.path.exists(local_path))
 
-    def test_order_by(self):
+    def test_order_by(self) -> None:
         """
         Testing ordering of the list with parameter `order_by`
         """
@@ -3252,7 +3256,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
 
         return media_id
 
-    def _check_fields(self, content: List[JsonDict]):
+    def _check_fields(self, content: List[JsonDict]) -> None:
         """Checks that the expected user attributes are present in content
         Args:
             content: List that is checked for content
@@ -3272,7 +3276,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
         expected_media_list: List[str],
         order_by: Optional[str],
         dir: Optional[str] = None,
-    ):
+    ) -> None:
         """Request the list of media in a certain order. Assert that order is what
         we expect
         Args:
@@ -3312,7 +3316,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
         logout.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastore()
 
         self.admin_user = self.register_user("admin", "pass", admin=True)
@@ -3331,14 +3335,14 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
         return channel.json_body["access_token"]
 
-    def test_no_auth(self):
+    def test_no_auth(self) -> None:
         """Try to login as a user without authentication."""
         channel = self.make_request("POST", self.url, b"{}")
 
         self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
 
-    def test_not_admin(self):
+    def test_not_admin(self) -> None:
         """Try to login as a user as a non-admin user."""
         channel = self.make_request(
             "POST", self.url, b"{}", access_token=self.other_user_tok
@@ -3346,7 +3350,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
 
-    def test_send_event(self):
+    def test_send_event(self) -> None:
         """Test that sending event as a user works."""
         # Create a room.
         room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_tok)
@@ -3360,7 +3364,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
         event = self.get_success(self.store.get_event(event_id))
         self.assertEqual(event.sender, self.other_user)
 
-    def test_devices(self):
+    def test_devices(self) -> None:
         """Tests that logging in as a user doesn't create a new device for them."""
         # Log in as the user
         self._get_token()
@@ -3374,7 +3378,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
         # We should only see the one device (from the login in `prepare`)
         self.assertEqual(len(channel.json_body["devices"]), 1)
 
-    def test_logout(self):
+    def test_logout(self) -> None:
         """Test that calling `/logout` with the token works."""
         # Log in as the user
         puppet_token = self._get_token()
@@ -3397,7 +3401,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
         )
         self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
 
-    def test_user_logout_all(self):
+    def test_user_logout_all(self) -> None:
         """Tests that the target user calling `/logout/all` does *not* expire
         the token.
         """
@@ -3424,7 +3428,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
         )
         self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
 
-    def test_admin_logout_all(self):
+    def test_admin_logout_all(self) -> None:
         """Tests that the admin user calling `/logout/all` does expire the
         token.
         """
@@ -3464,7 +3468,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
             "form_secret": "123secret",
         }
     )
-    def test_consent(self):
+    def test_consent(self) -> None:
         """Test that sending a message is not subject to the privacy policies."""
         # Have the admin user accept the terms.
         self.get_success(self.store.user_set_consent_version(self.admin_user, "1.0"))
@@ -3492,7 +3496,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
     @override_config(
         {"limit_usage_by_mau": True, "max_mau_value": 1, "mau_trial_days": 0}
     )
-    def test_mau_limit(self):
+    def test_mau_limit(self) -> None:
         # Create a room as the admin user. This will bump the monthly active users to 1.
         room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
 
@@ -3524,14 +3528,14 @@ class WhoisRestTestCase(unittest.HomeserverTestCase):
         login.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
 
         self.other_user = self.register_user("user", "pass")
-        self.url = self.url_prefix % self.other_user
+        self.url = self.url_prefix % self.other_user  # type: ignore[attr-defined]
 
-    def test_no_auth(self):
+    def test_no_auth(self) -> None:
         """
         Try to get information about a user without authentication.
         """
@@ -3539,7 +3543,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
 
-    def test_requester_is_not_admin(self):
+    def test_requester_is_not_admin(self) -> None:
         """
         If the user is not a server admin, an error is returned.
         """
@@ -3554,11 +3558,11 @@ class WhoisRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
 
-    def test_user_is_not_local(self):
+    def test_user_is_not_local(self) -> None:
         """
         Tests that a lookup for a user that is not local returns HTTPStatus.BAD_REQUEST
         """
-        url = self.url_prefix % "@unknown_person:unknown_domain"
+        url = self.url_prefix % "@unknown_person:unknown_domain"  # type: ignore[attr-defined]
 
         channel = self.make_request(
             "GET",
@@ -3568,7 +3572,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual("Can only whois a local user", channel.json_body["error"])
 
-    def test_get_whois_admin(self):
+    def test_get_whois_admin(self) -> None:
         """
         The lookup should succeed for an admin.
         """
@@ -3581,7 +3585,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(self.other_user, channel.json_body["user_id"])
         self.assertIn("devices", channel.json_body)
 
-    def test_get_whois_user(self):
+    def test_get_whois_user(self) -> None:
         """
         The lookup should succeed for a normal user looking up their own information.
         """
@@ -3604,7 +3608,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
         login.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastore()
 
         self.admin_user = self.register_user("admin", "pass", admin=True)
@@ -3617,7 +3621,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
         )
 
     @parameterized.expand(["POST", "DELETE"])
-    def test_no_auth(self, method: str):
+    def test_no_auth(self, method: str) -> None:
         """
         Try to get information about a user without authentication.
         """
@@ -3626,7 +3630,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
 
     @parameterized.expand(["POST", "DELETE"])
-    def test_requester_is_not_admin(self, method: str):
+    def test_requester_is_not_admin(self, method: str) -> None:
         """
         If the user is not a server admin, an error is returned.
         """
@@ -3637,7 +3641,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
 
     @parameterized.expand(["POST", "DELETE"])
-    def test_user_is_not_local(self, method: str):
+    def test_user_is_not_local(self, method: str) -> None:
         """
         Tests that shadow-banning a user that is not local returns HTTPStatus.BAD_REQUEST
         """
@@ -3646,7 +3650,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
         channel = self.make_request(method, url, access_token=self.admin_user_tok)
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
 
-    def test_success(self):
+    def test_success(self) -> None:
         """
         Shadow-banning should succeed for an admin.
         """
@@ -3682,7 +3686,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
         login.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastore()
 
         self.admin_user = self.register_user("admin", "pass", admin=True)
@@ -3695,7 +3699,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
         )
 
     @parameterized.expand(["GET", "POST", "DELETE"])
-    def test_no_auth(self, method: str):
+    def test_no_auth(self, method: str) -> None:
         """
         Try to get information about a user without authentication.
         """
@@ -3705,7 +3709,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
         self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
 
     @parameterized.expand(["GET", "POST", "DELETE"])
-    def test_requester_is_no_admin(self, method: str):
+    def test_requester_is_no_admin(self, method: str) -> None:
         """
         If the user is not a server admin, an error is returned.
         """
@@ -3721,7 +3725,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
         self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
 
     @parameterized.expand(["GET", "POST", "DELETE"])
-    def test_user_does_not_exist(self, method: str):
+    def test_user_does_not_exist(self, method: str) -> None:
         """
         Tests that a lookup for a user that does not exist returns HTTPStatus.NOT_FOUND
         """
@@ -3743,7 +3747,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
             ("DELETE", "Only local users can be ratelimited"),
         ]
     )
-    def test_user_is_not_local(self, method: str, error_msg: str):
+    def test_user_is_not_local(self, method: str, error_msg: str) -> None:
         """
         Tests that a lookup for a user that is not local returns HTTPStatus.BAD_REQUEST
         """
@@ -3760,7 +3764,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual(error_msg, channel.json_body["error"])
 
-    def test_invalid_parameter(self):
+    def test_invalid_parameter(self) -> None:
         """
         If parameters are invalid, an error is returned.
         """
@@ -3808,7 +3812,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
         self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
         self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
 
-    def test_return_zero_when_null(self):
+    def test_return_zero_when_null(self) -> None:
         """
         If values in the database are `null`, the API should return an integer `0`
         """
@@ -3834,7 +3838,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
         self.assertEqual(0, channel.json_body["messages_per_second"])
         self.assertEqual(0, channel.json_body["burst_count"])
 
-    def test_success(self):
+    def test_success(self) -> None:
         """
         Rate-limiting (set/update/delete) should succeed for an admin.
         """
@@ -3908,7 +3912,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase):
         login.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs) -> None:
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastore()
 
         self.admin_user = self.register_user("admin", "pass", admin=True)
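
The puppeting tests above drive the user-login admin API. As a rough out-of-tree sketch (assuming the `/_synapse/admin/v1/users/<user_id>/login` path these tests target, and using `requests`, which is not part of the test suite), obtaining a puppet token looks like:

    import requests

    def get_puppet_token(base_url: str, admin_token: str, user_id: str) -> str:
        # POST with an empty body; the response carries an access token that
        # acts as the target user without creating a new device for them.
        resp = requests.post(
            f"{base_url}/_synapse/admin/v1/users/{user_id}/login",
            json={},
            headers={"Authorization": f"Bearer {admin_token}"},
        )
        resp.raise_for_status()
        return resp.json()["access_token"]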
diff --git a/tests/rest/admin/test_username_available.py b/tests/rest/admin/test_username_available.py
index 7978626e71..b21f6d4689 100644
--- a/tests/rest/admin/test_username_available.py
+++ b/tests/rest/admin/test_username_available.py
@@ -14,9 +14,13 @@
 
 from http import HTTPStatus
 
+from twisted.test.proto_helpers import MemoryReactor
+
 import synapse.rest.admin
 from synapse.api.errors import Codes, SynapseError
 from synapse.rest.client import login
+from synapse.server import HomeServer
+from synapse.util import Clock
 
 from tests import unittest
 
@@ -28,11 +32,11 @@ class UsernameAvailableTestCase(unittest.HomeserverTestCase):
     ]
     url = "/_synapse/admin/v1/username_available"
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
 
-        async def check_username(username):
+        async def check_username(username: str) -> bool:
             if username == "allowed":
                 return True
             raise SynapseError(
@@ -44,24 +48,24 @@ class UsernameAvailableTestCase(unittest.HomeserverTestCase):
         handler = self.hs.get_registration_handler()
         handler.check_username = check_username
 
-    def test_username_available(self):
+    def test_username_available(self) -> None:
         """
         The endpoint should return an HTTPStatus.OK response if the username does not exist
         """
 
         url = "%s?username=%s" % (self.url, "allowed")
-        channel = self.make_request("GET", url, None, self.admin_user_tok)
+        channel = self.make_request("GET", url, access_token=self.admin_user_tok)
 
         self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
         self.assertTrue(channel.json_body["available"])
 
-    def test_username_unavailable(self):
+    def test_username_unavailable(self) -> None:
         """
         The endpoint should return an HTTPStatus.BAD_REQUEST response if the username is not available
         """
 
         url = "%s?username=%s" % (self.url, "disallowed")
-        channel = self.make_request("GET", url, None, self.admin_user_tok)
+        channel = self.make_request("GET", url, access_token=self.admin_user_tok)
 
         self.assertEqual(
             HTTPStatus.BAD_REQUEST,
diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py
index 2860579c2e..ead883ded8 100644
--- a/tests/rest/client/test_profile.py
+++ b/tests/rest/client/test_profile.py
@@ -13,8 +13,12 @@
 # limitations under the License.
 
 """Tests REST events for /profile paths."""
+from typing import Any, Dict
+
+from synapse.api.errors import Codes
 from synapse.rest import admin
 from synapse.rest.client import login, profile, room
+from synapse.types import UserID
 
 from tests import unittest
 
@@ -25,6 +29,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
         admin.register_servlets_for_client_rest_resource,
         login.register_servlets,
         profile.register_servlets,
+        room.register_servlets,
     ]
 
     def make_homeserver(self, reactor, clock):
@@ -150,6 +155,157 @@ class ProfileTestCase(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 200, channel.result)
         return channel.json_body.get("avatar_url")
 
+    @unittest.override_config({"max_avatar_size": 50})
+    def test_avatar_size_limit_global(self):
+        """Tests that the maximum size limit for avatars is enforced when updating a
+        global profile.
+        """
+        self._setup_local_files(
+            {
+                "small": {"size": 40},
+                "big": {"size": 60},
+            }
+        )
+
+        channel = self.make_request(
+            "PUT",
+            f"/profile/{self.owner}/avatar_url",
+            content={"avatar_url": "mxc://test/big"},
+            access_token=self.owner_tok,
+        )
+        self.assertEqual(channel.code, 403, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body
+        )
+
+        channel = self.make_request(
+            "PUT",
+            f"/profile/{self.owner}/avatar_url",
+            content={"avatar_url": "mxc://test/small"},
+            access_token=self.owner_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+    @unittest.override_config({"max_avatar_size": 50})
+    def test_avatar_size_limit_per_room(self):
+        """Tests that the maximum size limit for avatars is enforced when updating a
+        per-room profile.
+        """
+        self._setup_local_files(
+            {
+                "small": {"size": 40},
+                "big": {"size": 60},
+            }
+        )
+
+        room_id = self.helper.create_room_as(tok=self.owner_tok)
+
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id}/state/m.room.member/{self.owner}",
+            content={"membership": "join", "avatar_url": "mxc://test/big"},
+            access_token=self.owner_tok,
+        )
+        self.assertEqual(channel.code, 403, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body
+        )
+
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id}/state/m.room.member/{self.owner}",
+            content={"membership": "join", "avatar_url": "mxc://test/small"},
+            access_token=self.owner_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+    @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]})
+    def test_avatar_allowed_mime_type_global(self):
+        """Tests that the MIME type whitelist for avatars is enforced when updating a
+        global profile.
+        """
+        self._setup_local_files(
+            {
+                "good": {"mimetype": "image/png"},
+                "bad": {"mimetype": "application/octet-stream"},
+            }
+        )
+
+        channel = self.make_request(
+            "PUT",
+            f"/profile/{self.owner}/avatar_url",
+            content={"avatar_url": "mxc://test/bad"},
+            access_token=self.owner_tok,
+        )
+        self.assertEqual(channel.code, 403, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body
+        )
+
+        channel = self.make_request(
+            "PUT",
+            f"/profile/{self.owner}/avatar_url",
+            content={"avatar_url": "mxc://test/good"},
+            access_token=self.owner_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+    @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]})
+    def test_avatar_allowed_mime_type_per_room(self):
+        """Tests that the MIME type whitelist for avatars is enforced when updating a
+        per-room profile.
+        """
+        self._setup_local_files(
+            {
+                "good": {"mimetype": "image/png"},
+                "bad": {"mimetype": "application/octet-stream"},
+            }
+        )
+
+        room_id = self.helper.create_room_as(tok=self.owner_tok)
+
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id}/state/m.room.member/{self.owner}",
+            content={"membership": "join", "avatar_url": "mxc://test/bad"},
+            access_token=self.owner_tok,
+        )
+        self.assertEqual(channel.code, 403, channel.result)
+        self.assertEqual(
+            channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body
+        )
+
+        channel = self.make_request(
+            "PUT",
+            f"/rooms/{room_id}/state/m.room.member/{self.owner}",
+            content={"membership": "join", "avatar_url": "mxc://test/good"},
+            access_token=self.owner_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+    def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]):
+        """Stores metadata about files in the database.
+
+        Args:
+            names_and_props: A dictionary with one entry per file, with the key being the
+                file's name, and the value being a dictionary of properties. Supported
+                properties are "mimetype" (for the file's type) and "size" (for the
+                file's size).
+        """
+        store = self.hs.get_datastore()
+
+        for name, props in names_and_props.items():
+            self.get_success(
+                store.store_local_media(
+                    media_id=name,
+                    media_type=props.get("mimetype", "image/png"),
+                    time_now_ms=self.clock.time_msec(),
+                    upload_name=None,
+                    media_length=props.get("size", 50),
+                    user_id=UserID.from_string("@rin:test"),
+                )
+            )
+
 
 class ProfilesRestrictedTestCase(unittest.HomeserverTestCase):
 
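For orientation, a minimal sketch of the kind of gate the avatar tests above exercise, assuming config values named like `max_avatar_size` and `allowed_avatar_mimetypes`; the real enforcement lives in Synapse's profile handler, and this helper is purely illustrative:

    from typing import List, Optional

    def avatar_allowed(
        size: int,
        mimetype: str,
        max_avatar_size: Optional[int],
        allowed_avatar_mimetypes: Optional[List[str]],
    ) -> bool:
        # Reject files over the configured byte limit.
        if max_avatar_size is not None and size > max_avatar_size:
            return False
        # Reject MIME types outside the configured allow-list.
        if allowed_avatar_mimetypes is not None and mimetype not in allowed_avatar_mimetypes:
            return False
        return True
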
diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py
index 6e7c0f11df..407dd32a73 100644
--- a/tests/rest/client/test_register.py
+++ b/tests/rest/client/test_register.py
@@ -726,6 +726,47 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
             {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"},
         )
 
+    @override_config(
+        {
+            "inhibit_user_in_use_error": True,
+        }
+    )
+    def test_inhibit_user_in_use_error(self):
+        """Tests that the 'inhibit_user_in_use_error' configuration flag behaves
+        correctly.
+        """
+        username = "arthur"
+
+        # Manually register the user, so we know the test isn't passing merely
+        # because no clash exists.
+        reg_handler = self.hs.get_registration_handler()
+        self.get_success(reg_handler.register_user(username))
+
+        # Check that /available correctly ignores the username provided despite it
+        # already being registered.
+        channel = self.make_request("GET", "register/available?username=" + username)
+        self.assertEquals(200, channel.code, channel.result)
+
+        # Test that when starting a UIA registration flow the request doesn't fail because
+        # of a conflicting username
+        channel = self.make_request(
+            "POST",
+            "register",
+            {"username": username, "type": "m.login.password", "password": "foo"},
+        )
+        self.assertEqual(channel.code, 401)
+        self.assertIn("session", channel.json_body)
+
+        # Test that finishing the registration fails because of a conflicting username.
+        session = channel.json_body["session"]
+        channel = self.make_request(
+            "POST",
+            "register",
+            {"auth": {"session": session, "type": LoginType.DUMMY}},
+        )
+        self.assertEqual(channel.code, 400, channel.json_body)
+        self.assertEqual(channel.json_body["errcode"], Codes.USER_IN_USE)
+
 
 class AccountValidityTestCase(unittest.HomeserverTestCase):
 
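To summarise the behaviour the new test pins down: with `inhibit_user_in_use_error` enabled, the early availability check and the UIA start succeed even for a taken username, and the clash only surfaces when registration completes. A toy sketch (illustrative names, not Synapse's handler API):

    def check_username(username: str, taken: set, inhibit_error: bool) -> None:
        # When the flag is on, early checks stay silent; the final
        # registration step still fails with M_USER_IN_USE.
        if username in taken and not inhibit_error:
            raise ValueError("M_USER_IN_USE")
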
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index c9b220e73d..96ae7790bb 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -577,7 +577,7 @@ class RelationsTestCase(unittest.HomeserverTestCase):
         self.assertEquals(200, channel.code, channel.json_body)
         room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
         self.assertTrue(room_timeline["limited"])
-        self._find_event_in_chunk(room_timeline["events"])
+        assert_bundle(self._find_event_in_chunk(room_timeline["events"]))
 
     def test_aggregation_get_event_for_annotation(self):
         """Test that annotations do not get bundled aggregations included
diff --git a/tests/rest/client/test_room_batch.py b/tests/rest/client/test_room_batch.py
index 721454c187..e9f8704035 100644
--- a/tests/rest/client/test_room_batch.py
+++ b/tests/rest/client/test_room_batch.py
@@ -89,7 +89,7 @@ class RoomBatchTestCase(unittest.HomeserverTestCase):
         self.clock = clock
         self.storage = hs.get_storage()
 
-        self.virtual_user_id = self.register_appservice_user(
+        self.virtual_user_id, _ = self.register_appservice_user(
             "as_user_potato", self.appservice.token
         )
 
diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index 8424383580..1c0cb0cf4f 100644
--- a/tests/rest/client/utils.py
+++ b/tests/rest/client/utils.py
@@ -31,6 +31,7 @@ from typing import (
     overload,
 )
 from unittest.mock import patch
+from urllib.parse import urlencode
 
 import attr
 from typing_extensions import Literal
@@ -147,12 +148,20 @@ class RestHelper:
             expect_code=expect_code,
         )
 
-    def join(self, room=None, user=None, expect_code=200, tok=None):
+    def join(
+        self,
+        room: str,
+        user: Optional[str] = None,
+        expect_code: int = 200,
+        tok: Optional[str] = None,
+        appservice_user_id: Optional[str] = None,
+    ) -> None:
         self.change_membership(
             room=room,
             src=user,
             targ=user,
             tok=tok,
+            appservice_user_id=appservice_user_id,
             membership=Membership.JOIN,
             expect_code=expect_code,
         )
@@ -209,11 +218,12 @@ class RestHelper:
     def change_membership(
         self,
         room: str,
-        src: str,
-        targ: str,
+        src: Optional[str],
+        targ: Optional[str],
         membership: str,
         extra_data: Optional[dict] = None,
         tok: Optional[str] = None,
+        appservice_user_id: Optional[str] = None,
         expect_code: int = 200,
         expect_errcode: Optional[str] = None,
     ) -> None:
@@ -227,15 +237,26 @@ class RestHelper:
             membership: The type of membership event
             extra_data: Extra information to include in the content of the event
             tok: The user access token to use
+            appservice_user_id: The `user_id` URL parameter to pass.
+                This allows driving an application service user
+                using an application service access token in `tok`.
             expect_code: The expected HTTP response code
             expect_errcode: The expected Matrix error code
         """
         temp_id = self.auth_user_id
         self.auth_user_id = src
 
-        path = "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" % (room, targ)
+        path = f"/_matrix/client/r0/rooms/{room}/state/m.room.member/{targ}"
+        url_params: Dict[str, str] = {}
+
         if tok:
-            path = path + "?access_token=%s" % tok
+            url_params["access_token"] = tok
+
+        if appservice_user_id:
+            url_params["user_id"] = appservice_user_id
+
+        if url_params:
+            path += "?" + urlencode(url_params)
 
         data = {"membership": membership}
         data.update(extra_data or {})
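
For reference, the `urlencode` call above also percent-escapes the appservice `user_id`, which the old string interpolation did not; with made-up values:

    from urllib.parse import urlencode

    params = {"access_token": "as_token_abc", "user_id": "@bot:example.org"}
    # "@" and ":" are escaped, so the MXID survives as a query parameter.
    assert urlencode(params) == "access_token=as_token_abc&user_id=%40bot%3Aexample.org"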
diff --git a/tests/test_preview.py b/tests/rest/media/v1/test_html_preview.py
index 46e02f483f..a4b57e3d1f 100644
--- a/tests/test_preview.py
+++ b/tests/rest/media/v1/test_html_preview.py
@@ -16,10 +16,11 @@ from synapse.rest.media.v1.preview_html import (
     _get_html_media_encodings,
     decode_body,
     parse_html_to_open_graph,
+    rebase_url,
     summarize_paragraphs,
 )
 
-from . import unittest
+from tests import unittest
 
 try:
     import lxml
@@ -447,3 +448,34 @@ class MediaEncodingTestCase(unittest.TestCase):
             'text/html; charset="invalid"',
         )
         self.assertEqual(list(encodings), ["utf-8", "cp1252"])
+
+
+class RebaseUrlTestCase(unittest.TestCase):
+    def test_relative(self):
+        """Relative URLs should be resolved based on the context of the base URL."""
+        self.assertEqual(
+            rebase_url("subpage", "https://example.com/foo/"),
+            "https://example.com/foo/subpage",
+        )
+        self.assertEqual(
+            rebase_url("sibling", "https://example.com/foo"),
+            "https://example.com/sibling",
+        )
+        self.assertEqual(
+            rebase_url("/bar", "https://example.com/foo/"),
+            "https://example.com/bar",
+        )
+
+    def test_absolute(self):
+        """Absolute URLs should not be modified."""
+        self.assertEqual(
+            rebase_url("https://alice.com/a/", "https://example.com/foo/"),
+            "https://alice.com/a/",
+        )
+
+    def test_data(self):
+        """Data URLs should not be modified."""
+        self.assertEqual(
+            rebase_url("data:,Hello%2C%20World%21", "https://example.com/foo/"),
+            "data:,Hello%2C%20World%21",
+        )
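
As a sanity check, these expectations match `urllib.parse.urljoin` on the same inputs (note the flipped argument order), which is a reasonable mental model for `rebase_url` assuming equivalent semantics:

    from urllib.parse import urljoin

    assert urljoin("https://example.com/foo/", "subpage") == "https://example.com/foo/subpage"
    assert urljoin("https://example.com/foo", "sibling") == "https://example.com/sibling"
    assert urljoin("https://example.com/foo/", "/bar") == "https://example.com/bar"
    # URLs carrying their own scheme (absolute, data:) pass through unchanged.
    assert urljoin("https://example.com/foo/", "data:,Hello%2C%20World%21") == "data:,Hello%2C%20World%21"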
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index 16e904f15b..53f6186213 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -12,9 +12,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import base64
 import json
 import os
 import re
+from urllib.parse import urlencode
 
 from twisted.internet._resolver import HostResolution
 from twisted.internet.address import IPv4Address, IPv6Address
@@ -23,6 +25,7 @@ from twisted.test.proto_helpers import AccumulatingProtocol
 
 from synapse.config.oembed import OEmbedEndpointConfig
 from synapse.rest.media.v1.preview_url_resource import IMAGE_CACHE_EXPIRY_MS
+from synapse.types import JsonDict
 from synapse.util.stringutils import parse_and_validate_mxc_uri
 
 from tests import unittest
@@ -142,6 +145,14 @@ class URLPreviewTests(unittest.HomeserverTestCase):
     def create_test_resource(self):
         return self.hs.get_media_repository_resource()
 
+    def _assert_small_png(self, json_body: JsonDict) -> None:
+        """Assert properties from the SMALL_PNG test image."""
+        self.assertTrue(json_body["og:image"].startswith("mxc://"))
+        self.assertEqual(json_body["og:image:height"], 1)
+        self.assertEqual(json_body["og:image:width"], 1)
+        self.assertEqual(json_body["og:image:type"], "image/png")
+        self.assertEqual(json_body["matrix:image:size"], 67)
+
     def test_cache_returns_correct_type(self):
         self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
 
@@ -569,6 +580,66 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             server.data,
         )
 
+    def test_data_url(self):
+        """
+        Requesting to preview a data URL is not supported.
+        """
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+
+        data = base64.b64encode(SMALL_PNG).decode()
+
+        query_params = urlencode(
+            {
+                "url": f'<html><head><img src="data:image/png;base64,{data}" /></head></html>'
+            }
+        )
+
+        channel = self.make_request(
+            "GET",
+            f"preview_url?{query_params}",
+            shorthand=False,
+        )
+        self.pump()
+
+        self.assertEqual(channel.code, 500)
+
+    def test_inline_data_url(self):
+        """
+        An inline image (as a data URL) should be parsed properly.
+        """
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+
+        data = base64.b64encode(SMALL_PNG)
+
+        end_content = (
+            b"<html><head>" b'<img src="data:image/png;base64,%s" />' b"</head></html>"
+        ) % (data,)
+
+        channel = self.make_request(
+            "GET",
+            "preview_url?url=http://matrix.org",
+            shorthand=False,
+            await_result=False,
+        )
+        self.pump()
+
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            (
+                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+            )
+            % (len(end_content),)
+            + end_content
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
+        self._assert_small_png(channel.json_body)
+
     def test_oembed_photo(self):
         """Test an oEmbed endpoint which returns a 'photo' type which redirects the preview to a new URL."""
         self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
@@ -626,10 +697,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 200)
         body = channel.json_body
         self.assertEqual(body["og:url"], "http://twitter.com/matrixdotorg/status/12345")
-        self.assertTrue(body["og:image"].startswith("mxc://"))
-        self.assertEqual(body["og:image:height"], 1)
-        self.assertEqual(body["og:image:width"], 1)
-        self.assertEqual(body["og:image:type"], "image/png")
+        self._assert_small_png(body)
 
     def test_oembed_rich(self):
         """Test an oEmbed endpoint which returns HTML content via the 'rich' type."""
@@ -820,10 +888,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(
             body["og:url"], "http://www.twitter.com/matrixdotorg/status/12345"
         )
-        self.assertTrue(body["og:image"].startswith("mxc://"))
-        self.assertEqual(body["og:image:height"], 1)
-        self.assertEqual(body["og:image:width"], 1)
-        self.assertEqual(body["og:image:type"], "image/png")
+        self._assert_small_png(body)
 
     def _download_image(self):
         """Downloads an image into the URL cache.
diff --git a/tests/server.py b/tests/server.py
index a0cd14ea45..82990c2eb9 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -313,7 +313,7 @@ def make_request(
     req = request(channel, site)
     req.content = BytesIO(content)
     # Twisted expects to be at the end of the content when parsing the request.
-    req.content.seek(SEEK_END)
+    req.content.seek(0, SEEK_END)
 
     if access_token:
         req.requestHeaders.addRawHeader(
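
The one-line `seek` fix above is easy to misread: `SEEK_END` is just the integer 2, so passing it as the only argument seeks to absolute offset 2 instead of the end of the buffer. A self-contained demonstration:

    from io import SEEK_END, BytesIO

    buf = BytesIO(b"payload")
    buf.seek(SEEK_END)       # absolute offset 2 -- not the end of the content
    assert buf.tell() == 2
    buf.seek(0, SEEK_END)    # offset 0 relative to the end, i.e. the real end
    assert buf.tell() == len(b"payload")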
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index 329490caad..ddcb7f5549 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -266,7 +266,9 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
         service = Mock(id=self.as_list[0]["id"])
         events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")])
         txn = self.get_success(
-            defer.ensureDeferred(self.store.create_appservice_txn(service, events, []))
+            defer.ensureDeferred(
+                self.store.create_appservice_txn(service, events, [], [])
+            )
         )
         self.assertEquals(txn.id, 1)
         self.assertEquals(txn.events, events)
@@ -280,7 +282,9 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
         self.get_success(self._set_last_txn(service.id, 9643))  # AS is falling behind
         self.get_success(self._insert_txn(service.id, 9644, events))
         self.get_success(self._insert_txn(service.id, 9645, events))
-        txn = self.get_success(self.store.create_appservice_txn(service, events, []))
+        txn = self.get_success(
+            self.store.create_appservice_txn(service, events, [], [])
+        )
         self.assertEquals(txn.id, 9646)
         self.assertEquals(txn.events, events)
         self.assertEquals(txn.service, service)
@@ -291,7 +295,9 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
         service = Mock(id=self.as_list[0]["id"])
         events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")])
         self.get_success(self._set_last_txn(service.id, 9643))
-        txn = self.get_success(self.store.create_appservice_txn(service, events, []))
+        txn = self.get_success(
+            self.store.create_appservice_txn(service, events, [], [])
+        )
         self.assertEquals(txn.id, 9644)
         self.assertEquals(txn.events, events)
         self.assertEquals(txn.service, service)
@@ -313,7 +319,9 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
         self.get_success(self._insert_txn(self.as_list[2]["id"], 10, events))
         self.get_success(self._insert_txn(self.as_list[3]["id"], 9643, events))
 
-        txn = self.get_success(self.store.create_appservice_txn(service, events, []))
+        txn = self.get_success(
+            self.store.create_appservice_txn(service, events, [], [])
+        )
         self.assertEquals(txn.id, 9644)
         self.assertEquals(txn.events, events)
         self.assertEquals(txn.service, service)
@@ -481,10 +489,10 @@ class ApplicationServiceStoreTypeStreamIds(unittest.HomeserverTestCase):
             ValueError,
         )
 
-    def test_set_type_stream_id_for_appservice(self) -> None:
+    def test_set_appservice_stream_type_pos(self) -> None:
         read_receipt_value = 1024
         self.get_success(
-            self.store.set_type_stream_id_for_appservice(
+            self.store.set_appservice_stream_type_pos(
                 self.service, "read_receipt", read_receipt_value
             )
         )
@@ -494,7 +502,7 @@ class ApplicationServiceStoreTypeStreamIds(unittest.HomeserverTestCase):
         self.assertEqual(result, read_receipt_value)
 
         self.get_success(
-            self.store.set_type_stream_id_for_appservice(
+            self.store.set_appservice_stream_type_pos(
                 self.service, "presence", read_receipt_value
             )
         )
@@ -503,9 +511,9 @@ class ApplicationServiceStoreTypeStreamIds(unittest.HomeserverTestCase):
         )
         self.assertEqual(result, read_receipt_value)
 
-    def test_set_type_stream_id_for_appservice_invalid_type(self) -> None:
+    def test_set_appservice_stream_type_pos_invalid_type(self) -> None:
         self.get_failure(
-            self.store.set_type_stream_id_for_appservice(self.service, "foobar", 1024),
+            self.store.set_appservice_stream_type_pos(self.service, "foobar", 1024),
             ValueError,
         )
 
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index 7b7f6c349e..e3273a93f9 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -19,6 +19,7 @@ from twisted.trial import unittest
 from synapse.api.constants import EventTypes
 from synapse.api.room_versions import RoomVersions
 from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
 from synapse.rest import admin
 from synapse.rest.client import login, room
 from synapse.storage.databases.main.events import _LinkMap
@@ -391,7 +392,9 @@ class EventChainStoreTestCase(HomeserverTestCase):
         def _persist(txn):
             # We need to persist the events to the events and state_events
             # tables.
-            persist_events_store._store_event_txn(txn, [(e, {}) for e in events])
+            persist_events_store._store_event_txn(
+                txn, [(e, EventContext()) for e in events]
+            )
 
             # Actually call the function that calculates the auth chain stuff.
             persist_events_store._persist_event_auth_chain_txn(txn, events)
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 7f5b28aed8..48f1e9d841 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -341,7 +341,9 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
         # Register an AS user.
         user = self.register_user("user", "pass")
         token = self.login(user, "pass")
-        as_user = self.register_appservice_user("as_user_potato", self.appservice.token)
+        as_user, _ = self.register_appservice_user(
+            "as_user_potato", self.appservice.token
+        )
 
         # Join the AS user to rooms owned by the normal user.
         public, private = self._create_rooms_and_inject_memberships(
diff --git a/tests/unittest.py b/tests/unittest.py
index 1431848367..6fc617601a 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -620,18 +620,19 @@ class HomeserverTestCase(TestCase):
         self,
         username: str,
         appservice_token: str,
-    ) -> str:
+    ) -> Tuple[str, str]:
         """Register an appservice user as an application service.
         Requires the client-facing registration API to be registered.
 
         Args:
             username: the user to be registered by an application service.
-                Should be a full username, i.e. ""@localpart:hostname" as opposed to just "localpart"
+                Should NOT be a full username, i.e. just "localpart" as opposed to "@localpart:hostname"
             appservice_token: the access token for that application service.
 
         Raises: AssertionError if the request to '/register' does not return 200 OK.
 
-        Returns: the MXID of the new user.
+        Returns:
+            The MXID of the new user, the device ID of the new user's first device.
         """
         channel = self.make_request(
             "POST",
@@ -643,7 +644,7 @@ class HomeserverTestCase(TestCase):
             access_token=appservice_token,
         )
         self.assertEqual(channel.code, 200, channel.json_body)
-        return channel.json_body["user_id"]
+        return channel.json_body["user_id"], channel.json_body["device_id"]
 
     def login(
         self,
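
Since `register_appservice_user` now returns a pair, call sites unpack it as the updated tests do; a minimal usage sketch:

    # Within a HomeserverTestCase; the device ID can be ignored if unused.
    user_id, device_id = self.register_appservice_user("as_user_potato", appservice_token)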
diff --git a/tox.ini b/tox.ini
index 2ffca14b22..32679e9106 100644
--- a/tox.ini
+++ b/tox.ini
@@ -117,8 +117,7 @@ usedevelop=true
 skip_install = true
 usedevelop = false
 deps =
-    # Old automat version for Twisted
-    Automat == 0.3.0
+    Automat == 0.8.0
     lxml
     {[base]deps}