author     Brendan Abolivier <babolivier@matrix.org>  2022-03-28 13:54:02 +0100
committer  Brendan Abolivier <babolivier@matrix.org>  2022-03-28 13:54:02 +0100
commit     25507bffc67c40e83cbcd4a79fdfee3667855a7c (patch)
tree       5620b2a06a5a9894ac875ddcf3b232db45cae48d
parent     Merge branch 'develop' of github.com:matrix-org/synapse into babolivier/sign_... (diff)
parent     Add restrictions by default to open registration in Synapse (#12091) (diff)
download   synapse-github/babolivier/sign_json_module.tar.xz
Merge branch 'develop' into babolivier/sign_json_module (github/babolivier/sign_json_module, babolivier/sign_json_module)
-rw-r--r--  .dockerignore  1
-rw-r--r--  .github/workflows/release-artifacts.yml  7
-rw-r--r--  .github/workflows/tests.yml  37
-rw-r--r--  CHANGES.md  140
-rw-r--r--  MANIFEST.in  2
-rw-r--r--  README.rst  3
-rw-r--r--  changelog.d/11998.doc  1
-rw-r--r--  changelog.d/12036.misc  1
-rw-r--r--  changelog.d/12038.misc  1
-rw-r--r--  changelog.d/12083.misc  1
-rw-r--r--  changelog.d/12087.bugfix  1
-rw-r--r--  changelog.d/12091.misc  1
-rw-r--r--  changelog.d/12108.misc  1
-rw-r--r--  changelog.d/12113.misc  1
-rw-r--r--  changelog.d/12118.misc  1
-rw-r--r--  changelog.d/12128.misc  1
-rw-r--r--  changelog.d/12136.misc  1
-rw-r--r--  changelog.d/12198.misc  1
-rw-r--r--  changelog.d/12199.misc  1
-rw-r--r--  changelog.d/12216.misc  1
-rw-r--r--  changelog.d/12219.misc  1
-rw-r--r--  changelog.d/12224.misc  1
-rw-r--r--  changelog.d/12225.misc  1
-rw-r--r--  changelog.d/12227.bugfix  1
-rw-r--r--  changelog.d/12228.bugfix  1
-rw-r--r--  changelog.d/12231.doc  1
-rw-r--r--  changelog.d/12232.bugfix  1
-rw-r--r--  changelog.d/12237.misc  1
-rw-r--r--  changelog.d/12240.misc  1
-rw-r--r--  changelog.d/12242.misc  1
-rw-r--r--  changelog.d/12243.doc  1
-rw-r--r--  changelog.d/12244.misc  1
-rw-r--r--  changelog.d/12246.doc  1
-rw-r--r--  changelog.d/12248.misc  1
-rw-r--r--  changelog.d/12250.feature  1
-rw-r--r--  changelog.d/12256.misc  1
-rw-r--r--  changelog.d/12258.misc  1
-rw-r--r--  changelog.d/12261.bugfix  1
-rw-r--r--  changelog.d/12262.misc  1
-rw-r--r--  changelog.d/12266.misc  1
-rw-r--r--  changelog.d/12269.misc  1
-rw-r--r--  changelog.d/12272.misc  1
-rw-r--r--  changelog.d/12275.doc  1
-rw-r--r--  changelog.d/12283.misc  1
-rw-r--r--  changelog.d/12285.bugfix  1
-rw-r--r--  changelog.d/12288.misc  1
-rw-r--r--  changelog.d/12291.misc  1
-rw-r--r--  changelog.d/12301.misc  1
-rw-r--r--  contrib/snap/snapcraft.yaml (renamed from snap/snapcraft.yaml)  0
-rw-r--r--  debian/changelog  30
-rw-r--r--  demo/.gitignore  11
-rw-r--r--  demo/README  26
-rwxr-xr-x  demo/clean.sh  3
-rwxr-xr-x  demo/start.sh  72
-rw-r--r--  docker/Dockerfile  2
-rw-r--r--  docs/SUMMARY.md  1
-rw-r--r--  docs/code_style.md  2
-rw-r--r--  docs/development/contributing_guide.md  11
-rw-r--r--  docs/development/demo.md  41
-rw-r--r--  docs/development/room-dag-concepts.md  71
-rw-r--r--  docs/federate.md  3
-rw-r--r--  docs/modules/spam_checker_callbacks.md  10
-rw-r--r--  docs/modules/third_party_rules_callbacks.md  43
-rw-r--r--  docs/openid.md  4
-rw-r--r--  docs/postgres.md  15
-rw-r--r--  docs/sample_config.yaml  58
-rw-r--r--  docs/templates.md  14
-rw-r--r--  docs/turn-howto.md  5
-rw-r--r--  docs/upgrade.md  58
-rw-r--r--  docs/workers.md  72
-rw-r--r--  mypy.ini  40
-rwxr-xr-x  scripts-dev/build_debian_packages.py (renamed from scripts-dev/build_debian_packages)  0
-rwxr-xr-x  scripts-dev/check-newsfragment.sh (renamed from scripts-dev/check-newsfragment)  0
-rwxr-xr-x  scripts-dev/complement.sh  2
-rwxr-xr-x  scripts-dev/generate_sample_config.sh (renamed from scripts-dev/generate_sample_config)  4
-rwxr-xr-x  scripts-dev/lint.sh  4
-rwxr-xr-x  scripts-dev/release.py  71
-rwxr-xr-x  scripts-dev/sign_json.py (renamed from scripts-dev/sign_json)  0
-rwxr-xr-x  setup.py  3
-rw-r--r--  stubs/txredisapi.pyi  9
-rw-r--r--  synapse/__init__.py  23
-rwxr-xr-x  synapse/_scripts/synctl.py (renamed from synctl)  0
-rw-r--r--  synapse/api/constants.py  4
-rw-r--r--  synapse/api/filtering.py  23
-rw-r--r--  synapse/app/generic_worker.py  5
-rw-r--r--  synapse/app/homeserver.py  24
-rw-r--r--  synapse/appservice/__init__.py  133
-rw-r--r--  synapse/appservice/api.py  24
-rw-r--r--  synapse/config/_base.pyi  2
-rw-r--r--  synapse/config/background_updates.py  68
-rw-r--r--  synapse/config/database.py  6
-rw-r--r--  synapse/config/experimental.py  3
-rw-r--r--  synapse/config/homeserver.py  2
-rw-r--r--  synapse/config/oidc.py  10
-rw-r--r--  synapse/config/registration.py  14
-rw-r--r--  synapse/config/server.py  4
-rw-r--r--  synapse/config/spam_checker.py  4
-rw-r--r--  synapse/events/spamcheck.py  11
-rw-r--r--  synapse/events/third_party_rules.py  65
-rw-r--r--  synapse/events/utils.py  101
-rw-r--r--  synapse/federation/federation_server.py  9
-rw-r--r--  synapse/federation/send_queue.py  2
-rw-r--r--  synapse/federation/sender/__init__.py  26
-rw-r--r--  synapse/federation/sender/per_destination_queue.py  10
-rw-r--r--  synapse/federation/transport/server/__init__.py  15
-rw-r--r--  synapse/federation/transport/server/_base.py  2
-rw-r--r--  synapse/handlers/account.py  11
-rw-r--r--  synapse/handlers/appservice.py  4
-rw-r--r--  synapse/handlers/deactivate_account.py  12
-rw-r--r--  synapse/handlers/device.py  4
-rw-r--r--  synapse/handlers/directory.py  6
-rw-r--r--  synapse/handlers/events.py  3
-rw-r--r--  synapse/handlers/federation.py  121
-rw-r--r--  synapse/handlers/initial_sync.py  14
-rw-r--r--  synapse/handlers/message.py  71
-rw-r--r--  synapse/handlers/oidc.py  18
-rw-r--r--  synapse/handlers/pagination.py  25
-rw-r--r--  synapse/handlers/presence.py  6
-rw-r--r--  synapse/handlers/profile.py  6
-rw-r--r--  synapse/handlers/receipts.py  2
-rw-r--r--  synapse/handlers/relations.py  271
-rw-r--r--  synapse/handlers/room.py  13
-rw-r--r--  synapse/handlers/room_batch.py  112
-rw-r--r--  synapse/handlers/room_member.py  33
-rw-r--r--  synapse/handlers/room_summary.py  15
-rw-r--r--  synapse/handlers/search.py  3
-rw-r--r--  synapse/handlers/sso.py  8
-rw-r--r--  synapse/handlers/sync.py  39
-rw-r--r--  synapse/handlers/typing.py  9
-rw-r--r--  synapse/handlers/user_directory.py  4
-rw-r--r--  synapse/http/matrixfederationclient.py  2
-rw-r--r--  synapse/http/server.py  1
-rw-r--r--  synapse/logging/context.py  44
-rw-r--r--  synapse/module_api/__init__.py  20
-rw-r--r--  synapse/push/baserules.py  38
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py  42
-rw-r--r--  synapse/push/clientformat.py  2
-rw-r--r--  synapse/python_dependencies.py  10
-rw-r--r--  synapse/replication/http/_base.py  47
-rw-r--r--  synapse/replication/slave/storage/client_ips.py  2
-rw-r--r--  synapse/replication/tcp/client.py  6
-rw-r--r--  synapse/replication/tcp/external_cache.py  4
-rw-r--r--  synapse/replication/tcp/handler.py  6
-rw-r--r--  synapse/replication/tcp/redis.py  8
-rw-r--r--  synapse/replication/tcp/resource.py  10
-rw-r--r--  synapse/replication/tcp/streams/_base.py  12
-rw-r--r--  synapse/res/templates/sso_auth_account_details.html  10
-rw-r--r--  synapse/res/templates/sso_auth_account_details.js  2
-rw-r--r--  synapse/res/templates/sso_footer.html  2
-rw-r--r--  synapse/rest/__init__.py  7
-rw-r--r--  synapse/rest/admin/__init__.py  3
-rw-r--r--  synapse/rest/admin/rooms.py  9
-rw-r--r--  synapse/rest/client/mutual_rooms.py (renamed from synapse/rest/client/shared_rooms.py)  21
-rw-r--r--  synapse/rest/client/notifications.py  9
-rw-r--r--  synapse/rest/client/relations.py  188
-rw-r--r--  synapse/rest/client/room.py  3
-rw-r--r--  synapse/rest/client/room_batch.py  23
-rw-r--r--  synapse/rest/client/sync.py  132
-rw-r--r--  synapse/rest/client/user_directory.py  4
-rw-r--r--  synapse/rest/client/versions.py  1
-rw-r--r--  synapse/rest/media/v1/_base.py  1
-rw-r--r--  synapse/rest/media/v1/preview_html.py  39
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py  23
-rw-r--r--  synapse/rest/media/v1/storage_provider.py  9
-rw-r--r--  synapse/rest/synapse/client/pick_username.py  8
-rw-r--r--  synapse/server.py  14
-rw-r--r--  synapse/state/__init__.py  6
-rw-r--r--  synapse/storage/background_updates.py  47
-rw-r--r--  synapse/storage/database.py  75
-rw-r--r--  synapse/storage/databases/main/account_data.py  41
-rw-r--r--  synapse/storage/databases/main/cache.py  67
-rw-r--r--  synapse/storage/databases/main/deviceinbox.py  3
-rw-r--r--  synapse/storage/databases/main/events.py  61
-rw-r--r--  synapse/storage/databases/main/events_worker.py  4
-rw-r--r--  synapse/storage/databases/main/group_server.py  156
-rw-r--r--  synapse/storage/databases/main/monthly_active_users.py  38
-rw-r--r--  synapse/storage/databases/main/registration.py  2
-rw-r--r--  synapse/storage/databases/main/relations.py  218
-rw-r--r--  synapse/storage/databases/main/roommember.py  51
-rw-r--r--  synapse/storage/databases/main/search.py  13
-rw-r--r--  synapse/storage/databases/main/stream.py  18
-rw-r--r--  synapse/storage/databases/main/user_directory.py  29
-rw-r--r--  synapse/storage/engines/__init__.py  2
-rw-r--r--  synapse/storage/engines/postgres.py  45
-rw-r--r--  synapse/storage/persist_events.py  15
-rw-r--r--  synapse/storage/relations.py  31
-rw-r--r--  synapse/storage/schema/main/delta/30/as_users.py  1
-rw-r--r--  synapse/storage/state.py  8
-rw-r--r--  synapse/types.py  11
-rw-r--r--  synapse/util/__init__.py  7
-rw-r--r--  synapse/util/async_helpers.py  131
-rw-r--r--  synapse/util/caches/descriptors.py  85
-rw-r--r--  synapse/util/caches/treecache.py  2
-rw-r--r--  synapse/util/check_dependencies.py  86
-rw-r--r--  synapse/util/templates.py  5
-rw-r--r--  synapse/visibility.py  35
-rw-r--r--  tests/appservice/test_appservice.py  45
-rw-r--r--  tests/config/test_background_update.py  58
-rw-r--r--  tests/config/test_registration_config.py  22
-rw-r--r--  tests/events/test_utils.py  5
-rw-r--r--  tests/federation/test_federation_sender.py  52
-rw-r--r--  tests/handlers/test_admin.py  18
-rw-r--r--  tests/handlers/test_appservice.py  56
-rw-r--r--  tests/handlers/test_auth.py  34
-rw-r--r--  tests/handlers/test_cas.py  19
-rw-r--r--  tests/handlers/test_deactivate_account.py  2
-rw-r--r--  tests/handlers/test_device.py  76
-rw-r--r--  tests/handlers/test_directory.py  84
-rw-r--r--  tests/handlers/test_e2e_keys.py  36
-rw-r--r--  tests/handlers/test_federation.py  36
-rw-r--r--  tests/handlers/test_oidc.py  94
-rw-r--r--  tests/handlers/test_password_providers.py  1
-rw-r--r--  tests/handlers/test_presence.py  13
-rw-r--r--  tests/handlers/test_profile.py  49
-rw-r--r--  tests/handlers/test_room_summary.py  3
-rw-r--r--  tests/handlers/test_saml.py  24
-rw-r--r--  tests/handlers/test_typing.py  35
-rw-r--r--  tests/module_api/test_api.py  10
-rw-r--r--  tests/push/test_http.py  106
-rw-r--r--  tests/push/test_push_rule_evaluator.py  23
-rw-r--r--  tests/replication/_base.py  4
-rw-r--r--  tests/replication/tcp/streams/test_events.py  2
-rw-r--r--  tests/replication/tcp/streams/test_typing.py  2
-rw-r--r--  tests/replication/test_federation_ack.py  2
-rw-r--r--  tests/rest/admin/test_background_updates.py  21
-rw-r--r--  tests/rest/admin/test_user.py  19
-rw-r--r--  tests/rest/client/test_account.py  58
-rw-r--r--  tests/rest/client/test_mutual_rooms.py (renamed from tests/rest/client/test_shared_rooms.py)  30
-rw-r--r--  tests/rest/client/test_relations.py  1536
-rw-r--r--  tests/rest/client/test_retention.py  29
-rw-r--r--  tests/rest/client/test_rooms.py  18
-rw-r--r--  tests/rest/client/test_third_party_rules.py  121
-rw-r--r--  tests/rest/client/test_transactions.py  19
-rw-r--r--  tests/rest/key/v2/test_remote_key_resource.py  44
-rw-r--r--  tests/rest/media/v1/test_base.py  4
-rw-r--r--  tests/rest/media/v1/test_filepath.py  48
-rw-r--r--  tests/rest/media/v1/test_html_preview.py  102
-rw-r--r--  tests/rest/media/v1/test_media_storage.py  110
-rw-r--r--  tests/rest/media/v1/test_oembed.py  10
-rw-r--r--  tests/rest/media/v1/test_url_preview.py  83
-rw-r--r--  tests/rest/test_health.py  8
-rw-r--r--  tests/rest/test_well_known.py  20
-rw-r--r--  tests/server.py  20
-rw-r--r--  tests/storage/test_account_data.py  17
-rw-r--r--  tests/storage/test_background_update.py  273
-rw-r--r--  tests/storage/test_database.py  176
-rw-r--r--  tests/storage/test_id_generators.py  80
-rw-r--r--  tests/storage/test_stream.py  20
-rw-r--r--  tests/storage/test_unsafe_locale.py  46
-rw-r--r--  tests/test_visibility.py  76
-rw-r--r--  tests/util/caches/test_descriptors.py  231
-rw-r--r--  tests/util/test_async_helpers.py  152
-rw-r--r--  tests/util/test_check_dependencies.py  53
-rw-r--r--  tests/util/test_rwlock.py  395
-rw-r--r--  tests/utils.py  110
-rw-r--r--  tox.ini  30
256 files changed, 6188 insertions, 3054 deletions
diff --git a/.dockerignore b/.dockerignore
index 617f701597..434231fce9 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -7,6 +7,5 @@
 !MANIFEST.in
 !README.rst
 !setup.py
-!synctl
 
 **/__pycache__
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index eee3633d50..ed4fc6179d 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -31,7 +31,7 @@ jobs:
           # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
           dists='["debian:sid"]'
           if [[ $GITHUB_REF == refs/tags/* ]]; then
-              dists=$(scripts-dev/build_debian_packages --show-dists-json)
+              dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
           fi
           echo "::set-output name=distros::$dists"
     # map the step outputs to job outputs
@@ -74,7 +74,7 @@ jobs:
         # see https://github.com/docker/build-push-action/issues/252
         # for the cache magic here
         run: |
-          ./src/scripts-dev/build_debian_packages \
+          ./src/scripts-dev/build_debian_packages.py \
             --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
             --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
             --docker-build-arg=--progress=plain \
@@ -112,7 +112,8 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
           files: |
-            python-dist/*
+            Sdist/*
+            Wheel/*
             debs.tar.xz
           # if it's not already published, keep the release as a draft.
           draft: true
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index e9e4277322..2afddf58d1 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -16,7 +16,8 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
       - run: pip install -e .
-      - run: scripts-dev/generate_sample_config --check
+      - run: scripts-dev/generate_sample_config.sh --check
+      - run: scripts-dev/config-lint.sh
 
   lint:
     runs-on: ubuntu-latest
@@ -51,7 +52,7 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-python@v2
       - run: "pip install 'towncrier>=18.6.0rc1'"
-      - run: scripts-dev/check-newsfragment
+      - run: scripts-dev/check-newsfragment.sh
         env:
           PULL_REQUEST_NUMBER: ${{ github.event.number }}
 
@@ -376,7 +377,7 @@ jobs:
       # Run Complement
       - run: |
           set -o pipefail
-          go test -v -json -p 1 -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt
+          go test -v -json -tags synapse_blacklist,msc2403,msc2716,msc3030 ./tests/... 2>&1 | gotestfmt
         shell: bash
         name: Run Complement Tests
         env:
@@ -387,34 +388,22 @@ jobs:
   tests-done:
     if: ${{ always() }}
     needs:
+      - check-sampleconfig
       - lint
       - lint-crlf
       - lint-newsfile
       - trial
       - trial-olddeps
       - sytest
+      - export-data
       - portdb
       - complement
     runs-on: ubuntu-latest
     steps:
-      - name: Set build result
-        env:
-          NEEDS_CONTEXT: ${{ toJSON(needs) }}
-        # the `jq` incantation dumps out a series of "<job> <result>" lines.
-        # we set it to an intermediate variable to avoid a pipe, which makes it
-        # hard to set $rc.
-        run: |
-          rc=0
-          results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT)
-          while read job result ; do
-              # The newsfile lint may be skipped on non PR builds
-              if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then
-                continue
-              fi
-
-              if [ "$result" != "success" ]; then
-                  echo "::set-failed ::Job $job returned $result"
-                  rc=1
-              fi
-          done <<< $results
-          exit $rc
+      - uses: matrix-org/done-action@v2
+        with:
+          needs: ${{ toJSON(needs) }}
+
+          # The newsfile lint may be skipped on non PR builds
+          skippable:
+            lint-newsfile
diff --git a/CHANGES.md b/CHANGES.md
index 0a87f5cd42..b0244a16f0 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,10 +1,148 @@
-Synapse 1.54.0rc1 (2022-03-02)
+Synapse 1.55.2 (2022-03-24)
+===========================
+
+This patch version reverts the earlier fixes from Synapse 1.55.1, which could cause problems in certain deployments, and instead adds a cap to the version of Jinja to be installed. Again, this is to fix an incompatibility with version 3.1.0 of the [Jinja](https://pypi.org/project/Jinja2/) library, and again, deployments of Synapse using the `matrixdotorg/synapse` Docker image or Debian packages from packages.matrix.org are not affected.
+
+Internal Changes
+----------------
+
+- Pin Jinja to <3.1.0, as Synapse fails to start with Jinja 3.1.0. ([\#12297](https://github.com/matrix-org/synapse/issues/12297))
+- Revert changes from 1.55.1 as they caused problems with older versions of Jinja. ([\#12296](https://github.com/matrix-org/synapse/issues/12296))
+
+
+Synapse 1.55.1 (2022-03-24)
+===========================
+
+This is a patch release that fixes an incompatibility with version 3.1.0 of the [Jinja](https://pypi.org/project/Jinja2/) library, released on March 24th, 2022. Deployments of Synapse using the `matrixdotorg/synapse` Docker image or Debian packages from packages.matrix.org are not affected.
+
+Internal Changes
+----------------
+
+- Remove uses of the long-deprecated `jinja2.Markup` which would prevent Synapse from starting with Jinja 3.1.0 or above installed. ([\#12289](https://github.com/matrix-org/synapse/issues/12289))
+
+
+Synapse 1.55.0 (2022-03-22)
+===========================
+
+This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version.
+
+This release also moves the location of the `synctl` script; see the [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved) for more details.
+
+
+Internal Changes
+----------------
+
+- Tweak copy for default Single Sign-On account details template to better adhere to mobile app store guidelines. ([\#12265](https://github.com/matrix-org/synapse/issues/12265), [\#12260](https://github.com/matrix-org/synapse/issues/12260))
+
+
+Synapse 1.55.0rc1 (2022-03-15)
 ==============================
 
+Features
+--------
+
+- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028))
+- Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132))
+- Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135))
+- Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151))
+- Add a new Jinja2 template filter to extract the local part of an email address. ([\#12212](https://github.com/matrix-org/synapse/issues/12212))
+
+
+Bugfixes
+--------
+
+- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
+- Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189))
+- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157))
+- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
+- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215))
+- Fix a long-standing bug when a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234))
+
+
+Improved Documentation
+----------------------
+
+- Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. ([\#11998](https://github.com/matrix-org/synapse/issues/11998))
+- Improve documentation for demo scripts. ([\#12143](https://github.com/matrix-org/synapse/issues/12143))
+- Updates to the Room DAG concepts development document. ([\#12179](https://github.com/matrix-org/synapse/issues/12179))
+- Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. ([\#12196](https://github.com/matrix-org/synapse/issues/12196))
+- Document that contributors can sign off privately by email. ([\#12204](https://github.com/matrix-org/synapse/issues/12204))
+
+
+Deprecations and Removals
+-------------------------
+
+- **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**
+- **`synctl` has been moved into `synapse._scripts` and is exposed as an entry point; see [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved). ([\#12140](https://github.com/matrix-org/synapse/issues/12140))**
+- Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138))
+- The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200))
+
+
+Internal Changes
+----------------
+
+- Simplify the `ApplicationService` class' set of public methods related to interest checking. ([\#11915](https://github.com/matrix-org/synapse/issues/11915))
+- Add config settings for background update parameters. ([\#11980](https://github.com/matrix-org/synapse/issues/11980))
+- Correct type hints for txredis. ([\#12042](https://github.com/matrix-org/synapse/issues/12042))
+- Limit the size of `aggregation_key` on annotations. ([\#12101](https://github.com/matrix-org/synapse/issues/12101))
+- Add type hints to tests files. ([\#12108](https://github.com/matrix-org/synapse/issues/12108), [\#12146](https://github.com/matrix-org/synapse/issues/12146), [\#12207](https://github.com/matrix-org/synapse/issues/12207), [\#12208](https://github.com/matrix-org/synapse/issues/12208))
+- Move scripts to Synapse package and expose as setuptools entry points. ([\#12118](https://github.com/matrix-org/synapse/issues/12118))
+- Add support for cancellation to `ReadWriteLock`. ([\#12120](https://github.com/matrix-org/synapse/issues/12120))
+- Fix data validation to compare to lists, not sequences. ([\#12128](https://github.com/matrix-org/synapse/issues/12128))
+- Fix CI not attaching source distributions and wheels to the GitHub releases. ([\#12131](https://github.com/matrix-org/synapse/issues/12131))
+- Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136))
+- Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137))
+- Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142))
+- Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144))
+- Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145))
+- Add test for `ObservableDeferred`'s cancellation behaviour. ([\#12149](https://github.com/matrix-org/synapse/issues/12149))
+- Use `ParamSpec` in type hints for `synapse.logging.context`. ([\#12150](https://github.com/matrix-org/synapse/issues/12150))
+- Prune unused jobs from `tox` config. ([\#12152](https://github.com/matrix-org/synapse/issues/12152))
+- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12153](https://github.com/matrix-org/synapse/issues/12153))
+- Avoid generating state groups for local out-of-band leaves. ([\#12154](https://github.com/matrix-org/synapse/issues/12154))
+- Avoid trying to calculate the state at outlier events. ([\#12155](https://github.com/matrix-org/synapse/issues/12155), [\#12173](https://github.com/matrix-org/synapse/issues/12173), [\#12202](https://github.com/matrix-org/synapse/issues/12202))
+- Fix some type annotations. ([\#12156](https://github.com/matrix-org/synapse/issues/12156))
+- Add type hints for `ObservableDeferred` attributes. ([\#12159](https://github.com/matrix-org/synapse/issues/12159))
+- Use a prebuilt Action for the `tests-done` CI job. ([\#12161](https://github.com/matrix-org/synapse/issues/12161))
+- Reduce number of DB queries made during processing of `/sync`. ([\#12163](https://github.com/matrix-org/synapse/issues/12163))
+- Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. ([\#12180](https://github.com/matrix-org/synapse/issues/12180))
+- Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. ([\#12182](https://github.com/matrix-org/synapse/issues/12182))
+- Add cancellation support to `@cached` and `@cachedList` decorators. ([\#12183](https://github.com/matrix-org/synapse/issues/12183))
+- Remove unused variables. ([\#12187](https://github.com/matrix-org/synapse/issues/12187))
+- Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. ([\#12188](https://github.com/matrix-org/synapse/issues/12188))
+- Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. ([\#12192](https://github.com/matrix-org/synapse/issues/12192))
+- Remove some dead code. ([\#12197](https://github.com/matrix-org/synapse/issues/12197))
+- Fix a misleading comment in the function `check_event_for_spam`. ([\#12203](https://github.com/matrix-org/synapse/issues/12203))
+- Remove unnecessary `pass` statements. ([\#12206](https://github.com/matrix-org/synapse/issues/12206))
+- Update the SSO username picker template to comply with SIWA guidelines. ([\#12210](https://github.com/matrix-org/synapse/issues/12210))
+- Improve code documentation for the typing stream over replication. ([\#12211](https://github.com/matrix-org/synapse/issues/12211))
+
+
+Synapse 1.54.0 (2022-03-08)
+===========================
+
 Please note that this will be the last release of Synapse that is compatible with Mjolnir 1.3.1 and earlier.
 Administrators of servers which have the Mjolnir module installed are advised to upgrade Mjolnir to version 1.3.2 or later.
 
 
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. ([\#12141](https://github.com/matrix-org/synapse/issues/12141))
+- Fix a bug introduced in Synapse 1.54.0rc1 where runtime dependency version checks would mistakenly check development dependencies if they were present and would not accept pre-release versions of dependencies. ([\#12129](https://github.com/matrix-org/synapse/issues/12129), [\#12177](https://github.com/matrix-org/synapse/issues/12177))
+
+
+Internal Changes
+----------------
+
+- Update release script to insert the previous version when writing "No significant changes" line in the changelog. ([\#12127](https://github.com/matrix-org/synapse/issues/12127))
+- Relax the version guard for "packaging" added in [\#12088](https://github.com/matrix-org/synapse/issues/12088). ([\#12166](https://github.com/matrix-org/synapse/issues/12166))
+
+
+Synapse 1.54.0rc1 (2022-03-02)
+==============================
+
+
 Features
 --------
 
diff --git a/MANIFEST.in b/MANIFEST.in
index 7e903518e1..d744c090ac 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,3 @@
-include synctl
 include LICENSE
 include VERSION
 include *.rst
@@ -52,5 +51,4 @@ prune contrib
 prune debian
 prune demo/etc
 prune docker
-prune snap
 prune stubs
diff --git a/README.rst b/README.rst
index 4281c87d1f..595fb5ff62 100644
--- a/README.rst
+++ b/README.rst
@@ -312,6 +312,9 @@ We recommend using the demo which starts 3 federated instances running on ports
 
 (to stop, you can use `./demo/stop.sh`)
 
+See the [demo documentation](https://matrix-org.github.io/synapse/develop/development/demo.html)
+for more information.
+
 If you just want to start a single instance of the app and run it directly::
 
     # Create the homeserver.yaml config once
diff --git a/changelog.d/11998.doc b/changelog.d/11998.doc
deleted file mode 100644
index 33ab7b7880..0000000000
--- a/changelog.d/11998.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page.
\ No newline at end of file
diff --git a/changelog.d/12036.misc b/changelog.d/12036.misc
new file mode 100644
index 0000000000..d2996730cc
--- /dev/null
+++ b/changelog.d/12036.misc
@@ -0,0 +1 @@
+Rename `shared_rooms` to `mutual_rooms` (MSC2666), as per proposal changes.
\ No newline at end of file
diff --git a/changelog.d/12038.misc b/changelog.d/12038.misc
new file mode 100644
index 0000000000..e2a65726b6
--- /dev/null
+++ b/changelog.d/12038.misc
@@ -0,0 +1 @@
+Remove check on `update_user_directory` for shared rooms handler (MSC2666), and update/expand documentation.
\ No newline at end of file
diff --git a/changelog.d/12083.misc b/changelog.d/12083.misc
new file mode 100644
index 0000000000..88fd6b92ee
--- /dev/null
+++ b/changelog.d/12083.misc
@@ -0,0 +1 @@
+Refactor `create_new_client_event` to use a new parameter, `state_event_ids`, which accurately describes the usage with [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) instead of abusing `auth_event_ids`.
diff --git a/changelog.d/12087.bugfix b/changelog.d/12087.bugfix
new file mode 100644
index 0000000000..6dacdddd0d
--- /dev/null
+++ b/changelog.d/12087.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug which caused the `/_matrix/federation/v1/state` and `.../state_ids` endpoints to return incorrect or invalid data when called for an event which we have stored as an "outlier".
diff --git a/changelog.d/12091.misc b/changelog.d/12091.misc
new file mode 100644
index 0000000000..def44987b4
--- /dev/null
+++ b/changelog.d/12091.misc
@@ -0,0 +1 @@
+Refuse to start if registration is enabled without email, captcha, or token-based verification unless new config flag `enable_registration_without_verification` is set.
diff --git a/changelog.d/12108.misc b/changelog.d/12108.misc
deleted file mode 100644
index 0360dbd61e..0000000000
--- a/changelog.d/12108.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type hints to `tests/rest/client`.
diff --git a/changelog.d/12113.misc b/changelog.d/12113.misc
deleted file mode 100644
index 102e064053..0000000000
--- a/changelog.d/12113.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor the tests for event relations.
diff --git a/changelog.d/12118.misc b/changelog.d/12118.misc
deleted file mode 100644
index a2c397d907..0000000000
--- a/changelog.d/12118.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move scripts to Synapse package and expose as setuptools entry points.
diff --git a/changelog.d/12128.misc b/changelog.d/12128.misc
deleted file mode 100644
index 0570a8e327..0000000000
--- a/changelog.d/12128.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix data validation to compare to lists, not sequences.
diff --git a/changelog.d/12136.misc b/changelog.d/12136.misc
deleted file mode 100644
index 98b1c1c9d8..0000000000
--- a/changelog.d/12136.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove unused mocks from `test_typing`.
\ No newline at end of file
diff --git a/changelog.d/12198.misc b/changelog.d/12198.misc
new file mode 100644
index 0000000000..6b184a9053
--- /dev/null
+++ b/changelog.d/12198.misc
@@ -0,0 +1 @@
+Add tests for database transaction callbacks.
diff --git a/changelog.d/12199.misc b/changelog.d/12199.misc
new file mode 100644
index 0000000000..16dec1d26d
--- /dev/null
+++ b/changelog.d/12199.misc
@@ -0,0 +1 @@
+Handle cancellation in `DatabasePool.runInteraction()`.
diff --git a/changelog.d/12216.misc b/changelog.d/12216.misc
new file mode 100644
index 0000000000..dc398ac1e0
--- /dev/null
+++ b/changelog.d/12216.misc
@@ -0,0 +1 @@
+Add missing type hints for cache storage.
diff --git a/changelog.d/12219.misc b/changelog.d/12219.misc
new file mode 100644
index 0000000000..6079414092
--- /dev/null
+++ b/changelog.d/12219.misc
@@ -0,0 +1 @@
+Clean-up logic around rebasing URLs for URL image previews.
diff --git a/changelog.d/12224.misc b/changelog.d/12224.misc
new file mode 100644
index 0000000000..b67a701dbb
--- /dev/null
+++ b/changelog.d/12224.misc
@@ -0,0 +1 @@
+Add type hints to tests files.
diff --git a/changelog.d/12225.misc b/changelog.d/12225.misc
new file mode 100644
index 0000000000..23105c727c
--- /dev/null
+++ b/changelog.d/12225.misc
@@ -0,0 +1 @@
+Use the `ignored_users` table in additional places instead of re-parsing the account data.
diff --git a/changelog.d/12227.bugfix b/changelog.d/12227.bugfix
new file mode 100644
index 0000000000..1a7dccf465
--- /dev/null
+++ b/changelog.d/12227.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where events from ignored users were still considered for relations.
diff --git a/changelog.d/12228.bugfix b/changelog.d/12228.bugfix
new file mode 100644
index 0000000000..4755777139
--- /dev/null
+++ b/changelog.d/12228.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in v1.53.0 where an unnecessary query could be performed when fetching bundled aggregations for threads.
diff --git a/changelog.d/12231.doc b/changelog.d/12231.doc
new file mode 100644
index 0000000000..16593d2b92
--- /dev/null
+++ b/changelog.d/12231.doc
@@ -0,0 +1 @@
+Fix the link to the module documentation in the legacy spam checker warning message.
diff --git a/changelog.d/12232.bugfix b/changelog.d/12232.bugfix
new file mode 100644
index 0000000000..1a7dccf465
--- /dev/null
+++ b/changelog.d/12232.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where events from ignored users were still considered for relations.
diff --git a/changelog.d/12237.misc b/changelog.d/12237.misc
new file mode 100644
index 0000000000..41c9dcbd37
--- /dev/null
+++ b/changelog.d/12237.misc
@@ -0,0 +1 @@
+Refactor the relations endpoints to add a `RelationsHandler`.
diff --git a/changelog.d/12240.misc b/changelog.d/12240.misc
new file mode 100644
index 0000000000..c5b6356799
--- /dev/null
+++ b/changelog.d/12240.misc
@@ -0,0 +1 @@
+Add type hints to tests files.
\ No newline at end of file
diff --git a/changelog.d/12242.misc b/changelog.d/12242.misc
new file mode 100644
index 0000000000..38e7e0f7d1
--- /dev/null
+++ b/changelog.d/12242.misc
@@ -0,0 +1 @@
+Generate announcement links in the release script.
diff --git a/changelog.d/12243.doc b/changelog.d/12243.doc
new file mode 100644
index 0000000000..b2031f0a40
--- /dev/null
+++ b/changelog.d/12243.doc
@@ -0,0 +1 @@
+Remove incorrect prefixes in the worker documentation for some endpoints.
diff --git a/changelog.d/12244.misc b/changelog.d/12244.misc
new file mode 100644
index 0000000000..950d48e4c6
--- /dev/null
+++ b/changelog.d/12244.misc
@@ -0,0 +1 @@
+Improve error message when dependencies check finds a broken installation.
\ No newline at end of file
diff --git a/changelog.d/12246.doc b/changelog.d/12246.doc
new file mode 100644
index 0000000000..e7fcc1b99c
--- /dev/null
+++ b/changelog.d/12246.doc
@@ -0,0 +1 @@
+Correct `check_username_for_spam` annotations and docs.
\ No newline at end of file
diff --git a/changelog.d/12248.misc b/changelog.d/12248.misc
new file mode 100644
index 0000000000..2b1290d1e1
--- /dev/null
+++ b/changelog.d/12248.misc
@@ -0,0 +1 @@
+Add missing type hints for storage.
\ No newline at end of file
diff --git a/changelog.d/12250.feature b/changelog.d/12250.feature
new file mode 100644
index 0000000000..29a2724457
--- /dev/null
+++ b/changelog.d/12250.feature
@@ -0,0 +1 @@
+Allow registering admin users using the module API. Contributed by Famedly.
diff --git a/changelog.d/12256.misc b/changelog.d/12256.misc
new file mode 100644
index 0000000000..c5b6356799
--- /dev/null
+++ b/changelog.d/12256.misc
@@ -0,0 +1 @@
+Add type hints to tests files.
\ No newline at end of file
diff --git a/changelog.d/12258.misc b/changelog.d/12258.misc
new file mode 100644
index 0000000000..80024c8e91
--- /dev/null
+++ b/changelog.d/12258.misc
@@ -0,0 +1 @@
+Compress metrics HTTP resource when enabled. Contributed by Nick @ Beeper.
diff --git a/changelog.d/12261.bugfix b/changelog.d/12261.bugfix
new file mode 100644
index 0000000000..1bfde4c380
--- /dev/null
+++ b/changelog.d/12261.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.52 where admins could not deactivate and GDPR-erase a user if Synapse was configured with limits on avatars.
diff --git a/changelog.d/12262.misc b/changelog.d/12262.misc
new file mode 100644
index 0000000000..574ac4752c
--- /dev/null
+++ b/changelog.d/12262.misc
@@ -0,0 +1 @@
+Refuse to start if DB has non-`C` locale, unless config flag `allow_unsafe_db_locale` is set to true.
\ No newline at end of file
diff --git a/changelog.d/12266.misc b/changelog.d/12266.misc
new file mode 100644
index 0000000000..59e2718370
--- /dev/null
+++ b/changelog.d/12266.misc
@@ -0,0 +1 @@
+Optionally include account validity expiration information to experimental [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) account status responses.
diff --git a/changelog.d/12269.misc b/changelog.d/12269.misc
new file mode 100644
index 0000000000..ed79cbb528
--- /dev/null
+++ b/changelog.d/12269.misc
@@ -0,0 +1 @@
+Use type stubs for `psycopg2`.
diff --git a/changelog.d/12272.misc b/changelog.d/12272.misc
new file mode 100644
index 0000000000..95589f3361
--- /dev/null
+++ b/changelog.d/12272.misc
@@ -0,0 +1 @@
+Add a new cache `_get_membership_from_event_id` to speed up push rule calculations in large rooms.
diff --git a/changelog.d/12275.doc b/changelog.d/12275.doc
new file mode 100644
index 0000000000..2e26ad21eb
--- /dev/null
+++ b/changelog.d/12275.doc
@@ -0,0 +1 @@
+Corrected Authentik OpenID typo, added helpful note for troubleshooting. Contributed by @IronTooch.
diff --git a/changelog.d/12283.misc b/changelog.d/12283.misc
new file mode 100644
index 0000000000..e9f2208500
--- /dev/null
+++ b/changelog.d/12283.misc
@@ -0,0 +1 @@
+Re-enable Complement concurrency in CI.
diff --git a/changelog.d/12285.bugfix b/changelog.d/12285.bugfix
new file mode 100644
index 0000000000..1a7dccf465
--- /dev/null
+++ b/changelog.d/12285.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where events from ignored users were still considered for relations.
diff --git a/changelog.d/12288.misc b/changelog.d/12288.misc
new file mode 100644
index 0000000000..ee8fbfd290
--- /dev/null
+++ b/changelog.d/12288.misc
@@ -0,0 +1 @@
+Refuse to start if DB has non-`C` locale, unless config flag `allow_unsafe_db_locale` is set to true.
diff --git a/changelog.d/12291.misc b/changelog.d/12291.misc
new file mode 100644
index 0000000000..b55dd68f92
--- /dev/null
+++ b/changelog.d/12291.misc
@@ -0,0 +1 @@
+Remove unused test utilities.
diff --git a/changelog.d/12301.misc b/changelog.d/12301.misc
new file mode 100644
index 0000000000..a4cd94ee5e
--- /dev/null
+++ b/changelog.d/12301.misc
@@ -0,0 +1 @@
+Enhance logging for inbound federation events.
diff --git a/snap/snapcraft.yaml b/contrib/snap/snapcraft.yaml
index dd4c8478d5..dd4c8478d5 100644
--- a/snap/snapcraft.yaml
+++ b/contrib/snap/snapcraft.yaml
diff --git a/debian/changelog b/debian/changelog
index df3db85b8e..3c899e6024 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,33 @@
+matrix-synapse-py3 (1.55.2) stable; urgency=medium
+
+  * New synapse release 1.55.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 24 Mar 2022 19:07:11 +0000
+
+matrix-synapse-py3 (1.55.1) stable; urgency=medium
+
+  * New synapse release 1.55.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 24 Mar 2022 17:44:23 +0000
+
+matrix-synapse-py3 (1.55.0) stable; urgency=medium
+
+  * New synapse release 1.55.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 22 Mar 2022 13:59:26 +0000
+
+matrix-synapse-py3 (1.55.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.55.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Mar 2022 10:59:31 +0000
+
+matrix-synapse-py3 (1.54.0) stable; urgency=medium
+
+  * New synapse release 1.54.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Mar 2022 10:54:52 +0000
+
 matrix-synapse-py3 (1.54.0~rc1) stable; urgency=medium
 
   * New synapse release 1.54.0~rc1.
diff --git a/demo/.gitignore b/demo/.gitignore
index 4d12712343..5663aba8e7 100644
--- a/demo/.gitignore
+++ b/demo/.gitignore
@@ -1,7 +1,4 @@
-*.db
-*.log
-*.log.*
-*.pid
-
-/media_store.*
-/etc
+# Ignore all the temporary files from the demo servers.
+8080/
+8081/
+8082/
diff --git a/demo/README b/demo/README
deleted file mode 100644
index a5a95bd196..0000000000
--- a/demo/README
+++ /dev/null
@@ -1,26 +0,0 @@
-DO NOT USE THESE DEMO SERVERS IN PRODUCTION
-
-Requires you to have done:
-    python setup.py develop
-
-
-The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
-
-To enable the servers to communicate untrusted ssl certs are used. In order to do this the servers do not check the certs
-and are configured in a highly insecure way. Do not use these configuration files in production.
-
-stop.sh will stop the synapse servers and the webclient.
-
-clean.sh will delete the databases and log files.
-
-To start a completely new set of servers, run:
-
-    ./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
-
-
-Logs and sqlitedb will be stored in demo/808{0,1,2}.{log,db}
-
-
-
-Also note that when joining a public room on a different HS via "#foo:bar.net", then you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name.
-
diff --git a/demo/clean.sh b/demo/clean.sh
index e9b440d90d..7f1e192021 100755
--- a/demo/clean.sh
+++ b/demo/clean.sh
@@ -4,6 +4,9 @@ set -e
 
 DIR="$( cd "$( dirname "$0" )" && pwd )"
 
+# Ensure that the servers are stopped.
+$DIR/stop.sh
+
 PID_FILE="$DIR/servers.pid"
 
 if [ -f "$PID_FILE" ]; then
diff --git a/demo/start.sh b/demo/start.sh
index 8ffb14e30a..5a9972d24c 100755
--- a/demo/start.sh
+++ b/demo/start.sh
@@ -6,8 +6,6 @@ CWD=$(pwd)
 
 cd "$DIR/.." || exit
 
-mkdir -p demo/etc
-
 PYTHONPATH=$(readlink -f "$(pwd)")
 export PYTHONPATH
 
@@ -21,22 +19,27 @@ for port in 8080 8081 8082; do
     mkdir -p demo/$port
     pushd demo/$port || exit
 
-    #rm $DIR/etc/$port.config
+    # Generate the configuration for the homeserver at localhost:848x.
     python3 -m synapse.app.homeserver \
         --generate-config \
-        -H "localhost:$https_port" \
-        --config-path "$DIR/etc/$port.config" \
+        --server-name "localhost:$port" \
+        --config-path "$port.config" \
         --report-stats no
 
-    if ! grep -F "Customisation made by demo/start.sh" -q "$DIR/etc/$port.config"; then
-        # Generate tls keys
-        openssl req -x509 -newkey rsa:4096 -keyout "$DIR/etc/localhost:$https_port.tls.key" -out "$DIR/etc/localhost:$https_port.tls.crt" -days 365 -nodes -subj "/O=matrix"
+    if ! grep -F "Customisation made by demo/start.sh" -q "$port.config"; then
+        # Generate TLS keys.
+        openssl req -x509 -newkey rsa:4096 \
+          -keyout "localhost:$port.tls.key" \
+          -out "localhost:$port.tls.crt" \
+          -days 365 -nodes -subj "/O=matrix"
 
-        # Regenerate configuration
+        # Add customisations to the configuration.
         {
-            printf '\n\n# Customisation made by demo/start.sh\n'
+            printf '\n\n# Customisation made by demo/start.sh\n\n'
             echo "public_baseurl: http://localhost:$port/"
             echo 'enable_registration: true'
+            echo 'enable_registration_without_verification: true'
+            echo ''
 
 			# Warning, this heredoc depends on the interaction of tabs and spaces.
 			# Please don't accidentaly bork me with your fancy settings.
@@ -63,38 +66,34 @@ for port in 8080 8081 8082; do
 
             echo "${listeners}"
 
-            # Disable tls for the servers
-            printf '\n\n# Disable tls on the servers.'
+            # Disable TLS for the servers
+            printf '\n\n# Disable TLS for the servers.'
             echo '# DO NOT USE IN PRODUCTION'
             echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true'
             echo 'federation_verify_certificates: false'
 
-            # Set tls paths
-            echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\""
-            echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\""
+            # Set paths for the TLS certificates.
+            echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
+            echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""
 
             # Ignore keys from the trusted keys server
             echo '# Ignore keys from the trusted keys server'
             echo 'trusted_key_servers:'
             echo '  - server_name: "matrix.org"'
             echo '    accept_keys_insecurely: true'
-
-			# Reduce the blacklist
-			blacklist=$(cat <<-BLACK
-			# Set the blacklist so that it doesn't include 127.0.0.1, ::1
-			federation_ip_range_blacklist:
-			  - '10.0.0.0/8'
-			  - '172.16.0.0/12'
-			  - '192.168.0.0/16'
-			  - '100.64.0.0/10'
-			  - '169.254.0.0/16'
-			  - 'fe80::/64'
-			  - 'fc00::/7'
-			BLACK
+            echo ''
+
+			# Allow the servers to communicate over localhost.
+			allow_list=$(cat <<-ALLOW_LIST
+			# Allow the servers to communicate over localhost.
+			ip_range_whitelist:
+			  - '127.0.0.1/8'
+			  - '::1/128'
+			ALLOW_LIST
 			)
 
-            echo "${blacklist}"
-        } >> "$DIR/etc/$port.config"
+            echo "${allow_list}"
+        } >> "$port.config"
     fi
 
     # Check script parameters
@@ -141,19 +140,18 @@ for port in 8080 8081 8082; do
 			    burst_count: 1000
 			RC
 			)
-            echo "${ratelimiting}" >> "$DIR/etc/$port.config"
+            echo "${ratelimiting}" >> "$port.config"
         fi
     fi
 
-    if ! grep -F "full_twisted_stacktraces" -q  "$DIR/etc/$port.config"; then
-        echo "full_twisted_stacktraces: true" >> "$DIR/etc/$port.config"
-    fi
-    if ! grep -F "report_stats" -q  "$DIR/etc/$port.config" ; then
-        echo "report_stats: false" >> "$DIR/etc/$port.config"
+    # Always disable reporting of stats if the option is not there.
+    if ! grep -F "report_stats" -q  "$port.config" ; then
+        echo "report_stats: false" >> "$port.config"
     fi
 
+    # Run the homeserver in the background.
     python3 -m synapse.app.homeserver \
-        --config-path "$DIR/etc/$port.config" \
+        --config-path "$port.config" \
         -D \
 
     popd || exit
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 327275a9ca..24b5515eb9 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -46,7 +46,7 @@ RUN \
     && rm -rf /var/lib/apt/lists/*
 
 # Copy just what we need to pip install
-COPY MANIFEST.in README.rst setup.py synctl /synapse/
+COPY MANIFEST.in README.rst setup.py /synapse/
 COPY synapse/__init__.py /synapse/synapse/__init__.py
 COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
 
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index ef9cabf555..21f80efc99 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -82,6 +82,7 @@
   - [Release Cycle](development/releases.md)
   - [Git Usage](development/git.md)
   - [Testing]()
+    - [Demo scripts](development/demo.md)
   - [OpenTracing](opentracing.md)
   - [Database Schemas](development/database_schema.md)
   - [Experimental features](development/experimental_features.md)
diff --git a/docs/code_style.md b/docs/code_style.md
index 4d8e7c973d..e7c9cd1a5e 100644
--- a/docs/code_style.md
+++ b/docs/code_style.md
@@ -172,6 +172,6 @@ frobber:
 ```
 
 Note that the sample configuration is generated from the synapse code
-and is maintained by a script, `scripts-dev/generate_sample_config`.
+and is maintained by a script, `scripts-dev/generate_sample_config.sh`.
 Making sure that the output from this script matches the desired format
 is left as an exercise for the reader!
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 8448685952..071202e196 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -458,6 +458,17 @@ Git allows you to add this signoff automatically when using the `-s`
 flag to `git commit`, which uses the name and email set in your
 `user.name` and `user.email` git configs.
 
+### Private Sign off
+
+If you would like to provide your legal name privately to the Matrix.org
+Foundation (instead of in a public commit or comment), you can do so
+by emailing your legal name and a link to the pull request to
+[dco@matrix.org](mailto:dco@matrix.org?subject=Private%20sign%20off).
+It helps to include "sign off" or similar in the subject line. You will then
+be instructed further.
+
+Once private sign off is complete, doing so for future contributions will not
+be required.
 
 # 10. Turn feedback into better code.
 
diff --git a/docs/development/demo.md b/docs/development/demo.md
new file mode 100644
index 0000000000..4277252ceb
--- /dev/null
+++ b/docs/development/demo.md
@@ -0,0 +1,41 @@
+# Synapse demo setup
+
+**DO NOT USE THESE DEMO SERVERS IN PRODUCTION**
+
+Requires you to have a [Synapse development environment setup](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies).
+
+The demo setup allows running three federated Synapse servers, with server
+names `localhost:8080`, `localhost:8081`, and `localhost:8082`.
+
+You can access them via any Matrix client over HTTP at `localhost:8080`,
+`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`,
+`localhost:8481`, and `localhost:8482`.
+
+To enable the servers to communicate, self-signed SSL certificates are generated
+and the servers are configured in a highly insecure way, including:
+
+* Not checking certificates over federation.
+* Not verifying keys.
+
+The servers are configured to store their data under `demo/8080`, `demo/8081`, and
+`demo/8082`. This includes configuration, logs, SQLite databases, and media.
+
+Note that when joining a public room on a different HS via "#foo:bar.net", then
+you are (in the current impl) joining a room with room_id "foo". This means that
+it won't work if your HS already has a room with that name.
+
+## Using the demo scripts
+
+There are three main scripts with straightforward purposes:
+
+* `start.sh` will start the Synapse servers, generating any missing configuration.
+  * This accepts a single parameter `--no-rate-limit` to "disable" rate limits
+    (they actually still exist, but are very high).
+* `stop.sh` will stop the Synapse servers.
+* `clean.sh` will delete the configuration, databases, log files, etc.
+
+To start a completely new set of servers, run:
+
+```sh
+./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
+```
diff --git a/docs/development/room-dag-concepts.md b/docs/development/room-dag-concepts.md
index cbc7cf2949..3eb4d5acc4 100644
--- a/docs/development/room-dag-concepts.md
+++ b/docs/development/room-dag-concepts.md
@@ -30,37 +30,72 @@ rather than skipping any that arrived late; whereas if you're looking at a
 historical section of timeline (i.e. `/messages`), you want to see the best
 representation of the state of the room as others were seeing it at the time.
 
+## Outliers
 
-## Forward extremity
+We mark an event as an `outlier` when we haven't figured out the state for the
+room at that point in the DAG yet. They are "floating" events that we haven't
+yet correlated to the DAG.
 
-Most-recent-in-time events in the DAG which are not referenced by any other events' `prev_events` yet.
+Outliers typically arise when we fetch the auth chain or state for a given
+event. When that happens, we just grab the events in the state/auth chain,
+without calculating the state at those events, or backfilling their
+`prev_events`.
 
-The forward extremities of a room are used as the `prev_events` when the next event is sent.
+So, typically, we won't have the `prev_events` of an `outlier` in the database,
+(though it's entirely possible that we *might* have them for some other
+reason). Other things that make outliers different from regular events:
 
+ * We don't have state for them, so there should be no entry in
+   `event_to_state_groups` for an outlier. (In practice this isn't always
+   the case, though I'm not sure why: see https://github.com/matrix-org/synapse/issues/12201).
 
-## Backward extremity
+ * We don't record entries for them in the `event_edges`,
+   `event_forward_extremities` or `event_backward_extremities` tables.
 
-The current marker of where we have backfilled up to and will generally be the
-`prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when
-backfilling history.
+Since outliers are not tied into the DAG, they do not normally form part of the
+timeline sent down to clients via `/sync` or `/messages`; however there is an
+exception:
 
-When we persist a non-outlier event, we clear it as a backward extremity and set
-all of its `prev_events` as the new backward extremities if they aren't already
-persisted in the `events` table.
+### Out-of-band membership events
 
+A special case of outlier events are some membership events for federated rooms
+that we aren't full members of. For example:
 
-## Outliers
+ * invites received over federation, before we join the room
+ * *rejections* for said invites
+ * knock events for rooms that we would like to join but have not yet joined.
 
-We mark an event as an `outlier` when we haven't figured out the state for the
-room at that point in the DAG yet.
+In all the above cases, we don't have the state for the room, which is why they
+are treated as outliers. They are a bit special though, in that they are
+proactively sent to clients via `/sync`.
 
-We won't *necessarily* have the `prev_events` of an `outlier` in the database,
-but it's entirely possible that we *might*.
+## Forward extremity
+
+Most-recent-in-time events in the DAG which are not referenced by any other
+events' `prev_events` yet. (In this definition, outliers, rejected events, and
+soft-failed events don't count.)
+
+The forward extremities of a room (or at least, a subset of them, if there are
+more than ten) are used as the `prev_events` when the next event is sent.
+
+The "current state" of a room (ie: the state which would be used if we
+generated a new event) is, therefore, the resolution of the room states
+at each of the forward extremities.
+
+## Backward extremity
+
+The current marker of where we have backfilled up to and will generally be the
+`prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when
+backfilling history.
 
-For example, when we fetch the event auth chain or state for a given event, we
-mark all of those claimed auth events as outliers because we haven't done the
-state calculation ourself.
+Note that, unlike forward extremities, we typically don't have any backward
+extremity events themselves in the database - or, if we do, they will be "outliers" (see
+above). Either way, we don't expect to have the room state at a backward extremity.
 
+When we persist a non-outlier event, if it was previously a backward extremity,
+we clear it as a backward extremity and set all of its `prev_events` as the new
+backward extremities if they aren't already persisted as non-outliers. This
+therefore keeps the backward extremities up-to-date.
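+
+As a sketch of that bookkeeping (hypothetical names, not the actual storage
+code), persisting a non-outlier event might update the backward extremities
+like so:
+
+```python
+from typing import List, Set
+
+
+def update_backward_extremities(
+    event_id: str,
+    prev_event_ids: List[str],
+    backward_extremities: Set[str],
+    persisted_non_outliers: Set[str],
+) -> None:
+    """Illustrative sketch: maintain the backward-extremity set when a
+    non-outlier event is persisted."""
+    # The newly-persisted event is no longer a backward extremity itself.
+    backward_extremities.discard(event_id)
+
+    # Any prev_event we don't already have as a non-outlier becomes a new
+    # backward extremity, i.e. a candidate starting point for backfilling.
+    for prev_id in prev_event_ids:
+        if prev_id not in persisted_non_outliers:
+            backward_extremities.add(prev_id)
+```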
 
 ## State groups
 
diff --git a/docs/federate.md b/docs/federate.md
index 5107f995be..df4c87da51 100644
--- a/docs/federate.md
+++ b/docs/federate.md
@@ -63,4 +63,5 @@ release of Synapse.
 
 If you want to get up and running quickly with a trio of homeservers in a
 private federation, there is a script in the `demo` directory. This is mainly
-useful just for development purposes. See [demo/README](https://github.com/matrix-org/synapse/tree/develop/demo/).
+useful just for development purposes. See
+[demo scripts](https://matrix-org.github.io/synapse/develop/development/demo.html).
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
index 2b672b78f9..472d957180 100644
--- a/docs/modules/spam_checker_callbacks.md
+++ b/docs/modules/spam_checker_callbacks.md
@@ -172,7 +172,7 @@ any of the subsequent implementations of this callback.
 _First introduced in Synapse v1.37.0_
 
 ```python
-async def check_username_for_spam(user_profile: Dict[str, str]) -> bool
+async def check_username_for_spam(user_profile: synapse.module_api.UserProfile) -> bool
 ```
 
 Called when computing search results in the user directory. The module must return a
@@ -182,9 +182,11 @@ search results; otherwise return `False`.
 
 The profile is represented as a dictionary with the following keys:
 
-* `user_id`: The Matrix ID for this user.
-* `display_name`: The user's display name.
-* `avatar_url`: The `mxc://` URL to the user's avatar.
+* `user_id: str`. The Matrix ID for this user.
+* `display_name: Optional[str]`. The user's display name, or `None` if this user
+  has not set a display name.
+* `avatar_url: Optional[str]`. The `mxc://` URL to the user's avatar, or `None`
+  if this user has not set an avatar.
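+
+As a minimal sketch of a module callback using these fields (the blocklist
+below is purely an illustrative assumption, not part of Synapse):
+
+```python
+from synapse.module_api import UserProfile
+
+FORBIDDEN_WORDS = {"spam", "bot"}  # illustrative assumption
+
+
+async def check_username_for_spam(user_profile: UserProfile) -> bool:
+    # display_name may be None if the user has not set one.
+    display_name = (user_profile["display_name"] or "").lower()
+    # Return True to hide the user from user directory search results.
+    return any(word in display_name for word in FORBIDDEN_WORDS)
+```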
 
 The module is given a copy of the original dictionary, so modifying it from within the
 module cannot modify a user's profile when included in user directory search results.
diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md
index 09ac838107..1d3c39967f 100644
--- a/docs/modules/third_party_rules_callbacks.md
+++ b/docs/modules/third_party_rules_callbacks.md
@@ -148,6 +148,49 @@ deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#c
 
 If multiple modules implement this callback, Synapse runs them all in order.
 
+### `check_can_shutdown_room`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_shutdown_room(
+    user_id: str, room_id: str,
+) -> bool:
+```
+
+Called when an admin user requests the shutdown of a room. The module must return a
+boolean indicating whether the shutdown can go through. If the callback returns `False`,
+the shutdown will not proceed and the caller will see a `M_FORBIDDEN` error.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
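+As an illustration only (not part of Synapse), a module might restrict shutdowns
+to a single hypothetical operator account:
+
+```python
+async def check_can_shutdown_room(user_id: str, room_id: str) -> bool:
+    # Hypothetical policy: only this admin account may shut rooms down.
+    return user_id == "@room-admin:example.com"
+```
+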
+### `check_can_deactivate_user`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_deactivate_user(
+    user_id: str, by_admin: bool,
+) -> bool:
+```
+
+Called when the deactivation of a user is requested. User deactivation can be
+performed by an admin or the user themselves, so developers are encouraged to check the
+requester when implementing this callback. The module must return a
+boolean indicating whether the deactivation can go through. If the callback returns `False`,
+the deactivation will not proceed and the caller will see a `M_FORBIDDEN` error.
+
+The module is passed two parameters: `user_id`, which is the ID of the user being
+deactivated, and `by_admin`, which is `True` if the request was made by a server
+admin, and `False` otherwise.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
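+As an illustration only (not a recommendation), a module could allow deactivation
+only when it is requested by a server admin:
+
+```python
+async def check_can_deactivate_user(user_id: str, by_admin: bool) -> bool:
+    # Hypothetical policy: only deactivations performed by a server admin
+    # are allowed to proceed.
+    return by_admin
+```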
+
 ### `on_profile_update`
 
 _First introduced in Synapse v1.54.0_
diff --git a/docs/openid.md b/docs/openid.md
index 171ea3b712..19cacaafef 100644
--- a/docs/openid.md
+++ b/docs/openid.md
@@ -225,6 +225,8 @@ oidc_providers:
 3. Create an application for synapse in Authentik and link it to the provider.
 4. Note the slug of your application, Client ID and Client Secret.
 
+Note: RSA keys must be used for signing for Authentik; ECC keys do not work.
+
 Synapse config:
 ```yaml
 oidc_providers:
@@ -240,7 +242,7 @@ oidc_providers:
       - "email"
     user_mapping_provider:
       config:
-        localpart_template: "{{ user.preferred_username }}}"
+        localpart_template: "{{ user.preferred_username }}"
         display_name_template: "{{ user.preferred_username|capitalize }}" # TO BE FILLED: If your users have names in Authentik and you want those in Synapse, this should be replaced with user.name|capitalize.
 ```
 
diff --git a/docs/postgres.md b/docs/postgres.md
index 0562021da5..cbc32e1836 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -153,9 +153,9 @@ database file (typically `homeserver.db`) to another location. Once the
 copy is complete, restart synapse. For instance:
 
 ```sh
-./synctl stop
+synctl stop
 cp homeserver.db homeserver.db.snapshot
-./synctl start
+synctl start
 ```
 
 Copy the old config file into a new config file:
@@ -192,10 +192,10 @@ Once that has completed, change the synapse config to point at the
 PostgreSQL database configuration file `homeserver-postgres.yaml`:
 
 ```sh
-./synctl stop
+synctl stop
 mv homeserver.yaml homeserver-old-sqlite.yaml
 mv homeserver-postgres.yaml homeserver.yaml
-./synctl start
+synctl start
 ```
 
 Synapse should now be running against PostgreSQL.
@@ -234,12 +234,13 @@ host    all         all             ::1/128     ident
 ### Fixing incorrect `COLLATE` or `CTYPE`
 
 Synapse will refuse to set up a new database if it has the wrong values of
-`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
-different locales can cause issues if the locale library is updated from
+`COLLATE` and `CTYPE` set. Synapse will also refuse to start when using an existing
+database with incorrect values of `COLLATE` and `CTYPE`, unless the config flag
+`allow_unsafe_locale`, found in the `database` section of the config, is set to true.
+Using different locales can cause issues if the locale library is updated from
 underneath the database, or if a different version of the locale is used on any
 replicas.
 
-The safest way to fix the issue is to dump the database and recreate it with
+If you have a database with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with
 the correct locale parameter (as shown above). It is also possible to change the
 parameters on a live database and run a `REINDEX` on the entire database,
 however extreme care must be taken to avoid database corruption.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 6f3623c88a..a21b48ab2e 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -783,6 +783,12 @@ caches:
 # 'txn_limit' gives the maximum number of transactions to run per connection
 # before reconnecting. Defaults to 0, which means no limit.
 #
+# 'allow_unsafe_locale' is an option specific to Postgres. Under the default behavior, Synapse will refuse to
+# start if the postgres db is set to a non-C locale. You can override this behavior (which is *not* recommended)
+# by setting 'allow_unsafe_locale' to true. Note that doing so may corrupt your database. You can find more information
+# here: https://matrix-org.github.io/synapse/latest/postgres.html#fixing-incorrect-collate-or-ctype and here:
+# https://wiki.postgresql.org/wiki/Locale_data_changes
+#
 # 'args' gives options which are passed through to the database engine,
 # except for options starting 'cp_', which are used to configure the Twisted
 # connection pool. For a reference to valid arguments, see:
@@ -1212,10 +1218,18 @@ oembed:
 # Registration can be rate-limited using the parameters in the "Ratelimiting"
 # section of this file.
 
-# Enable registration for new users.
+# Enable registration for new users. Defaults to 'false'. It is highly recommended that if you enable registration,
+# you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration
+# without any verification, you must also set `enable_registration_without_verification`, found below.
 #
 #enable_registration: false
 
+# Enable registration without email or captcha verification. Note: this option is *not* recommended,
+# as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect
+# unless `enable_registration` is also enabled.
+#
+#enable_registration_without_verification: true
+
 # Time that a user's session remains valid for, after they log in.
 #
 # Note that this is not currently compatible with guest logins.
@@ -1947,8 +1961,14 @@ saml2_config:
 #
 #             localpart_template: Jinja2 template for the localpart of the MXID.
 #                 If this is not set, the user will be prompted to choose their
-#                 own username (see 'sso_auth_account_details.html' in the 'sso'
-#                 section of this file).
+#                 own username (see the documentation for the
+#                 'sso_auth_account_details.html' template). This template can
+#                 use the 'localpart_from_email' filter.
+#
+#             confirm_localpart: Whether to prompt the user to validate (or
+#                 change) the generated localpart (see the documentation for the
+#                 'sso_auth_account_details.html' template), instead of
+#                 registering the account right away.
 #
 #             display_name_template: Jinja2 template for the display name to set
 #                 on first login. If unset, no displayname will be set.
@@ -2729,3 +2749,35 @@ redis:
   # Optional password if configured on the Redis instance
   #
   #password: <secret_password>
+
+
+## Background Updates ##
+
+# Background updates are database updates that are run in the background in batches.
+# The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to
+# sleep can all be configured. This is helpful to speed up or slow down the updates.
+#
+background_updates:
+    # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set
+    # a time to change the default.
+    #
+    #background_update_duration_ms: 500
+
+    # Whether to sleep between updates. Defaults to True. Uncomment to change the default.
+    #
+    #sleep_enabled: false
+
+    # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment
+    # and set a duration to change the default.
+    #
+    #sleep_duration_ms: 300
+
+    # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and
+    # set a size to change the default.
+    #
+    #min_batch_size: 10
+
+    # The batch size to use for the first iteration of a new background update. The default is 100.
+    # Uncomment and set a size to change the default.
+    #
+    #default_batch_size: 50
diff --git a/docs/templates.md b/docs/templates.md
index 2b66e9d862..f87692a453 100644
--- a/docs/templates.md
+++ b/docs/templates.md
@@ -36,6 +36,13 @@ Turns a `mxc://` URL for media content into an HTTP(S) one using the homeserver'
 
 Example: `message.sender_avatar_url|mxc_to_http(32,32)`
 
+```python
+localpart_from_email(address: str) -> str
+```
+
+Returns the local part of an email address (e.g. `alice` in `alice@example.com`).
+
+Example: `user.email_address|localpart_from_email`
 
 ## Email templates
 
@@ -176,8 +183,11 @@ Below are the templates Synapse will look for when generating pages related to S
              for the brand of the IdP
     * `user_attributes`: an object containing details about the user that
       we received from the IdP. May have the following attributes:
-        * display_name: the user's display_name
-        * emails: a list of email addresses
+        * `display_name`: the user's display name
+        * `emails`: a list of email addresses
+        * `localpart`: the local part of the Matrix user ID to register,
+          if `localpart_template` is set in the mapping provider configuration (empty
+          string if not)
   The template should render a form which submits the following fields:
     * `username`: the localpart of the user's chosen user id
 * `sso_new_user_consent.html`: HTML page allowing the user to consent to the
diff --git a/docs/turn-howto.md b/docs/turn-howto.md
index eba7ca6124..3a2cd04e36 100644
--- a/docs/turn-howto.md
+++ b/docs/turn-howto.md
@@ -238,8 +238,9 @@ After updating the homeserver configuration, you must restart synapse:
 
   * If you use synctl:
     ```sh
-    cd /where/you/run/synapse
-    ./synctl restart
+    # Depending on how Synapse is installed, synctl may already be on
+    # your PATH. If not, you may need to activate a virtual environment.
+    synctl restart
     ```
   * If you use systemd:
     ```sh
diff --git a/docs/upgrade.md b/docs/upgrade.md
index f9be3ac6bc..062e823333 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -47,7 +47,7 @@ this document.
     3.  Restart Synapse:
 
         ```bash
-        ./synctl restart
+        synctl restart
         ```
 
 To check whether your update was successful, you can check the running
@@ -85,6 +85,62 @@ process, for example:
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     ```
 
+# Upgrading to v1.56.0
+
+## Groups/communities feature has been deprecated
+
+The non-standard groups/communities feature in Synapse has been deprecated and will
+be disabled by default in Synapse v1.58.0.
+
+You can test disabling it by adding the following to your homeserver configuration:
+
+```yaml
+experimental_features:
+  groups_enabled: false
+```
+
+## Change in behaviour for PostgreSQL databases with unsafe locale
+
+Synapse now refuses to start when using PostgreSQL with non-`C` values for `COLLATE` and
+`CTYPE` unless the config flag `allow_unsafe_locale`, found in the database section of
+the configuration file, is set to `true`. See the [PostgreSQL documentation](https://matrix-org.github.io/synapse/latest/postgres.html#fixing-incorrect-collate-or-ctype)
+for more information and instructions on how to fix a database with incorrect values.
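+
+If you understand the risks and want to keep your existing locale, the override
+would look something like this (a sketch; merge it into the existing `database`
+section of your configuration):
+
+```yaml
+database:
+  allow_unsafe_locale: true
+```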
+
+# Upgrading to v1.55.0
+
+## Open registration without verification is now disabled by default
+
+Synapse will refuse to start if registration is enabled without email, captcha, or token-based
+verification unless the new config flag `enable_registration_without_verification` is set to `true`.
+
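+If you really do want open registration without any verification (again, not
+recommended), the configuration would look something like this:
+
+```yaml
+enable_registration: true
+enable_registration_without_verification: true
+```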
+
+## `synctl` script has been moved
+
+The `synctl` script
+[has been made](https://github.com/matrix-org/synapse/pull/12140) an
+[entry point](https://packaging.python.org/en/latest/specifications/entry-points/)
+and no longer exists at the root of Synapse's source tree. If you wish to use
+`synctl` to manage your homeserver, you should invoke `synctl` directly, e.g. 
+`synctl start` instead of `./synctl start` or `/path/to/synctl start`. 
+
+You will need to ensure `synctl` is on your `PATH`.
+  - This is automatically the case when using
+    [Debian packages](https://packages.matrix.org/debian/) or
+    [docker images](https://hub.docker.com/r/matrixdotorg/synapse)
+    provided by Matrix.org.
+  - When installing from a wheel, sdist, or PyPI, a `synctl` executable is added 
+    to your Python installation's `bin`. This should be on your `PATH`
+    automatically, though you might need to activate a virtual environment
+    depending on how you installed Synapse.
+
+
+## Compatibility dropped for Mjolnir 1.3.1 and earlier
+
+Synapse v1.55.0 drops support for Mjolnir 1.3.1 and earlier.
+If you use the Mjolnir module to moderate your homeserver,
+please upgrade Mjolnir to version 1.3.2 or later before upgrading Synapse.
+
+
 # Upgrading to v1.54.0
 
 ## Legacy structured logging configuration removal
diff --git a/docs/workers.md b/docs/workers.md
index b0f8599ef0..8ac95e39bb 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -185,8 +185,8 @@ worker: refer to the [stream writers](#stream-writers) section below for further
 information.
 
     # Sync requests
-    ^/_matrix/client/(v2_alpha|r0|v3)/sync$
-    ^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$
+    ^/_matrix/client/(r0|v3)/sync$
+    ^/_matrix/client/(api/v1|r0|v3)/events$
     ^/_matrix/client/(api/v1|r0|v3)/initialSync$
     ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$
 
@@ -200,13 +200,9 @@ information.
     ^/_matrix/federation/v1/query/
     ^/_matrix/federation/v1/make_join/
     ^/_matrix/federation/v1/make_leave/
-    ^/_matrix/federation/v1/send_join/
-    ^/_matrix/federation/v2/send_join/
-    ^/_matrix/federation/v1/send_leave/
-    ^/_matrix/federation/v2/send_leave/
-    ^/_matrix/federation/v1/invite/
-    ^/_matrix/federation/v2/invite/
-    ^/_matrix/federation/v1/query_auth/
+    ^/_matrix/federation/(v1|v2)/send_join/
+    ^/_matrix/federation/(v1|v2)/send_leave/
+    ^/_matrix/federation/(v1|v2)/invite/
     ^/_matrix/federation/v1/event_auth/
     ^/_matrix/federation/v1/exchange_third_party_invite/
     ^/_matrix/federation/v1/user/devices/
@@ -274,6 +270,8 @@ information.
 Additionally, the following REST endpoints can be handled for GET requests:
 
     ^/_matrix/federation/v1/groups/
+    ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
+    ^/_matrix/client/(r0|v3|unstable)/groups/
 
 Pagination requests can also be handled, but all requests for a given
 room must be routed to the same instance. Additionally, care must be taken to
@@ -351,8 +349,11 @@ is only supported with Redis-based replication.)
 
 To enable this, the worker must have a HTTP replication listener configured,
 have a `worker_name` and be listed in the `instance_map` config. The same worker
-can handle multiple streams. For example, to move event persistence off to a
-dedicated worker, the shared configuration would include:
+can handle multiple streams, but unless otherwise documented, each stream can only
+have a single writer.
+
+For example, to move event persistence off to a dedicated worker, the shared
+configuration would include:
 
 ```yaml
 instance_map:
@@ -370,8 +371,8 @@ streams and the endpoints associated with them:
 
 ##### The `events` stream
 
-The `events` stream also experimentally supports having multiple writers, where
-work is sharded between them by room ID. Note that you *must* restart all worker
+The `events` stream experimentally supports having multiple writers, where work
+is sharded between them by room ID. Note that you *must* restart all worker
 instances when adding or removing event persisters. An example `stream_writers`
 configuration with multiple writers:
 
@@ -384,38 +385,38 @@ stream_writers:
 
 ##### The `typing` stream
 
-The following endpoints should be routed directly to the workers configured as
-stream writers for the `typing` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `typing` stream:
 
     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing
 
 ##### The `to_device` stream
 
-The following endpoints should be routed directly to the workers configured as
-stream writers for the `to_device` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `to_device` stream:
 
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/sendToDevice/
+    ^/_matrix/client/(r0|v3|unstable)/sendToDevice/
 
 ##### The `account_data` stream
 
-The following endpoints should be routed directly to the workers configured as
-stream writers for the `account_data` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `account_data` stream:
 
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/tags
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/account_data
+    ^/_matrix/client/(r0|v3|unstable)/.*/tags
+    ^/_matrix/client/(r0|v3|unstable)/.*/account_data
 
 ##### The `receipts` stream
 
-The following endpoints should be routed directly to the workers configured as
-stream writers for the `receipts` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `receipts` stream:
 
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/receipt
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/read_markers
+    ^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt
+    ^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers
 
 ##### The `presence` stream
 
-The following endpoints should be routed directly to the workers configured as
-stream writers for the `presence` stream:
+The following endpoints should be routed directly to the worker configured as
+the stream writer for the `presence` stream:
 
     ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
 
@@ -525,19 +526,28 @@ Note that if a reverse proxy is used , then `/_matrix/media/` must be routed for
 Handles searches in the user directory. It can handle REST endpoints matching
 the following regular expressions:
 
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$
+    ^/_matrix/client/(r0|v3|unstable)/user_directory/search$
 
-When using this worker you must also set `update_user_directory: False` in the
+When using this worker you must also set `update_user_directory: false` in the
 shared configuration file to stop the main synapse running background
 jobs related to updating the user directory.
 
+The above endpoint is not *required* to be routed to this worker. By default,
+`update_user_directory` is set to `true`, which means the main process
+will handle updates. All workers configured with `client` can handle the above
+endpoint as long as either this worker or the main process is configured to
+handle it, and is online.
+
+If `update_user_directory` is set to `false`, and this worker is not running,
+the above endpoint may give outdated results.
+
 ### `synapse.app.frontend_proxy`
 
 Proxies some frequently-requested client endpoints to add caching and remove
 load from the main synapse. It can handle REST endpoints matching the following
 regular expressions:
 
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload
+    ^/_matrix/client/(r0|v3|unstable)/keys/upload
 
 If `use_presence` is False in the homeserver config, it can also handle REST
 endpoints matching the following regular expressions:
diff --git a/mypy.ini b/mypy.ini
index 23ca4eaa5a..24d4ba15d4 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -11,7 +11,7 @@ local_partial_types = True
 no_implicit_optional = True
 
 files =
-  scripts-dev/sign_json,
+  scripts-dev/,
   setup.py,
   synapse/,
   tests/
@@ -23,18 +23,25 @@ files =
 # https://docs.python.org/3/library/re.html#re.X
 exclude = (?x)
   ^(
+   |scripts-dev/build_debian_packages.py
+   |scripts-dev/check_signature.py
+   |scripts-dev/definitions.py
+   |scripts-dev/federation_client.py
+   |scripts-dev/hash_history.py
+   |scripts-dev/list_url_patterns.py
+   |scripts-dev/release.py
+   |scripts-dev/tail-synapse.py
+
    |synapse/_scripts/export_signing_key.py
    |synapse/_scripts/move_remote_media_to_new_store.py
    |synapse/_scripts/synapse_port_db.py
    |synapse/_scripts/update_synapse_database.py
+
    |synapse/storage/databases/__init__.py
    |synapse/storage/databases/main/__init__.py
    |synapse/storage/databases/main/cache.py
    |synapse/storage/databases/main/devices.py
    |synapse/storage/databases/main/event_federation.py
-   |synapse/storage/databases/main/group_server.py
-   |synapse/storage/databases/main/metrics.py
-   |synapse/storage/databases/main/monthly_active_users.py
    |synapse/storage/databases/main/push_rule.py
    |synapse/storage/databases/main/receipts.py
    |synapse/storage/databases/main/roommember.py
@@ -56,14 +63,6 @@ exclude = (?x)
    |tests/federation/test_federation_server.py
    |tests/federation/transport/test_knocking.py
    |tests/federation/transport/test_server.py
-   |tests/handlers/test_cas.py
-   |tests/handlers/test_directory.py
-   |tests/handlers/test_e2e_keys.py
-   |tests/handlers/test_federation.py
-   |tests/handlers/test_oidc.py
-   |tests/handlers/test_presence.py
-   |tests/handlers/test_profile.py
-   |tests/handlers/test_saml.py
    |tests/handlers/test_typing.py
    |tests/http/federation/test_matrix_federation_agent.py
    |tests/http/federation/test_srv_resolver.py
@@ -75,24 +74,15 @@ exclude = (?x)
    |tests/logging/test_terse_json.py
    |tests/module_api/test_api.py
    |tests/push/test_email.py
-   |tests/push/test_http.py
    |tests/push/test_presentable_names.py
    |tests/push/test_push_rule_evaluator.py
    |tests/rest/client/test_transactions.py
-   |tests/rest/key/v2/test_remote_key_resource.py
-   |tests/rest/media/v1/test_base.py
    |tests/rest/media/v1/test_media_storage.py
-   |tests/rest/media/v1/test_url_preview.py
    |tests/scripts/test_new_matrix_user.py
    |tests/server.py
    |tests/server_notices/test_resource_limits_server_notices.py
    |tests/state/test_v2.py
-   |tests/storage/test_background_update.py
    |tests/storage/test_base.py
-   |tests/storage/test_client_ips.py
-   |tests/storage/test_database.py
-   |tests/storage/test_event_federation.py
-   |tests/storage/test_id_generators.py
    |tests/storage/test_roommember.py
    |tests/test_metrics.py
    |tests/test_phone_home.py
@@ -244,10 +234,7 @@ disallow_untyped_defs = True
 [mypy-tests.storage.test_user_directory]
 disallow_untyped_defs = True
 
-[mypy-tests.rest.admin.*]
-disallow_untyped_defs = True
-
-[mypy-tests.rest.client.*]
+[mypy-tests.rest.*]
 disallow_untyped_defs = True
 
 [mypy-tests.federation.transport.test_client]
@@ -348,3 +335,6 @@ ignore_missing_imports = True
 
 [mypy-zope]
 ignore_missing_imports = True
+
+[mypy-incremental.*]
+ignore_missing_imports = True
diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages.py
index 7ff96a1ee6..7ff96a1ee6 100755
--- a/scripts-dev/build_debian_packages
+++ b/scripts-dev/build_debian_packages.py
diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment.sh
index 493558ad65..493558ad65 100755
--- a/scripts-dev/check-newsfragment
+++ b/scripts-dev/check-newsfragment.sh
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 0aecb3daf1..0a79a4063f 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -71,4 +71,4 @@ fi
 
 # Run the tests!
 echo "Images built; running complement"
-go test -v -tags synapse_blacklist,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
+go test -v -tags synapse_blacklist,msc2403,msc2716,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
diff --git a/scripts-dev/generate_sample_config b/scripts-dev/generate_sample_config.sh
index 185e277933..375897eacb 100755
--- a/scripts-dev/generate_sample_config
+++ b/scripts-dev/generate_sample_config.sh
@@ -15,11 +15,11 @@ check() {
 
 if [ "$1" == "--check" ]; then
     diff -u "$SAMPLE_CONFIG" <(synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml) >/dev/null || {
-        echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
+        echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2
         exit 1
     }
     diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || {
-        echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
+        echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2
         exit 1
     }
 else
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index df4d4934d0..c063fafa97 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -85,9 +85,7 @@ else
           "synapse" "docker" "tests"
           # annoyingly, black doesn't find these so we have to list them
           "scripts-dev"
-          "scripts-dev/build_debian_packages"
-          "scripts-dev/sign_json"
-          "contrib" "synctl" "setup.py" "synmark" "stubs" ".ci"
+          "contrib" "setup.py" "synmark" "stubs" ".ci"
       )
   fi
 fi
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 4e1f99fee4..685fa32b03 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -17,6 +17,8 @@
 """An interactive script for doing a release. See `cli()` below.
 """
 
+import glob
+import os
 import re
 import subprocess
 import sys
@@ -64,11 +66,15 @@ def cli():
 
         ./scripts-dev/release.py tag
 
-        # ... wait for asssets to build ...
+        # ... wait for assets to build ...
 
         ./scripts-dev/release.py publish
         ./scripts-dev/release.py upload
 
+        # Optional: generate some nice links for the announcement
+
+        ./scripts-dev/release.py announce
+
     If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
     `tag`/`publish` command, then a new draft release will be created/published.
     """
@@ -209,8 +215,8 @@ def prepare():
     with open("synapse/__init__.py", "w") as f:
         f.write(parsed_synapse_ast.dumps())
 
-    # Generate changelogs
-    run_until_successful("python3 -m towncrier", shell=True)
+    # Generate changelogs.
+    generate_and_write_changelog(current_version)
 
     # Generate debian changelogs
     if parsed_new_version.pre is not None:
@@ -413,6 +419,41 @@ def upload():
     )
 
 
+@cli.command()
+def announce():
+    """Generate markdown to announce the release."""
+
+    current_version, _, _ = parse_version_from_module()
+    tag_name = f"v{current_version}"
+
+    click.echo(
+        f"""
+Hi everyone. Synapse {current_version} has just been released.
+
+[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) |\
+[docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \
+[debs](https://packages.matrix.org/debian/) | \
+[pypi](https://pypi.org/project/matrix-synapse/{current_version}/)"""
+    )
+
+    if "rc" in tag_name:
+        click.echo(
+            """
+Announce the RC in
+- #homeowners:matrix.org (Synapse Announcements)
+- #synapse-dev:matrix.org"""
+        )
+    else:
+        click.echo(
+            """
+Announce the release in
+- #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic
+- #synapse:matrix.org (Synapse Admins), bumping the version in the topic
+- #synapse-dev:matrix.org
+- #synapse-package-maintainers:matrix.org"""
+        )
+
+
 def parse_version_from_module() -> Tuple[
     version.Version, redbaron.RedBaron, redbaron.Node
 ]:
@@ -523,5 +564,29 @@ def get_changes_for_version(wanted_version: version.Version) -> str:
     return "\n".join(version_changelog)
 
 
+def generate_and_write_changelog(current_version: version.Version):
+    # We do this by getting a draft so that we can edit it before writing to the
+    # changelog.
+    result = run_until_successful(
+        "python3 -m towncrier --draft", shell=True, capture_output=True
+    )
+    new_changes = result.stdout.decode("utf-8")
+    new_changes = new_changes.replace(
+        "No significant changes.", f"No significant changes since {current_version}."
+    )
+
+    # Prepend changes to changelog
+    with open("CHANGES.md", "r+") as f:
+        existing_content = f.read()
+        f.seek(0, 0)
+        f.write(new_changes)
+        f.write("\n")
+        f.write(existing_content)
+
+    # Remove all the news fragments
+    for f in glob.iglob("changelog.d/*.*"):
+        os.remove(f)
+
+
 if __name__ == "__main__":
     cli()
diff --git a/scripts-dev/sign_json b/scripts-dev/sign_json.py
index 9459543106..9459543106 100755
--- a/scripts-dev/sign_json
+++ b/scripts-dev/sign_json.py
diff --git a/setup.py b/setup.py
index 318df16766..63da71ad7b 100755
--- a/setup.py
+++ b/setup.py
@@ -108,6 +108,7 @@ CONDITIONAL_REQUIREMENTS["mypy"] = [
     "types-jsonschema>=3.2.0",
     "types-opentracing>=2.4.2",
     "types-Pillow>=8.3.4",
+    "types-psycopg2>=2.9.9",
     "types-pyOpenSSL>=20.0.7",
     "types-PyYAML>=5.4.10",
     "types-requests>=2.26.0",
@@ -155,6 +156,7 @@ setup(
             # Application
             "synapse_homeserver = synapse.app.homeserver:main",
             "synapse_worker = synapse.app.generic_worker:main",
+            "synctl = synapse._scripts.synctl:main",
             # Scripts
             "export_signing_key = synapse._scripts.export_signing_key:main",
             "generate_config = synapse._scripts.generate_config:main",
@@ -177,6 +179,5 @@ setup(
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
     ],
-    scripts=["synctl"],
     cmdclass={"test": TestCommand},
 )
diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi
index 429234d7ae..2d8ca018fb 100644
--- a/stubs/txredisapi.pyi
+++ b/stubs/txredisapi.pyi
@@ -20,7 +20,7 @@ from twisted.internet import protocol
 from twisted.internet.defer import Deferred
 
 class RedisProtocol(protocol.Protocol):
-    def publish(self, channel: str, message: bytes): ...
+    def publish(self, channel: str, message: bytes) -> "Deferred[None]": ...
     def ping(self) -> "Deferred[None]": ...
     def set(
         self,
@@ -52,11 +52,14 @@ def lazyConnection(
     convertNumbers: bool = ...,
 ) -> RedisProtocol: ...
 
-class ConnectionHandler: ...
+# ConnectionHandler doesn't actually inherit from RedisProtocol, but it proxies
+# most methods to it via ConnectionHandler.__getattr__.
+class ConnectionHandler(RedisProtocol):
+    def disconnect(self) -> "Deferred[None]": ...
 
 class RedisFactory(protocol.ReconnectingClientFactory):
     continueTrying: bool
-    handler: RedisProtocol
+    handler: ConnectionHandler
     pool: List[RedisProtocol]
     replyTimeout: Optional[int]
     def __init__(
diff --git a/synapse/__init__.py b/synapse/__init__.py
index b21e1ed0f3..88aef1889c 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -25,6 +25,27 @@ if sys.version_info < (3, 7):
     print("Synapse requires Python 3.7 or above.")
     sys.exit(1)
 
+# Allow using the asyncio reactor via env var.
+if bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False)):
+    try:
+        from incremental import Version
+
+        import twisted
+
+        # We need a bugfix that is included in Twisted 21.2.0:
+        # https://twistedmatrix.com/trac/ticket/9787
+        if twisted.version < Version("Twisted", 21, 2, 0):
+            print("Using asyncio reactor requires Twisted>=21.2.0")
+            sys.exit(1)
+
+        import asyncio
+
+        from twisted.internet import asyncioreactor
+
+        asyncioreactor.install(asyncio.get_event_loop())
+    except ImportError:
+        pass
+
 # Twisted and canonicaljson will fail to import when this file is executed to
 # get the __version__ during a fresh install. That's OK and subsequent calls to
 # actually start Synapse will import these libraries fine.
@@ -47,7 +68,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.54.0rc1"
+__version__ = "1.55.2"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
diff --git a/synctl b/synapse/_scripts/synctl.py
index 1ab36949c7..1ab36949c7 100755
--- a/synctl
+++ b/synapse/_scripts/synctl.py
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 36ace7c613..b0c08a074d 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -178,7 +178,9 @@ class RelationTypes:
     ANNOTATION: Final = "m.annotation"
     REPLACE: Final = "m.replace"
     REFERENCE: Final = "m.reference"
-    THREAD: Final = "io.element.thread"
+    THREAD: Final = "m.thread"
+    # TODO Remove this in Synapse >= v1.57.0.
+    UNSTABLE_THREAD: Final = "io.element.thread"
 
 
 class LimitBlockingTypes:
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index cb532d7238..27e97d6f37 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -88,7 +88,9 @@ ROOM_EVENT_FILTER_SCHEMA = {
         "org.matrix.labels": {"type": "array", "items": {"type": "string"}},
         "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
         # MSC3440, filtering by event relations.
+        "related_by_senders": {"type": "array", "items": {"type": "string"}},
         "io.element.relation_senders": {"type": "array", "items": {"type": "string"}},
+        "related_by_rel_types": {"type": "array", "items": {"type": "string"}},
         "io.element.relation_types": {"type": "array", "items": {"type": "string"}},
     },
 }
@@ -318,19 +320,18 @@ class Filter:
         self.labels = filter_json.get("org.matrix.labels", None)
         self.not_labels = filter_json.get("org.matrix.not_labels", [])
 
-        # Ideally these would be rejected at the endpoint if they were provided
-        # and not supported, but that would involve modifying the JSON schema
-        # based on the homeserver configuration.
+        self.related_by_senders = self.filter_json.get("related_by_senders", None)
+        self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None)
+
+        # Fall back to the unstable prefix if the stable version is not given.
         if hs.config.experimental.msc3440_enabled:
-            self.relation_senders = self.filter_json.get(
+            self.related_by_senders = self.related_by_senders or self.filter_json.get(
                 "io.element.relation_senders", None
             )
-            self.relation_types = self.filter_json.get(
-                "io.element.relation_types", None
+            self.related_by_rel_types = (
+                self.related_by_rel_types
+                or self.filter_json.get("io.element.relation_types", None)
             )
-        else:
-            self.relation_senders = None
-            self.relation_types = None
 
     def filters_all_types(self) -> bool:
         return "*" in self.not_types
@@ -461,7 +462,7 @@ class Filter:
         event_ids = [event.event_id for event in events if isinstance(event, EventBase)]  # type: ignore[attr-defined]
         event_ids_to_keep = set(
             await self._store.events_have_relations(
-                event_ids, self.relation_senders, self.relation_types
+                event_ids, self.related_by_senders, self.related_by_rel_types
             )
         )
 
@@ -474,7 +475,7 @@ class Filter:
     async def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
         result = [event for event in events if self._check(event)]
 
-        if self.relation_senders or self.relation_types:
+        if self.related_by_senders or self.related_by_rel_types:
             return await self._check_event_relations(result)
 
         return result
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 1536a42723..b6f510ed30 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -322,7 +322,8 @@ class GenericWorkerServer(HomeServer):
 
                     presence.register_servlets(self, resource)
 
-                    groups.register_servlets(self, resource)
+                    if self.config.experimental.groups_enabled:
+                        groups.register_servlets(self, resource)
 
                     resources.update({CLIENT_API_PREFIX: resource})
 
@@ -417,7 +418,7 @@ class GenericWorkerServer(HomeServer):
             else:
                 logger.warning("Unsupported listener type: %s", listener.type)
 
-        self.get_tcp_replication().start_replication(self)
+        self.get_replication_command_handler().start_replication(self)
 
 
 def start(config_options: List[str]) -> None:
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index a6789a840e..0f75e7b9d4 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -261,7 +261,10 @@ class SynapseHomeServer(HomeServer):
             resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
 
         if name == "metrics" and self.config.metrics.enable_metrics:
-            resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
+            metrics_resource: Resource = MetricsResource(RegistryProxy)
+            if compress:
+                metrics_resource = gz_wrap(metrics_resource)
+            resources[METRICS_PREFIX] = metrics_resource
 
         if name == "replication":
             resources[REPLICATION_PREFIX] = ReplicationRestResource(self)
@@ -273,7 +276,7 @@ class SynapseHomeServer(HomeServer):
             # If redis is enabled we connect via the replication command handler
             # in the same way as the workers (since we're effectively a client
             # rather than a server).
-            self.get_tcp_replication().start_replication(self)
+            self.get_replication_command_handler().start_replication(self)
 
         for listener in self.config.server.listeners:
             if listener.type == "http":
@@ -348,6 +351,23 @@ def setup(config_options: List[str]) -> SynapseHomeServer:
     if config.server.gc_seconds:
         synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds
 
+    if (
+        config.registration.enable_registration
+        and not config.registration.enable_registration_without_verification
+    ):
+        if (
+            not config.captcha.enable_registration_captcha
+            and not config.registration.registrations_require_3pid
+            and not config.registration.registration_requires_token
+        ):
+
+            raise ConfigError(
+                "You have enabled open registration without any verification. This is a known vector for "
+                "spam and abuse. If you would like to allow public registration, please consider adding email, "
+                "captcha, or token-based verification. Otherwise this check can be removed by setting the "
+                "`enable_registration_without_verification` config option to `true`."
+            )
+
     hs = SynapseHomeServer(
         config.server.server_name,
         config=config,
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 4d3f8e4923..07ec95f1d6 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -175,27 +175,14 @@ class ApplicationService:
             return namespace.exclusive
         return False
 
-    async def _matches_user(self, event: EventBase, store: "DataStore") -> bool:
-        if self.is_interested_in_user(event.sender):
-            return True
-
-        # also check m.room.member state key
-        if event.type == EventTypes.Member and self.is_interested_in_user(
-            event.state_key
-        ):
-            return True
-
-        does_match = await self.matches_user_in_member_list(event.room_id, store)
-        return does_match
-
     @cached(num_args=1, cache_context=True)
-    async def matches_user_in_member_list(
+    async def _matches_user_in_member_list(
         self,
         room_id: str,
         store: "DataStore",
         cache_context: _CacheContext,
     ) -> bool:
-        """Check if this service is interested a room based upon it's membership
+        """Check if this service is interested a room based upon its membership
 
         Args:
             room_id: The room to check.
@@ -214,47 +201,110 @@ class ApplicationService:
                 return True
         return False
 
-    def _matches_room_id(self, event: EventBase) -> bool:
-        if hasattr(event, "room_id"):
-            return self.is_interested_in_room(event.room_id)
-        return False
+    def is_interested_in_user(
+        self,
+        user_id: str,
+    ) -> bool:
+        """
+        Returns whether the application service is interested in a given user ID.
+
+        The appservice is considered to be interested in a user if either: the
+        user ID is in the appservice's user namespace, or if the user is the
+        appservice's configured sender_localpart.
+
+        Args:
+            user_id: The ID of the user to check.
+
+        Returns:
+            True if the application service is interested in the user, False if not.
+        """
+        return (
+            # User is the appservice's sender_localpart user
+            user_id == self.sender
+            # User is in the appservice's user namespace
+            or self.is_user_in_namespace(user_id)
+        )
+
+    @cached(num_args=1, cache_context=True)
+    async def is_interested_in_room(
+        self,
+        room_id: str,
+        store: "DataStore",
+        cache_context: _CacheContext,
+    ) -> bool:
+        """
+        Returns whether the application service is interested in a given room ID.
+
+        The appservice is considered to be interested in the room if either: the ID or one
+        of the aliases of the room is in the appservice's room ID or alias namespace
+        respectively, or if one of the members of the room falls into the appservice's user
+        namespace.
 
-    async def _matches_aliases(self, event: EventBase, store: "DataStore") -> bool:
-        alias_list = await store.get_aliases_for_room(event.room_id)
+        Args:
+            room_id: The ID of the room to check.
+            store: The homeserver's datastore class.
+
+        Returns:
+            True if the application service is interested in the room, False if not.
+        """
+        # Check if we have interest in this room ID
+        if self.is_room_id_in_namespace(room_id):
+            return True
+
+        # likewise with the room's aliases (if it has any)
+        alias_list = await store.get_aliases_for_room(room_id)
         for alias in alias_list:
-            if self.is_interested_in_alias(alias):
+            if self.is_room_alias_in_namespace(alias):
                 return True
 
-        return False
+        # And finally, perform an expensive check on whether any of the
+        # users in the room match the appservice's user namespace
+        return await self._matches_user_in_member_list(
+            room_id, store, on_invalidate=cache_context.invalidate
+        )
 
-    async def is_interested(self, event: EventBase, store: "DataStore") -> bool:
+    @cached(num_args=1, cache_context=True)
+    async def is_interested_in_event(
+        self,
+        event_id: str,
+        event: EventBase,
+        store: "DataStore",
+        cache_context: _CacheContext,
+    ) -> bool:
         """Check if this service is interested in this event.
 
         Args:
+            event_id: The ID of the event to check. This is purely used for simplifying the
+                caching of calls to this method.
             event: The event to check.
             store: The datastore to query.
 
         Returns:
-            True if this service would like to know about this event.
+            True if this service would like to know about this event, otherwise False.
         """
-        # Do cheap checks first
-        if self._matches_room_id(event):
+        # Check if we're interested in this event's sender by namespace (or if they're the
+        # sender_localpart user)
+        if self.is_interested_in_user(event.sender):
             return True
 
-        # This will check the namespaces first before
-        # checking the store, so should be run before _matches_aliases
-        if await self._matches_user(event, store):
+        # additionally, if this is a membership event, perform the same checks on
+        # the user it references
+        if event.type == EventTypes.Member and self.is_interested_in_user(
+            event.state_key
+        ):
             return True
 
-        # This will check the store, so should be run last
-        if await self._matches_aliases(event, store):
+        # This will check the datastore, so should be run last
+        if await self.is_interested_in_room(
+            event.room_id, store, on_invalidate=cache_context.invalidate
+        ):
             return True
 
         return False
 
-    @cached(num_args=1)
+    @cached(num_args=1, cache_context=True)
     async def is_interested_in_presence(
-        self, user_id: UserID, store: "DataStore"
+        self, user_id: UserID, store: "DataStore", cache_context: _CacheContext
     ) -> bool:
         """Check if this service is interested a user's presence
 
@@ -272,20 +322,19 @@ class ApplicationService:
 
         # Then find out if the appservice is interested in any of those rooms
         for room_id in room_ids:
-            if await self.matches_user_in_member_list(room_id, store):
+            if await self.is_interested_in_room(
+                room_id, store, on_invalidate=cache_context.invalidate
+            ):
                 return True
         return False
 
-    def is_interested_in_user(self, user_id: str) -> bool:
-        return (
-            bool(self._matches_regex(ApplicationService.NS_USERS, user_id))
-            or user_id == self.sender
-        )
+    def is_user_in_namespace(self, user_id: str) -> bool:
+        return bool(self._matches_regex(ApplicationService.NS_USERS, user_id))
 
-    def is_interested_in_alias(self, alias: str) -> bool:
+    def is_room_alias_in_namespace(self, alias: str) -> bool:
         return bool(self._matches_regex(ApplicationService.NS_ALIASES, alias))
 
-    def is_interested_in_room(self, room_id: str) -> bool:
+    def is_room_id_in_namespace(self, room_id: str) -> bool:
         return bool(self._matches_regex(ApplicationService.NS_ROOMS, room_id))
 
     def is_exclusive_user(self, user_id: str) -> bool:
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index a0ea958af6..98fe354014 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -25,7 +25,7 @@ from synapse.appservice import (
     TransactionUnusedFallbackKeys,
 )
 from synapse.events import EventBase
-from synapse.events.utils import serialize_event
+from synapse.events.utils import SerializeEventConfig, serialize_event
 from synapse.http.client import SimpleHttpClient
 from synapse.types import JsonDict, ThirdPartyInstanceID
 from synapse.util.caches.response_cache import ResponseCache
@@ -321,16 +321,18 @@ class ApplicationServiceApi(SimpleHttpClient):
             serialize_event(
                 e,
                 time_now,
-                as_client_event=True,
-                # If this is an invite or a knock membership event, and we're interested
-                # in this user, then include any stripped state alongside the event.
-                include_stripped_room_state=(
-                    e.type == EventTypes.Member
-                    and (
-                        e.membership == Membership.INVITE
-                        or e.membership == Membership.KNOCK
-                    )
-                    and service.is_interested_in_user(e.state_key)
+                config=SerializeEventConfig(
+                    as_client_event=True,
+                    # If this is an invite or a knock membership event, and we're interested
+                    # in this user, then include any stripped state alongside the event.
+                    include_stripped_room_state=(
+                        e.type == EventTypes.Member
+                        and (
+                            e.membership == Membership.INVITE
+                            or e.membership == Membership.KNOCK
+                        )
+                        and service.is_interested_in_user(e.state_key)
+                    ),
                 ),
             )
             for e in events
diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
index 1eb5f5a68c..363d8b4554 100644
--- a/synapse/config/_base.pyi
+++ b/synapse/config/_base.pyi
@@ -19,6 +19,7 @@ from synapse.config import (
     api,
     appservice,
     auth,
+    background_updates,
     cache,
     captcha,
     cas,
@@ -113,6 +114,7 @@ class RootConfig:
     caches: cache.CacheConfig
     federation: federation.FederationConfig
     retention: retention.RetentionConfig
+    background_updates: background_updates.BackgroundUpdateConfig
 
     config_classes: List[Type["Config"]] = ...
     def __init__(self) -> None: ...
diff --git a/synapse/config/background_updates.py b/synapse/config/background_updates.py
new file mode 100644
index 0000000000..f6cdeacc4b
--- /dev/null
+++ b/synapse/config/background_updates.py
@@ -0,0 +1,68 @@
+# Copyright 2022 Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class BackgroundUpdateConfig(Config):
+    section = "background_updates"
+
+    def generate_config_section(self, **kwargs) -> str:
+        return """\
+        ## Background Updates ##
+
+        # Background updates are database updates that are run in the background in batches.
+        # The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to
+        # sleep can all be configured. This is helpful to speed up or slow down the updates.
+        #
+        background_updates:
+            # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set
+            # a time to change the default.
+            #
+            #background_update_duration_ms: 500
+
+            # Whether to sleep between updates. Defaults to True. Uncomment to change the default.
+            #
+            #sleep_enabled: false
+
+            # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment
+            # and set a duration to change the default.
+            #
+            #sleep_duration_ms: 300
+
+            # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and
+            # set a size to change the default.
+            #
+            #min_batch_size: 10
+
+            # The batch size to use for the first iteration of a new background update. The default is 100.
+            # Uncomment and set a size to change the default.
+            #
+            #default_batch_size: 50
+        """
+
+    def read_config(self, config, **kwargs) -> None:
+        bg_update_config = config.get("background_updates") or {}
+
+        self.update_duration_ms = bg_update_config.get(
+            "background_update_duration_ms", 100
+        )
+
+        self.sleep_enabled = bg_update_config.get("sleep_enabled", True)
+
+        self.sleep_duration_ms = bg_update_config.get("sleep_duration_ms", 1000)
+
+        self.min_batch_size = bg_update_config.get("min_batch_size", 1)
+
+        self.default_batch_size = bg_update_config.get("default_batch_size", 100)
diff --git a/synapse/config/database.py b/synapse/config/database.py
index 06ccf15cd9..d7f2219f53 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -37,6 +37,12 @@ DEFAULT_CONFIG = """\
 # 'txn_limit' gives the maximum number of transactions to run per connection
 # before reconnecting. Defaults to 0, which means no limit.
 #
+# 'allow_unsafe_locale' is an option specific to Postgres. Under the default behavior, Synapse will refuse to
+# start if the postgres db is set to a non-C locale. You can override this behavior (which is *not* recommended)
+# by setting 'allow_unsafe_locale' to true. Note that doing so may corrupt your database. You can find more information
+# here: https://matrix-org.github.io/synapse/latest/postgres.html#fixing-incorrect-collate-or-ctype and here:
+# https://wiki.postgresql.org/wiki/Locale_data_changes
+#
 # 'args' gives options which are passed through to the database engine,
 # except for options starting 'cp_', which are used to configure the Twisted
 # connection pool. For a reference to valid arguments, see:
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 41338b39df..064db4487c 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -74,3 +74,6 @@ class ExperimentalConfig(Config):
 
         # MSC3720 (Account status endpoint)
         self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
+
+        # The deprecated groups feature.
+        self.groups_enabled: bool = experimental.get("groups_enabled", True)
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 001605c265..a4ec706908 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -16,6 +16,7 @@ from .account_validity import AccountValidityConfig
 from .api import ApiConfig
 from .appservice import AppServiceConfig
 from .auth import AuthConfig
+from .background_updates import BackgroundUpdateConfig
 from .cache import CacheConfig
 from .captcha import CaptchaConfig
 from .cas import CasConfig
@@ -99,4 +100,5 @@ class HomeServerConfig(RootConfig):
         WorkerConfig,
         RedisConfig,
         ExperimentalConfig,
+        BackgroundUpdateConfig,
     ]
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index f7e4f9ef22..5d571651cb 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -182,8 +182,14 @@ class OIDCConfig(Config):
         #
         #             localpart_template: Jinja2 template for the localpart of the MXID.
         #                 If this is not set, the user will be prompted to choose their
-        #                 own username (see 'sso_auth_account_details.html' in the 'sso'
-        #                 section of this file).
+        #                 own username (see the documentation for the
+        #                 'sso_auth_account_details.html' template). This template can
+        #                 use the 'localpart_from_email' filter.
+        #
+        #             confirm_localpart: Whether to prompt the user to validate (or
+        #                 change) the generated localpart (see the documentation for the
+        #                 'sso_auth_account_details.html' template), instead of
+        #                 registering the account right away.
         #
         #             display_name_template: Jinja2 template for the display name to set
         #                 on first login. If unset, no displayname will be set.
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index ea9b50fe97..40fb329a7f 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -33,6 +33,10 @@ class RegistrationConfig(Config):
                 str(config["disable_registration"])
             )
 
+        self.enable_registration_without_verification = strtobool(
+            str(config.get("enable_registration_without_verification", False))
+        )
+
         self.registrations_require_3pid = config.get("registrations_require_3pid", [])
         self.allowed_local_3pids = config.get("allowed_local_3pids", [])
         self.enable_3pid_lookup = config.get("enable_3pid_lookup", True)
@@ -207,10 +211,18 @@ class RegistrationConfig(Config):
         # Registration can be rate-limited using the parameters in the "Ratelimiting"
         # section of this file.
 
-        # Enable registration for new users.
+        # Enable registration for new users. Defaults to 'false'. It is highly recommended that if you enable registration,
+        # you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration
+        # without any verification, you must also set `enable_registration_without_verification`, found below.
         #
         #enable_registration: false
 
+        # Enable registration without email or captcha verification. Note: this option is *not* recommended,
+        # as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect
+        # unless `enable_registration` is also enabled.
+        #
+        #enable_registration_without_verification: true
+
         # Time that a user's session remains valid for, after they log in.
         #
         # Note that this is not currently compatible with guest logins.
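
As a hedged illustration of how `enable_registration` and `enable_registration_without_verification`
(added above) combine, parsed the same way `read_config` does; the config values are hypothetical:

    from distutils.util import strtobool

    # Hypothetical homeserver.yaml fragment: open registration together with the
    # explicit opt-out of verification. Without the second flag, open registration
    # must be paired with captcha, email or token-based verification.
    config = {
        "enable_registration": True,
        "enable_registration_without_verification": True,
    }

    allow_unverified = strtobool(
        str(config.get("enable_registration_without_verification", False))
    )
    assert allow_unverified == 1  # strtobool returns 1/0, not a bool
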
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 49cd0a4f19..38de4b8000 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -676,6 +676,10 @@ class ServerConfig(Config):
         ):
             raise ConfigError("'custom_template_directory' must be a string")
 
+        self.use_account_validity_in_account_status: bool = (
+            config.get("use_account_validity_in_account_status") or False
+        )
+
     def has_tls_listener(self) -> bool:
         return any(listener.tls for listener in self.listeners)
 
diff --git a/synapse/config/spam_checker.py b/synapse/config/spam_checker.py
index a233a9ce03..4c52103b1c 100644
--- a/synapse/config/spam_checker.py
+++ b/synapse/config/spam_checker.py
@@ -25,8 +25,8 @@ logger = logging.getLogger(__name__)
 LEGACY_SPAM_CHECKER_WARNING = """
 This server is using a spam checker module that is implementing the deprecated spam
 checker interface. Please check with the module's maintainer to see if a new version
-supporting Synapse's generic modules system is available.
-For more information, please see https://matrix-org.github.io/synapse/latest/modules.html
+supporting Synapse's generic modules system is available. For more information, please
+see https://matrix-org.github.io/synapse/latest/modules/index.html
 ---------------------------------------------------------------------------------------"""
 
 
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 04afd48274..cd80fcf9d1 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -21,7 +21,6 @@ from typing import (
     Awaitable,
     Callable,
     Collection,
-    Dict,
     List,
     Optional,
     Tuple,
@@ -31,7 +30,7 @@ from typing import (
 from synapse.rest.media.v1._base import FileInfo
 from synapse.rest.media.v1.media_storage import ReadableFileWrapper
 from synapse.spam_checker_api import RegistrationBehaviour
-from synapse.types import RoomAlias
+from synapse.types import RoomAlias, UserProfile
 from synapse.util.async_helpers import maybe_awaitable
 
 if TYPE_CHECKING:
@@ -50,7 +49,7 @@ USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[[str, str, str, str], Awaitable[bo
 USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]]
 USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]]
 USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
-CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[Dict[str, str]], Awaitable[bool]]
+CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[UserProfile], Awaitable[bool]]
 LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[
     [
         Optional[dict],
@@ -245,8 +244,8 @@ class SpamChecker:
         """Checks if a given event is considered "spammy" by this server.
 
         If the server considers an event spammy, then it will be rejected if
-        sent by a local user. If it is sent by a user on another server, then
-        users receive a blank event.
+        sent by a local user. If it is sent by a user on another server, the
+        event is soft-failed.
 
         Args:
             event: the event to be checked
@@ -383,7 +382,7 @@ class SpamChecker:
 
         return True
 
-    async def check_username_for_spam(self, user_profile: Dict[str, str]) -> bool:
+    async def check_username_for_spam(self, user_profile: UserProfile) -> bool:
         """Checks if a user ID or display name are considered "spammy" by this server.
 
         If the server considers a username spammy, then it will not be included in
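
For module authors, a hedged sketch of a `check_username_for_spam` callback under the new
signature, assuming `UserProfile` exposes `user_id`, `display_name` and `avatar_url` keys
(the blocking rule is illustrative only):

    from synapse.types import UserProfile

    async def check_username_for_spam(user_profile: UserProfile) -> bool:
        # The profile is now a typed mapping rather than a plain Dict[str, str].
        display_name = user_profile.get("display_name") or ""
        # Returning True hides this user from user directory search results.
        return "spam" in display_name.lower()

Such a callback would typically be registered through the module API's
`register_spam_checker_callbacks`.
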
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index dd3104faf3..bfca454f51 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -38,6 +38,8 @@ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[
     [str, StateMap[EventBase], str], Awaitable[bool]
 ]
 ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable]
+CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
+CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]]
 ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable]
 ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable]
 
@@ -157,6 +159,12 @@ class ThirdPartyEventRules:
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = []
         self._on_new_event_callbacks: List[ON_NEW_EVENT_CALLBACK] = []
+        self._check_can_shutdown_room_callbacks: List[
+            CHECK_CAN_SHUTDOWN_ROOM_CALLBACK
+        ] = []
+        self._check_can_deactivate_user_callbacks: List[
+            CHECK_CAN_DEACTIVATE_USER_CALLBACK
+        ] = []
         self._on_profile_update_callbacks: List[ON_PROFILE_UPDATE_CALLBACK] = []
         self._on_user_deactivation_status_changed_callbacks: List[
             ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
@@ -173,8 +181,12 @@ class ThirdPartyEventRules:
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = None,
         on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
+        check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None,
+        check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None,
         on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
-        on_deactivation: Optional[ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK] = None,
+        on_user_deactivation_status_changed: Optional[
+            ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
+        ] = None,
     ) -> None:
         """Register callbacks from modules for each hook."""
         if check_event_allowed is not None:
@@ -196,11 +208,18 @@ class ThirdPartyEventRules:
         if on_new_event is not None:
             self._on_new_event_callbacks.append(on_new_event)
 
+        if check_can_shutdown_room is not None:
+            self._check_can_shutdown_room_callbacks.append(check_can_shutdown_room)
+
+        if check_can_deactivate_user is not None:
+            self._check_can_deactivate_user_callbacks.append(check_can_deactivate_user)
         if on_profile_update is not None:
             self._on_profile_update_callbacks.append(on_profile_update)
 
-        if on_deactivation is not None:
-            self._on_user_deactivation_status_changed_callbacks.append(on_deactivation)
+        if on_user_deactivation_status_changed is not None:
+            self._on_user_deactivation_status_changed_callbacks.append(
+                on_user_deactivation_status_changed,
+            )
 
     async def check_event_allowed(
         self, event: EventBase, context: EventContext
@@ -365,6 +384,46 @@ class ThirdPartyEventRules:
                     "Failed to run module API callback %s: %s", callback, e
                 )
 
+    async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
+        """Intercept requests to shutdown a room. If `False` is returned, the
+         room must not be shut down.
+
+        Args:
+            requester: The ID of the user requesting the shutdown.
+            room_id: The ID of the room.
+        """
+        for callback in self._check_can_shutdown_room_callbacks:
+            try:
+                if await callback(user_id, room_id) is False:
+                    return False
+            except Exception as e:
+                logger.exception(
+                    "Failed to run module API callback %s: %s", callback, e
+                )
+        return True
+
+    async def check_can_deactivate_user(
+        self,
+        user_id: str,
+        by_admin: bool,
+    ) -> bool:
+        """Intercept requests to deactivate a user. If `False` is returned, the
+        user should not be deactivated.
+
+        Args:
+            user_id: The ID of the user to be deactivated.
+            by_admin: Whether the deactivation was performed by a server admin.
+        """
+        for callback in self._check_can_deactivate_user_callbacks:
+            try:
+                if await callback(user_id, by_admin) is False:
+                    return False
+            except Exception as e:
+                logger.exception(
+                    "Failed to run module API callback %s: %s", callback, e
+                )
+        return True
+
     async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]:
         """Given a room ID, return the state events of that room.
 
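
A hedged sketch of a module wiring up the two new callbacks, assuming the module API forwards
these keyword arguments to `register_third_party_rules_callbacks` as the register function
above suggests (the policy itself is purely illustrative):

    class ExampleRulesModule:
        def __init__(self, config: dict, api):
            api.register_third_party_rules_callbacks(
                check_can_shutdown_room=self.check_can_shutdown_room,
                check_can_deactivate_user=self.check_can_deactivate_user,
            )

        async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
            # Returning False blocks the shutdown request.
            return not room_id.endswith(":protected.example.com")

        async def check_can_deactivate_user(self, user_id: str, by_admin: bool) -> bool:
            # Returning False blocks the deactivation; here only admin-initiated
            # deactivations are allowed through.
            return by_admin
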
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 9386fa29dd..7120062127 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -26,6 +26,7 @@ from typing import (
     Union,
 )
 
+import attr
 from frozendict import frozendict
 
 from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
@@ -37,7 +38,8 @@ from synapse.util.frozenutils import unfreeze
 from . import EventBase
 
 if TYPE_CHECKING:
-    from synapse.storage.databases.main.relations import BundledAggregations
+    from synapse.handlers.relations import BundledAggregations
+    from synapse.server import HomeServer
 
 
 # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
@@ -303,29 +305,37 @@ def format_event_for_client_v2_without_room_id(d: JsonDict) -> JsonDict:
     return d
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class SerializeEventConfig:
+    as_client_event: bool = True
+    # Function to convert from federation format to client format
+    event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1
+    # ID of the user's auth token - used for namespacing of transaction IDs
+    token_id: Optional[int] = None
+    # List of event fields to include. If empty, all fields will be returned.
+    only_event_fields: Optional[List[str]] = None
+    # Some events can have stripped room state stored in the `unsigned` field.
+    # This is required for invite and knock functionality. If this option is
+    # False, that state will be removed from the event before it is returned.
+    # Otherwise, it will be kept.
+    include_stripped_room_state: bool = False
+
+
+_DEFAULT_SERIALIZE_EVENT_CONFIG = SerializeEventConfig()
+
+
 def serialize_event(
     e: Union[JsonDict, EventBase],
     time_now_ms: int,
     *,
-    as_client_event: bool = True,
-    event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1,
-    token_id: Optional[str] = None,
-    only_event_fields: Optional[List[str]] = None,
-    include_stripped_room_state: bool = False,
+    config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
 ) -> JsonDict:
     """Serialize event for clients
 
     Args:
         e
         time_now_ms
-        as_client_event
-        event_format
-        token_id
-        only_event_fields
-        include_stripped_room_state: Some events can have stripped room state
-            stored in the `unsigned` field. This is required for invite and knock
-            functionality. If this option is False, that state will be removed from the
-            event before it is returned. Otherwise, it will be kept.
+        config: Event serialization config
 
     Returns:
         The serialized event dictionary.
@@ -348,11 +358,11 @@ def serialize_event(
 
     if "redacted_because" in e.unsigned:
         d["unsigned"]["redacted_because"] = serialize_event(
-            e.unsigned["redacted_because"], time_now_ms, event_format=event_format
+            e.unsigned["redacted_because"], time_now_ms, config=config
         )
 
-    if token_id is not None:
-        if token_id == getattr(e.internal_metadata, "token_id", None):
+    if config.token_id is not None:
+        if config.token_id == getattr(e.internal_metadata, "token_id", None):
             txn_id = getattr(e.internal_metadata, "txn_id", None)
             if txn_id is not None:
                 d["unsigned"]["transaction_id"] = txn_id
@@ -361,13 +371,14 @@ def serialize_event(
     # that are meant to provide metadata about a room to an invitee/knocker. They are
     # intended to only be included in specific circumstances, such as down sync, and
     # should not be included in any other case.
-    if not include_stripped_room_state:
+    if not config.include_stripped_room_state:
         d["unsigned"].pop("invite_room_state", None)
         d["unsigned"].pop("knock_room_state", None)
 
-    if as_client_event:
-        d = event_format(d)
+    if config.as_client_event:
+        d = config.event_format(d)
 
+    only_event_fields = config.only_event_fields
     if only_event_fields:
         if not isinstance(only_event_fields, list) or not all(
             isinstance(f, str) for f in only_event_fields
@@ -385,23 +396,26 @@ class EventClientSerializer:
     clients.
     """
 
+    def __init__(self, hs: "HomeServer"):
+        self._msc3440_enabled = hs.config.experimental.msc3440_enabled
+
     def serialize_event(
         self,
         event: Union[JsonDict, EventBase],
         time_now: int,
         *,
+        config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
         bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
-        **kwargs: Any,
     ) -> JsonDict:
         """Serializes a single event.
 
         Args:
             event: The event being serialized.
             time_now: The current time in milliseconds
+            config: Event serialization config
             bundle_aggregations: Whether to include the bundled aggregations for this
                 event. Only applies to non-state events. (State events never include
                 bundled aggregations.)
-            **kwargs: Arguments to pass to `serialize_event`
 
         Returns:
             The serialized event
@@ -410,7 +424,7 @@ class EventClientSerializer:
         if not isinstance(event, EventBase):
             return event
 
-        serialized_event = serialize_event(event, time_now, **kwargs)
+        serialized_event = serialize_event(event, time_now, config=config)
 
         # Check if there are any bundled aggregations to include with the event.
         if bundle_aggregations:
@@ -419,6 +433,7 @@ class EventClientSerializer:
                 self._inject_bundled_aggregations(
                     event,
                     time_now,
+                    config,
                     bundle_aggregations[event.event_id],
                     serialized_event,
                 )
@@ -456,6 +471,7 @@ class EventClientSerializer:
         self,
         event: EventBase,
         time_now: int,
+        config: SerializeEventConfig,
         aggregations: "BundledAggregations",
         serialized_event: JsonDict,
     ) -> None:
@@ -466,6 +482,7 @@ class EventClientSerializer:
             time_now: The current time in milliseconds
             aggregations: The bundled aggregation to serialize.
             serialized_event: The serialized event which may be modified.
+            config: Event serialization config
 
         """
         serialized_aggregations = {}
@@ -493,8 +510,8 @@ class EventClientSerializer:
             thread = aggregations.thread
 
             # Don't bundle aggregations as this could recurse forever.
-            serialized_latest_event = self.serialize_event(
-                thread.latest_event, time_now, bundle_aggregations=None
+            serialized_latest_event = serialize_event(
+                thread.latest_event, time_now, config=config
             )
             # Manually apply an edit, if one exists.
             if thread.latest_edit:
@@ -502,33 +519,53 @@ class EventClientSerializer:
                     thread.latest_event, serialized_latest_event, thread.latest_edit
                 )
 
-            serialized_aggregations[RelationTypes.THREAD] = {
+            thread_summary = {
                 "latest_event": serialized_latest_event,
                 "count": thread.count,
                 "current_user_participated": thread.current_user_participated,
             }
+            serialized_aggregations[RelationTypes.THREAD] = thread_summary
+            if self._msc3440_enabled:
+                serialized_aggregations[RelationTypes.UNSTABLE_THREAD] = thread_summary
 
         # Include the bundled aggregations in the event.
         if serialized_aggregations:
-            serialized_event["unsigned"].setdefault("m.relations", {}).update(
-                serialized_aggregations
-            )
+            # There is likely already an "unsigned" field, but a filter might
+            # have stripped it off (via the event_fields option). The server is
+            # allowed to return additional fields, so add it back.
+            serialized_event.setdefault("unsigned", {}).setdefault(
+                "m.relations", {}
+            ).update(serialized_aggregations)
 
     def serialize_events(
-        self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any
+        self,
+        events: Iterable[Union[JsonDict, EventBase]],
+        time_now: int,
+        *,
+        config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
+        bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
     ) -> List[JsonDict]:
         """Serializes multiple events.
 
         Args:
             event
             time_now: The current time in milliseconds
-            **kwargs: Arguments to pass to `serialize_event`
+            config: Event serialization config
+            bundle_aggregations: Whether to include the bundled aggregations for this
+                event. Only applies to non-state events. (State events never include
+                bundled aggregations.)
 
         Returns:
             The list of serialized events
         """
         return [
-            self.serialize_event(event, time_now=time_now, **kwargs) for event in events
+            self.serialize_event(
+                event,
+                time_now,
+                config=config,
+                bundle_aggregations=bundle_aggregations,
+            )
+            for event in events
         ]
 
 
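
A hedged sketch of how a caller migrates from the old keyword arguments to the new config
object (the `token_id` value is a placeholder):

    from synapse.events.utils import SerializeEventConfig

    # Previously: serialize_event(event, now, as_client_event=False, token_id=...)
    config = SerializeEventConfig(as_client_event=False, token_id=1234)

    # Fields that are not given keep the documented defaults.
    assert config.only_event_fields is None
    assert config.include_stripped_room_state is False

The same object is then passed as `config=` to `serialize_event`, `serialize_events` and
`EventClientSerializer.serialize_event` instead of spreading individual keyword arguments.
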
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 482bbdd867..c7400c737b 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -22,7 +22,6 @@ from typing import (
     Callable,
     Collection,
     Dict,
-    Iterable,
     List,
     Optional,
     Tuple,
@@ -577,10 +576,10 @@ class FederationServer(FederationBase):
     async def _on_context_state_request_compute(
         self, room_id: str, event_id: Optional[str]
     ) -> Dict[str, list]:
+        pdus: Collection[EventBase]
         if event_id:
-            pdus: Iterable[EventBase] = await self.handler.get_state_for_pdu(
-                room_id, event_id
-            )
+            event_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
+            pdus = await self.store.get_events_as_list(event_ids)
         else:
             pdus = (await self.state.get_current_state(room_id)).values()
 
@@ -1093,7 +1092,7 @@ class FederationServer(FederationBase):
         # has started processing).
         while True:
             async with lock:
-                logger.info("handling received PDU: %s", event)
+                logger.info("handling received PDU in room %s: %s", room_id, event)
                 try:
                     with nested_logging_context(event.event_id):
                         await self._federation_event_handler.on_receive_pdu(
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 0d7c4f5067..d720b5fd3f 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -244,7 +244,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
 
         self.notifier.on_new_replication_data()
 
-    def send_device_messages(self, destination: str) -> None:
+    def send_device_messages(self, destination: str, immediate: bool = False) -> None:
         """As per FederationSender"""
         # We don't need to replicate this as it gets sent down a different
         # stream.
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 6106a486d1..30e2421efc 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -118,7 +118,12 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def send_device_messages(self, destination: str) -> None:
+    def send_device_messages(self, destination: str, immediate: bool = True) -> None:
+        """Tells the sender that a new device message is ready to be sent to the
+        destination. The `immediate` flag specifies whether we should attempt to
+        send the messages immediately, or whether sending can be delayed for a
+        short while (to aid performance).
+        """
         raise NotImplementedError()
 
     @abc.abstractmethod
@@ -146,9 +151,8 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
 
 
 @attr.s
-class _PresenceQueue:
-    """A queue of destinations that need to be woken up due to new presence
-    updates.
+class _DestinationWakeupQueue:
+    """A queue of destinations that need to be woken up due to new updates.
 
     Staggers waking up of per destination queues to ensure that we don't attempt
     to start TLS connections with many hosts all at once, leading to pinned CPU.
@@ -175,7 +179,7 @@ class _PresenceQueue:
         if not self.processing:
             self._handle()
 
-    @wrap_as_background_process("_PresenceQueue.handle")
+    @wrap_as_background_process("_DestinationWakeupQueue.handle")
     async def _handle(self) -> None:
         """Background process to drain the queue."""
 
@@ -297,7 +301,7 @@ class FederationSender(AbstractFederationSender):
 
         self._external_cache = hs.get_external_cache()
 
-        self._presence_queue = _PresenceQueue(self, self.clock)
+        self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)
 
     def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
         """Get or create a PerDestinationQueue for the given destination
@@ -614,7 +618,7 @@ class FederationSender(AbstractFederationSender):
                 states, start_loop=False
             )
 
-            self._presence_queue.add_to_queue(destination)
+            self._destination_wakeup_queue.add_to_queue(destination)
 
     def build_and_send_edu(
         self,
@@ -667,7 +671,7 @@ class FederationSender(AbstractFederationSender):
         else:
             queue.send_edu(edu)
 
-    def send_device_messages(self, destination: str) -> None:
+    def send_device_messages(self, destination: str, immediate: bool = False) -> None:
         if destination == self.server_name:
             logger.warning("Not sending device update to ourselves")
             return
@@ -677,7 +681,11 @@ class FederationSender(AbstractFederationSender):
         ):
             return
 
-        self._get_per_destination_queue(destination).attempt_new_transaction()
+        if immediate:
+            self._get_per_destination_queue(destination).attempt_new_transaction()
+        else:
+            self._get_per_destination_queue(destination).mark_new_data()
+            self._destination_wakeup_queue.add_to_queue(destination)
 
     def wake_destination(self, destination: str) -> None:
         """Called when we want to retry sending transactions to a remote.
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index c8768f22bc..d80f0ac5e8 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -219,6 +219,16 @@ class PerDestinationQueue:
         self._pending_edus.append(edu)
         self.attempt_new_transaction()
 
+    def mark_new_data(self) -> None:
+        """Marks that the destination has new data to send, without starting a
+        new transaction.
+
+        If a transaction loop is already in progress then a new transaction will
+        be attempted when the current one finishes.
+        """
+
+        self._new_data_to_send = True
+
     def attempt_new_transaction(self) -> None:
         """Try to start a new transaction to this destination
 
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index 67a6347907..71b2f90eb9 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -289,7 +289,7 @@ class OpenIdUserInfo(BaseFederationServlet):
         return 200, {"sub": user_id}
 
 
-DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
+SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
     "federation": FEDERATION_SERVLET_CLASSES,
     "room_list": (PublicRoomList,),
     "group_server": GROUP_SERVER_SERVLET_CLASSES,
@@ -298,6 +298,10 @@ DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
     "openid": (OpenIdUserInfo,),
 }
 
+DEFAULT_SERVLET_GROUPS = ("federation", "room_list", "openid")
+
+GROUP_SERVLET_GROUPS = ("group_server", "group_local", "group_attestation")
+
 
 def register_servlets(
     hs: "HomeServer",
@@ -320,16 +324,19 @@ def register_servlets(
             Defaults to ``DEFAULT_SERVLET_GROUPS``.
     """
     if not servlet_groups:
-        servlet_groups = DEFAULT_SERVLET_GROUPS.keys()
+        servlet_groups = DEFAULT_SERVLET_GROUPS
+        # Only allow the groups servlets if the deprecated groups feature is enabled.
+        if hs.config.experimental.groups_enabled:
+            servlet_groups = servlet_groups + GROUP_SERVLET_GROUPS
 
     for servlet_group in servlet_groups:
         # Skip unknown servlet groups.
-        if servlet_group not in DEFAULT_SERVLET_GROUPS:
+        if servlet_group not in SERVLET_GROUPS:
             raise RuntimeError(
                 f"Attempting to register unknown federation servlet: '{servlet_group}'"
             )
 
-        for servletclass in DEFAULT_SERVLET_GROUPS[servlet_group]:
+        for servletclass in SERVLET_GROUPS[servlet_group]:
             # Only allow the `/timestamp_to_event` servlet if msc3030 is enabled
             if (
                 servletclass == FederationTimestampLookupServlet
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index 87e99c7ddf..2529dee613 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -63,7 +63,7 @@ class Authenticator:
 
         self.replication_client = None
         if hs.config.worker.worker_app:
-            self.replication_client = hs.get_tcp_replication()
+            self.replication_client = hs.get_replication_command_handler()
 
     # A method just so we can pass 'self' as the authenticator to the Servlets
     async def authenticate_request(
diff --git a/synapse/handlers/account.py b/synapse/handlers/account.py
index d5badf635b..c05a14304c 100644
--- a/synapse/handlers/account.py
+++ b/synapse/handlers/account.py
@@ -26,6 +26,10 @@ class AccountHandler:
         self._main_store = hs.get_datastores().main
         self._is_mine = hs.is_mine
         self._federation_client = hs.get_federation_client()
+        self._use_account_validity_in_account_status = (
+            hs.config.server.use_account_validity_in_account_status
+        )
+        self._account_validity_handler = hs.get_account_validity_handler()
 
     async def get_account_statuses(
         self,
@@ -106,6 +110,13 @@ class AccountHandler:
                 "deactivated": userinfo.is_deactivated,
             }
 
+            if self._use_account_validity_in_account_status:
+                status[
+                    "org.matrix.expired"
+                ] = await self._account_validity_handler.is_user_expired(
+                    user_id.to_string()
+                )
+
         return status
 
     async def _get_remote_account_statuses(
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index e6461cc3c9..bd913e524e 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -571,7 +571,7 @@ class ApplicationServicesHandler:
         room_alias_str = room_alias.to_string()
         services = self.store.get_app_services()
         alias_query_services = [
-            s for s in services if (s.is_interested_in_alias(room_alias_str))
+            s for s in services if (s.is_room_alias_in_namespace(room_alias_str))
         ]
         for alias_service in alias_query_services:
             is_known_alias = await self.appservice_api.query_alias(
@@ -660,7 +660,7 @@ class ApplicationServicesHandler:
         # inside of a list comprehension anymore.
         interested_list = []
         for s in services:
-            if await s.is_interested(event, self.store):
+            if await s.is_interested_in_event(event.event_id, event, self.store):
                 interested_list.append(s)
 
         return interested_list
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 76ae768e6e..816e1a6d79 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -17,7 +17,7 @@ from typing import TYPE_CHECKING, Optional
 
 from synapse.api.errors import SynapseError
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import Requester, UserID, create_requester
+from synapse.types import Codes, Requester, UserID, create_requester
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -42,6 +42,7 @@ class DeactivateAccountHandler:
 
         # Flag that indicates whether the process to part users from rooms is running
         self._user_parter_running = False
+        self._third_party_rules = hs.get_third_party_event_rules()
 
         # Start the user parter loop so it can resume parting users from rooms where
         # it left off (if it has work left to do).
@@ -74,6 +75,15 @@ class DeactivateAccountHandler:
         Returns:
             True if identity server supports removing threepids, otherwise False.
         """
+
+        # Check if this user can be deactivated
+        if not await self._third_party_rules.check_can_deactivate_user(
+            user_id, by_admin
+        ):
+            raise SynapseError(
+                403, "Deactivation of this user is forbidden", Codes.FORBIDDEN
+            )
+
         # FIXME: Theoretically there is a race here wherein user resets
         # password using threepid.
 
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 934b5bd734..d5ccaa0c37 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -371,7 +371,6 @@ class DeviceHandler(DeviceWorkerHandler):
                 log_kv(
                     {"reason": "User doesn't have device id.", "device_id": device_id}
                 )
-                pass
             else:
                 raise
 
@@ -414,7 +413,6 @@ class DeviceHandler(DeviceWorkerHandler):
                 # no match
                 set_tag("error", True)
                 set_tag("reason", "User doesn't have that device id.")
-                pass
             else:
                 raise
 
@@ -506,7 +504,7 @@ class DeviceHandler(DeviceWorkerHandler):
                 "Sending device list update notif for %r to: %r", user_id, hosts
             )
             for host in hosts:
-                self.federation_sender.send_device_messages(host)
+                self.federation_sender.send_device_messages(host, immediate=False)
                 log_kv({"message": "sent device update to host", "host": host})
 
     async def notify_user_signature_update(
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index b7064c6624..33d827a45b 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -119,7 +119,7 @@ class DirectoryHandler:
 
         service = requester.app_service
         if service:
-            if not service.is_interested_in_alias(room_alias_str):
+            if not service.is_room_alias_in_namespace(room_alias_str):
                 raise SynapseError(
                     400,
                     "This application service has not reserved this kind of alias.",
@@ -221,7 +221,7 @@ class DirectoryHandler:
     async def delete_appservice_association(
         self, service: ApplicationService, room_alias: RoomAlias
     ) -> None:
-        if not service.is_interested_in_alias(room_alias.to_string()):
+        if not service.is_room_alias_in_namespace(room_alias.to_string()):
             raise SynapseError(
                 400,
                 "This application service has not reserved this kind of alias",
@@ -376,7 +376,7 @@ class DirectoryHandler:
         # non-exclusive locks on the alias (or there are no interested services)
         services = self.store.get_app_services()
         interested_services = [
-            s for s in services if s.is_interested_in_alias(alias.to_string())
+            s for s in services if s.is_room_alias_in_namespace(alias.to_string())
         ]
 
         for service in interested_services:
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 97e75e60c3..d2ccb5c5d3 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -19,6 +19,7 @@ from typing import TYPE_CHECKING, Iterable, List, Optional
 from synapse.api.constants import EduTypes, EventTypes, Membership
 from synapse.api.errors import AuthError, SynapseError
 from synapse.events import EventBase
+from synapse.events.utils import SerializeEventConfig
 from synapse.handlers.presence import format_user_presence_state
 from synapse.streams.config import PaginationConfig
 from synapse.types import JsonDict, UserID
@@ -120,7 +121,7 @@ class EventStreamHandler:
             chunks = self._event_serializer.serialize_events(
                 events,
                 time_now,
-                as_client_event=as_client_event,
+                config=SerializeEventConfig(as_client_event=as_client_event),
             )
 
             chunk = {
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index eb03a5accb..350ec9c03a 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -23,8 +23,6 @@ from signedjson.key import decode_verify_key_bytes
 from signedjson.sign import verify_signed_json
 from unpaddedbase64 import decode_base64
 
-from twisted.internet import defer
-
 from synapse import event_auth
 from synapse.api.constants import EventContentFields, EventTypes, Membership
 from synapse.api.errors import (
@@ -45,11 +43,7 @@ from synapse.events.snapshot import EventContext
 from synapse.events.validator import EventValidator
 from synapse.federation.federation_client import InvalidResponseError
 from synapse.http.servlet import assert_params_in_dict
-from synapse.logging.context import (
-    make_deferred_yieldable,
-    nested_logging_context,
-    preserve_fn,
-)
+from synapse.logging.context import nested_logging_context
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.federation import (
     ReplicationCleanRoomRestServlet,
@@ -355,56 +349,8 @@ class FederationHandler:
         if success:
             return True
 
-        # Huh, well *those* domains didn't work out. Lets try some domains
-        # from the time.
-
-        tried_domains = set(likely_domains)
-        tried_domains.add(self.server_name)
-
-        event_ids = list(extremities.keys())
-
-        logger.debug("calling resolve_state_groups in _maybe_backfill")
-        resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events)
-        states_list = await make_deferred_yieldable(
-            defer.gatherResults(
-                [resolve(room_id, [e]) for e in event_ids], consumeErrors=True
-            )
-        )
-
-        # A map from event_id to state map of event_ids.
-        state_ids: Dict[str, StateMap[str]] = dict(
-            zip(event_ids, [s.state for s in states_list])
-        )
-
-        state_map = await self.store.get_events(
-            [e_id for ids in state_ids.values() for e_id in ids.values()],
-            get_prev_content=False,
-        )
-
-        # A map from event_id to state map of events.
-        state_events: Dict[str, StateMap[EventBase]] = {
-            key: {
-                k: state_map[e_id]
-                for k, e_id in state_dict.items()
-                if e_id in state_map
-            }
-            for key, state_dict in state_ids.items()
-        }
-
-        for e_id in event_ids:
-            likely_extremeties_domains = get_domains_from_state(state_events[e_id])
-
-            success = await try_backfill(
-                [
-                    dom
-                    for dom, _ in likely_extremeties_domains
-                    if dom not in tried_domains
-                ]
-            )
-            if success:
-                return True
-
-            tried_domains.update(dom for dom, _ in likely_extremeties_domains)
+        # TODO: we could also try servers which were previously in the room, but
+        #   are no longer.
 
         return False
 
@@ -1004,54 +950,35 @@ class FederationHandler:
 
         return event
 
-    async def get_state_for_pdu(self, room_id: str, event_id: str) -> List[EventBase]:
-        """Returns the state at the event. i.e. not including said event."""
-
-        event = await self.store.get_event(event_id, check_room_id=room_id)
-
-        state_groups = await self.state_store.get_state_groups(room_id, [event_id])
-
-        if state_groups:
-            _, state = list(state_groups.items()).pop()
-            results = {(e.type, e.state_key): e for e in state}
-
-            if event.is_state():
-                # Get previous state
-                if "replaces_state" in event.unsigned:
-                    prev_id = event.unsigned["replaces_state"]
-                    if prev_id != event.event_id:
-                        prev_event = await self.store.get_event(prev_id)
-                        results[(event.type, event.state_key)] = prev_event
-                else:
-                    del results[(event.type, event.state_key)]
-
-            res = list(results.values())
-            return res
-        else:
-            return []
-
     async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]:
         """Returns the state at the event. i.e. not including said event."""
         event = await self.store.get_event(event_id, check_room_id=room_id)
+        if event.internal_metadata.outlier:
+            raise NotFoundError("State not known at event %s" % (event_id,))
 
         state_groups = await self.state_store.get_state_groups_ids(room_id, [event_id])
 
-        if state_groups:
-            _, state = list(state_groups.items()).pop()
-            results = state
+        # get_state_groups_ids should return exactly one result
+        assert len(state_groups) == 1
 
-            if event.is_state():
-                # Get previous state
-                if "replaces_state" in event.unsigned:
-                    prev_id = event.unsigned["replaces_state"]
-                    if prev_id != event.event_id:
-                        results[(event.type, event.state_key)] = prev_id
-                else:
-                    results.pop((event.type, event.state_key), None)
+        state_map = next(iter(state_groups.values()))
 
-            return list(results.values())
-        else:
-            return []
+        state_key = event.get_state_key()
+        if state_key is not None:
+            # the event was not rejected (get_event raises a NotFoundError for rejected
+            # events) so the state at the event should include the event itself.
+            assert (
+                state_map.get((event.type, state_key)) == event.event_id
+            ), "State at event did not include event itself"
+
+            # ... but we need the state *before* that event
+            if "replaces_state" in event.unsigned:
+                prev_id = event.unsigned["replaces_state"]
+                state_map[(event.type, state_key)] = prev_id
+            else:
+                del state_map[(event.type, state_key)]
+
+        return list(state_map.values())
 
     async def on_backfill_request(
         self, origin: str, room_id: str, pdu_list: List[str], limit: int
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 344f20f37c..a7db8feb57 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -18,6 +18,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, cast
 from synapse.api.constants import EduTypes, EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.events import EventBase
+from synapse.events.utils import SerializeEventConfig
 from synapse.events.validator import EventValidator
 from synapse.handlers.presence import format_user_presence_state
 from synapse.handlers.receipts import ReceiptEventSource
@@ -152,10 +153,13 @@ class InitialSyncHandler:
 
         public_room_ids = await self.store.get_public_room_ids()
 
-        limit = pagin_config.limit
-        if limit is None:
+        if pagin_config.limit is not None:
+            limit = pagin_config.limit
+        else:
             limit = 10
 
+        serializer_options = SerializeEventConfig(as_client_event=as_client_event)
+
         async def handle_room(event: RoomsForUser) -> None:
             d: JsonDict = {
                 "room_id": event.room_id,
@@ -173,7 +177,7 @@ class InitialSyncHandler:
                 d["invite"] = self._event_serializer.serialize_event(
                     invite_event,
                     time_now,
-                    as_client_event=as_client_event,
+                    config=serializer_options,
                 )
 
             rooms_ret.append(d)
@@ -225,7 +229,7 @@ class InitialSyncHandler:
                         self._event_serializer.serialize_events(
                             messages,
                             time_now=time_now,
-                            as_client_event=as_client_event,
+                            config=serializer_options,
                         )
                     ),
                     "start": await start_token.to_string(self.store),
@@ -235,7 +239,7 @@ class InitialSyncHandler:
                 d["state"] = self._event_serializer.serialize_events(
                     current_state.values(),
                     time_now=time_now,
-                    as_client_event=as_client_event,
+                    config=serializer_options,
                 )
 
                 account_data_events = []
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 61cb133ef2..1c4fb4360a 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -493,6 +493,7 @@ class EventCreationHandler:
         allow_no_prev_events: bool = False,
         prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
+        state_event_ids: Optional[List[str]] = None,
         require_consent: bool = True,
         outlier: bool = False,
         historical: bool = False,
@@ -527,6 +528,15 @@ class EventCreationHandler:
 
                 If non-None, prev_event_ids must also be provided.
 
+            state_event_ids:
+                The full state at a given event. This is used particularly by the MSC2716
+                /batch_send endpoint. One use case is with insertion events which float at
+                the beginning of a historical batch and don't have any `prev_events` to
+                derive from; we add all of these state events as the explicit state so the
+                rest of the historical batch can inherit the same state and state_group.
+                This should normally be left as None, which will cause the auth_event_ids
+                to be calculated based on the room state at the prev_events.
+
             require_consent: Whether to check if the requester has
                 consented to the privacy policy.
 
@@ -612,6 +622,7 @@ class EventCreationHandler:
             allow_no_prev_events=allow_no_prev_events,
             prev_event_ids=prev_event_ids,
             auth_event_ids=auth_event_ids,
+            state_event_ids=state_event_ids,
             depth=depth,
         )
 
@@ -772,6 +783,7 @@ class EventCreationHandler:
         allow_no_prev_events: bool = False,
         prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
+        state_event_ids: Optional[List[str]] = None,
         ratelimit: bool = True,
         txn_id: Optional[str] = None,
         ignore_shadow_ban: bool = False,
@@ -801,6 +813,14 @@ class EventCreationHandler:
                 based on the room state at the prev_events.
 
                 If non-None, prev_event_ids must also be provided.
+            state_event_ids:
+                The full state at a given event. This is used particularly by the MSC2716
+                /batch_send endpoint. One use case is with insertion events which float at
+                the beginning of a historical batch and don't have any `prev_events` to
+                derive from; we add all of these state events as the explicit state so the
+                rest of the historical batch can inherit the same state and state_group.
+                This should normally be left as None, which will cause the auth_event_ids
+                to be calculated based on the room state at the prev_events.
             ratelimit: Whether to rate limit this send.
             txn_id: The transaction ID.
             ignore_shadow_ban: True if shadow-banned users should be allowed to
@@ -856,8 +876,10 @@ class EventCreationHandler:
                 requester,
                 event_dict,
                 txn_id=txn_id,
+                allow_no_prev_events=allow_no_prev_events,
                 prev_event_ids=prev_event_ids,
                 auth_event_ids=auth_event_ids,
+                state_event_ids=state_event_ids,
                 outlier=outlier,
                 historical=historical,
                 depth=depth,
@@ -893,6 +915,7 @@ class EventCreationHandler:
         allow_no_prev_events: bool = False,
         prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
+        state_event_ids: Optional[List[str]] = None,
         depth: Optional[int] = None,
     ) -> Tuple[EventBase, EventContext]:
         """Create a new event for a local client
@@ -915,6 +938,15 @@ class EventCreationHandler:
                 Should normally be left as None, which will cause them to be calculated
                 based on the room state at the prev_events.
 
+            state_event_ids:
+                The full state at a given event. This is used particularly by the MSC2716
+                /batch_send endpoint. One use case is with insertion events which float at
+                the beginning of a historical batch and don't have any `prev_events` to
+                derive from; we add all of these state events as the explicit state so the
+                rest of the historical batch can inherit the same state and state_group.
+                This should normally be left as None, which will cause the auth_event_ids
+                to be calculated based on the room state at the prev_events.
+
             depth: Override the depth used to order the event in the DAG.
                 Should normally be set to None, which will cause the depth to be calculated
                 based on the prev_events.
@@ -922,31 +954,26 @@ class EventCreationHandler:
         Returns:
             Tuple of created event, context
         """
-        # Strip down the auth_event_ids to only what we need to auth the event.
+        # Strip down the state_event_ids to only what we need to auth the event.
         # For example, we don't need extra m.room.member that don't match event.sender
-        full_state_ids_at_event = None
-        if auth_event_ids is not None:
-            # If auth events are provided, prev events must be also.
+        if state_event_ids is not None:
+            # Do a quick check to make sure that prev_event_ids is present to
+            # make the type-checking around `builder.build` happy.
             # prev_event_ids could be an empty array though.
             assert prev_event_ids is not None
 
-            # Copy the full auth state before it stripped down
-            full_state_ids_at_event = auth_event_ids.copy()
-
             temp_event = await builder.build(
                 prev_event_ids=prev_event_ids,
-                auth_event_ids=auth_event_ids,
+                auth_event_ids=state_event_ids,
                 depth=depth,
             )
-            auth_events = await self.store.get_events_as_list(auth_event_ids)
+            state_events = await self.store.get_events_as_list(state_event_ids)
             # Create a StateMap[str]
-            auth_event_state_map = {
-                (e.type, e.state_key): e.event_id for e in auth_events
-            }
-            # Actually strip down and use the necessary auth events
+            state_map = {(e.type, e.state_key): e.event_id for e in state_events}
+            # Actually strip down and only use the necessary auth events
             auth_event_ids = self._event_auth_handler.compute_auth_events(
                 event=temp_event,
-                current_state_ids=auth_event_state_map,
+                current_state_ids=state_map,
                 for_verification=False,
             )
 
@@ -989,12 +1016,16 @@ class EventCreationHandler:
             context = EventContext.for_outlier()
         elif (
             event.type == EventTypes.MSC2716_INSERTION
-            and full_state_ids_at_event
+            and state_event_ids
             and builder.internal_metadata.is_historical()
         ):
+            # Add explicit state to the insertion event so it has state to derive
+            # from even though it's floating with no `prev_events`. The rest of
+            # the batch can derive from this state and state_group.
+            #
             # TODO(faster_joins): figure out how this works, and make sure that the
             #   old state is complete.
-            old_state = await self.store.get_events_as_list(full_state_ids_at_event)
+            old_state = await self.store.get_events_as_list(state_event_ids)
             context = await self.state.compute_event_context(event, old_state=old_state)
         else:
             context = await self.state.compute_event_context(event)
@@ -1069,6 +1100,9 @@ class EventCreationHandler:
         if relation_type == RelationTypes.ANNOTATION:
             aggregation_key = relation["key"]
 
+            if len(aggregation_key) > 500:
+                raise SynapseError(400, "Aggregation key is too long")
+
             already_exists = await self.store.has_user_annotated_event(
                 relates_to, event.type, aggregation_key, event.sender
             )
@@ -1076,7 +1110,10 @@ class EventCreationHandler:
                 raise SynapseError(400, "Can't send same reaction twice")
 
         # Don't attempt to start a thread if the parent event is a relation.
-        elif relation_type == RelationTypes.THREAD:
+        elif (
+            relation_type == RelationTypes.THREAD
+            or relation_type == RelationTypes.UNSTABLE_THREAD
+        ):
             if await self.store.event_includes_relation(relates_to):
                 raise SynapseError(
                     400, "Cannot start threads from an event with a relation"
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index 593a2aac66..724b9cfcb4 100644
--- a/synapse/handlers/oidc.py
+++ b/synapse/handlers/oidc.py
@@ -45,6 +45,7 @@ from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart
 from synapse.util import Clock, json_decoder
 from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
 from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
+from synapse.util.templates import _localpart_from_email_filter
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -1228,6 +1229,7 @@ class OidcSessionData:
 
 class UserAttributeDict(TypedDict):
     localpart: Optional[str]
+    confirm_localpart: bool
     display_name: Optional[str]
     emails: List[str]
 
@@ -1307,6 +1309,11 @@ def jinja_finalize(thing: Any) -> Any:
 
 
 env = Environment(finalize=jinja_finalize)
+env.filters.update(
+    {
+        "localpart_from_email": _localpart_from_email_filter,
+    }
+)
 
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -1316,6 +1323,7 @@ class JinjaOidcMappingConfig:
     display_name_template: Optional[Template]
     email_template: Optional[Template]
     extra_attributes: Dict[str, Template]
+    confirm_localpart: bool = False
 
 
 class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
@@ -1357,12 +1365,17 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
                         "invalid jinja template", path=["extra_attributes", key]
                     ) from e
 
+        confirm_localpart = config.get("confirm_localpart") or False
+        if not isinstance(confirm_localpart, bool):
+            raise ConfigError("must be a bool", path=["confirm_localpart"])
+
         return JinjaOidcMappingConfig(
             subject_claim=subject_claim,
             localpart_template=localpart_template,
             display_name_template=display_name_template,
             email_template=email_template,
             extra_attributes=extra_attributes,
+            confirm_localpart=confirm_localpart,
         )
 
     def get_remote_user_id(self, userinfo: UserInfo) -> str:
@@ -1398,7 +1411,10 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
             emails.append(email)
 
         return UserAttributeDict(
-            localpart=localpart, display_name=display_name, emails=emails
+            localpart=localpart,
+            display_name=display_name,
+            emails=emails,
+            confirm_localpart=self._config.confirm_localpart,
         )
 
     async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict:
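
The oidc.py changes above register a new `localpart_from_email` Jinja filter and a `confirm_localpart` option for the Jinja-based mapping provider. The filter's implementation (`_localpart_from_email_filter`) is not part of this hunk; as a minimal, self-contained sketch assuming it simply keeps the part of the address before the "@", the filter could be exercised like this:

    from jinja2 import Environment

    def localpart_from_email(email: str) -> str:
        # Assumed behaviour, for illustration only: strip the domain part.
        return email.split("@", 1)[0]

    env = Environment()
    env.filters["localpart_from_email"] = localpart_from_email

    # Roughly what a localpart template using the new filter might look like.
    template = env.from_string("{{ user.email | localpart_from_email }}")
    print(template.render(user={"email": "alice@example.com"}))  # -> alice
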
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 5c01a426ff..876b879483 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Set
+from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set
 
 import attr
 
@@ -22,6 +22,7 @@ from twisted.python.failure import Failure
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
+from synapse.events.utils import SerializeEventConfig
 from synapse.handlers.room import ShutdownRoomResponse
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.state import StateFilter
@@ -133,6 +134,7 @@ class PaginationHandler:
         self.clock = hs.get_clock()
         self._server_name = hs.hostname
         self._room_shutdown_handler = hs.get_room_shutdown_handler()
+        self._relations_handler = hs.get_relations_handler()
 
         self.pagination_lock = ReadWriteLock()
         # IDs of rooms in which there currently an active purge *or delete* operation.
@@ -349,7 +351,7 @@ class PaginationHandler:
         """
         self._purges_in_progress_by_room.add(room_id)
         try:
-            with await self.pagination_lock.write(room_id):
+            async with self.pagination_lock.write(room_id):
                 await self.storage.purge_events.purge_history(
                     room_id, token, delete_local_events
                 )
@@ -405,7 +407,7 @@ class PaginationHandler:
             room_id: room to be purged
             force: set true to skip checking for joined users.
         """
-        with await self.pagination_lock.write(room_id):
+        async with self.pagination_lock.write(room_id):
             # first check that we have no users in this room
             if not force:
                 joined = await self.store.is_host_joined(room_id, self._server_name)
@@ -421,7 +423,7 @@ class PaginationHandler:
         pagin_config: PaginationConfig,
         as_client_event: bool = True,
         event_filter: Optional[Filter] = None,
-    ) -> Dict[str, Any]:
+    ) -> JsonDict:
         """Get messages in a room.
 
         Args:
@@ -430,6 +432,7 @@ class PaginationHandler:
             pagin_config: The pagination config rules to apply, if any.
             as_client_event: True to get events in client-server format.
             event_filter: Filter to apply to results or None
+
         Returns:
             Pagination API results
         """
@@ -447,7 +450,7 @@ class PaginationHandler:
 
         room_token = from_token.room_key
 
-        with await self.pagination_lock.read(room_id):
+        async with self.pagination_lock.read(room_id):
             (
                 membership,
                 member_event_id,
@@ -537,17 +540,21 @@ class PaginationHandler:
                 state_dict = await self.store.get_events(list(state_ids.values()))
                 state = state_dict.values()
 
-        aggregations = await self.store.get_bundled_aggregations(events, user_id)
+        aggregations = await self._relations_handler.get_bundled_aggregations(
+            events, user_id
+        )
 
         time_now = self.clock.time_msec()
 
+        serialize_options = SerializeEventConfig(as_client_event=as_client_event)
+
         chunk = {
             "chunk": (
                 self._event_serializer.serialize_events(
                     events,
                     time_now,
+                    config=serialize_options,
                     bundle_aggregations=aggregations,
-                    as_client_event=as_client_event,
                 )
             ),
             "start": await from_token.to_string(self.store),
@@ -556,7 +563,7 @@ class PaginationHandler:
 
         if state:
             chunk["state"] = self._event_serializer.serialize_events(
-                state, time_now, as_client_event=as_client_event
+                state, time_now, config=serialize_options
             )
 
         return chunk
@@ -612,7 +619,7 @@ class PaginationHandler:
 
         self._purges_in_progress_by_room.add(room_id)
         try:
-            with await self.pagination_lock.write(room_id):
+            async with self.pagination_lock.write(room_id):
                 self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
                 self._delete_by_id[
                     delete_id
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index c155098bee..34d9411bbf 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -267,7 +267,6 @@ class BasePresenceHandler(abc.ABC):
             is_syncing: Whether or not the user is now syncing
             sync_time_msec: Time in ms when the user was last syncing
         """
-        pass
 
     async def update_external_syncs_clear(self, process_id: str) -> None:
         """Marks all users that had been marked as syncing by a given process
@@ -277,7 +276,6 @@ class BasePresenceHandler(abc.ABC):
 
         This is a no-op when presence is handled by a different worker.
         """
-        pass
 
     async def process_replication_rows(
         self, stream_name: str, instance_name: str, token: int, rows: list
@@ -424,13 +422,13 @@ class WorkerPresenceHandler(BasePresenceHandler):
 
     async def _on_shutdown(self) -> None:
         if self._presence_enabled:
-            self.hs.get_tcp_replication().send_command(
+            self.hs.get_replication_command_handler().send_command(
                 ClearUserSyncsCommand(self.instance_id)
             )
 
     def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None:
         if self._presence_enabled:
-            self.hs.get_tcp_replication().send_user_sync(
+            self.hs.get_replication_command_handler().send_user_sync(
                 self.instance_id, user_id, is_syncing, last_sync_ms
             )
 
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 6554c0d3c2..239b0aa744 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -336,12 +336,18 @@ class ProfileHandler:
         """Check that the size and content type of the avatar at the given MXC URI are
         within the configured limits.
 
+        If the given `mxc` is empty, no checks are performed. (Users are always able to
+        unset their avatar.)
+
         Args:
             mxc: The MXC URI at which the avatar can be found.
 
         Returns:
              A boolean indicating whether the file can be allowed to be set as an avatar.
         """
+        if mxc == "":
+            return True
+
         if not self.max_avatar_size and not self.allowed_avatar_mimetypes:
             return True
 
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index b4132c353a..6250bb3bdf 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -269,7 +269,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
         # Then filter down to rooms that the AS can read
         events = []
         for room_id, event in rooms_to_events.items():
-            if not await service.matches_user_in_member_list(room_id, self.store):
+            if not await service.is_interested_in_room(room_id, self.store):
                 continue
 
             events.append(event)
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
new file mode 100644
index 0000000000..73217d135d
--- /dev/null
+++ b/synapse/handlers/relations.py
@@ -0,0 +1,271 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import TYPE_CHECKING, Dict, Iterable, Optional, cast
+
+import attr
+from frozendict import frozendict
+
+from synapse.api.constants import RelationTypes
+from synapse.api.errors import SynapseError
+from synapse.events import EventBase
+from synapse.types import JsonDict, Requester, StreamToken
+from synapse.visibility import filter_events_for_client
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+    from synapse.storage.databases.main import DataStore
+
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _ThreadAggregation:
+    # The latest event in the thread.
+    latest_event: EventBase
+    # The latest edit to the latest event in the thread.
+    latest_edit: Optional[EventBase]
+    # The total number of events in the thread.
+    count: int
+    # True if the current user has sent an event to the thread.
+    current_user_participated: bool
+
+
+@attr.s(slots=True, auto_attribs=True)
+class BundledAggregations:
+    """
+    The bundled aggregations for an event.
+
+    Some values require additional processing during serialization.
+    """
+
+    annotations: Optional[JsonDict] = None
+    references: Optional[JsonDict] = None
+    replace: Optional[EventBase] = None
+    thread: Optional[_ThreadAggregation] = None
+
+    def __bool__(self) -> bool:
+        return bool(self.annotations or self.references or self.replace or self.thread)
+
+
+class RelationsHandler:
+    def __init__(self, hs: "HomeServer"):
+        self._main_store = hs.get_datastores().main
+        self._storage = hs.get_storage()
+        self._auth = hs.get_auth()
+        self._clock = hs.get_clock()
+        self._event_handler = hs.get_event_handler()
+        self._event_serializer = hs.get_event_client_serializer()
+
+    async def get_relations(
+        self,
+        requester: Requester,
+        event_id: str,
+        room_id: str,
+        relation_type: Optional[str] = None,
+        event_type: Optional[str] = None,
+        aggregation_key: Optional[str] = None,
+        limit: int = 5,
+        direction: str = "b",
+        from_token: Optional[StreamToken] = None,
+        to_token: Optional[StreamToken] = None,
+    ) -> JsonDict:
+        """Get related events of a event, ordered by topological ordering.
+
+        TODO Accept a PaginationConfig instead of individual pagination parameters.
+
+        Args:
+            requester: The user requesting the relations.
+            event_id: Fetch events that relate to this event ID.
+            room_id: The room the event belongs to.
+            relation_type: Only fetch events with this relation type, if given.
+            event_type: Only fetch events with this event type, if given.
+            aggregation_key: Only fetch events with this aggregation key, if given.
+            limit: Only fetch the most recent `limit` events.
+            direction: Whether to fetch the most recent first (`"b"`) or the
+                oldest first (`"f"`).
+            from_token: Fetch rows from the given token, or from the start if None.
+            to_token: Fetch rows up to the given token, or up to the end if None.
+
+        Returns:
+            The pagination chunk.
+        """
+
+        user_id = requester.user.to_string()
+
+        # TODO Properly handle a user leaving a room.
+        (_, member_event_id) = await self._auth.check_user_in_room_or_world_readable(
+            room_id, user_id, allow_departed_users=True
+        )
+
+        # This gets the original event and checks that a) the event exists and
+        # b) the user is allowed to view it.
+        event = await self._event_handler.get_event(requester.user, room_id, event_id)
+        if event is None:
+            raise SynapseError(404, "Unknown parent event.")
+
+        pagination_chunk = await self._main_store.get_relations_for_event(
+            event_id=event_id,
+            event=event,
+            room_id=room_id,
+            relation_type=relation_type,
+            event_type=event_type,
+            aggregation_key=aggregation_key,
+            limit=limit,
+            direction=direction,
+            from_token=from_token,
+            to_token=to_token,
+        )
+
+        events = await self._main_store.get_events_as_list(
+            [c["event_id"] for c in pagination_chunk.chunk]
+        )
+
+        events = await filter_events_for_client(
+            self._storage, user_id, events, is_peeking=(member_event_id is None)
+        )
+
+        now = self._clock.time_msec()
+        # Do not bundle aggregations when retrieving the original event because
+        # we want the content before relations are applied to it.
+        original_event = self._event_serializer.serialize_event(
+            event, now, bundle_aggregations=None
+        )
+        # The relations returned for the requested event do include their
+        # bundled aggregations.
+        aggregations = await self.get_bundled_aggregations(
+            events, requester.user.to_string()
+        )
+        serialized_events = self._event_serializer.serialize_events(
+            events, now, bundle_aggregations=aggregations
+        )
+
+        return_value = await pagination_chunk.to_dict(self._main_store)
+        return_value["chunk"] = serialized_events
+        return_value["original_event"] = original_event
+
+        return return_value
+
+    async def _get_bundled_aggregation_for_event(
+        self, event: EventBase, user_id: str
+    ) -> Optional[BundledAggregations]:
+        """Generate bundled aggregations for an event.
+
+        Note that this does not use a cache, but depends on cached methods.
+
+        Args:
+            event: The event to calculate bundled aggregations for.
+            user_id: The user requesting the bundled aggregations.
+
+        Returns:
+            The bundled aggregations for an event, if bundled aggregations are
+            enabled and the event can have bundled aggregations.
+        """
+
+        # Do not bundle aggregations for an event which represents an edit or an
+        # annotation. It does not make sense for them to have related events.
+        relates_to = event.content.get("m.relates_to")
+        if isinstance(relates_to, (dict, frozendict)):
+            relation_type = relates_to.get("rel_type")
+            if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE):
+                return None
+
+        event_id = event.event_id
+        room_id = event.room_id
+
+        # The bundled aggregations to include, a mapping of relation type to a
+        # type-specific value. Some types include the direct return type here
+        # while others need more processing during serialization.
+        aggregations = BundledAggregations()
+
+        annotations = await self._main_store.get_aggregation_groups_for_event(
+            event_id, room_id
+        )
+        if annotations.chunk:
+            aggregations.annotations = await annotations.to_dict(
+                cast("DataStore", self)
+            )
+
+        references = await self._main_store.get_relations_for_event(
+            event_id, event, room_id, RelationTypes.REFERENCE, direction="f"
+        )
+        if references.chunk:
+            aggregations.references = await references.to_dict(cast("DataStore", self))
+
+        # Store the bundled aggregations in the event metadata for later use.
+        return aggregations
+
+    async def get_bundled_aggregations(
+        self, events: Iterable[EventBase], user_id: str
+    ) -> Dict[str, BundledAggregations]:
+        """Generate bundled aggregations for events.
+
+        Args:
+            events: The iterable of events to calculate bundled aggregations for.
+            user_id: The user requesting the bundled aggregations.
+
+        Returns:
+            A map of event ID to the bundled aggregation for the event. Not all
+            events may have bundled aggregations in the results.
+        """
+        # De-duplicate events by ID to handle the same event requested multiple times.
+        #
+        # State events do not get bundled aggregations.
+        events_by_id = {
+            event.event_id: event for event in events if not event.is_state()
+        }
+
+        # event ID -> bundled aggregation in non-serialized form.
+        results: Dict[str, BundledAggregations] = {}
+
+        # Fetch other relations per event.
+        for event in events_by_id.values():
+            event_result = await self._get_bundled_aggregation_for_event(event, user_id)
+            if event_result:
+                results[event.event_id] = event_result
+
+        # Fetch any edits (but not for redacted events).
+        edits = await self._main_store.get_applicable_edits(
+            [
+                event_id
+                for event_id, event in events_by_id.items()
+                if not event.internal_metadata.is_redacted()
+            ]
+        )
+        for event_id, edit in edits.items():
+            results.setdefault(event_id, BundledAggregations()).replace = edit
+
+        # Fetch thread summaries.
+        summaries = await self._main_store.get_thread_summaries(events_by_id.keys())
+        # Only fetch participated for a limited selection based on what had
+        # summaries.
+        participated = await self._main_store.get_threads_participated(
+            [event_id for event_id, summary in summaries.items() if summary], user_id
+        )
+        for event_id, summary in summaries.items():
+            if summary:
+                thread_count, latest_thread_event, edit = summary
+                results.setdefault(
+                    event_id, BundledAggregations()
+                ).thread = _ThreadAggregation(
+                    latest_event=latest_thread_event,
+                    latest_edit=edit,
+                    count=thread_count,
+                    # If there's a thread summary it must also exist in the
+                    # participated dictionary.
+                    current_user_participated=participated[event_id],
+                )
+
+        return results
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 7b965b4b96..092e185c99 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -60,8 +60,8 @@ from synapse.events import EventBase
 from synapse.events.utils import copy_power_levels_contents
 from synapse.federation.federation_client import InvalidResponseError
 from synapse.handlers.federation import get_domains_from_state
+from synapse.handlers.relations import BundledAggregations
 from synapse.rest.admin._base import assert_user_is_admin
-from synapse.storage.databases.main.relations import BundledAggregations
 from synapse.storage.state import StateFilter
 from synapse.streams import EventSource
 from synapse.types import (
@@ -1118,6 +1118,7 @@ class RoomContextHandler:
         self.store = hs.get_datastores().main
         self.storage = hs.get_storage()
         self.state_store = self.storage.state
+        self._relations_handler = hs.get_relations_handler()
 
     async def get_event_context(
         self,
@@ -1190,7 +1191,7 @@ class RoomContextHandler:
         event = filtered[0]
 
         # Fetch the aggregations.
-        aggregations = await self.store.get_bundled_aggregations(
+        aggregations = await self._relations_handler.get_bundled_aggregations(
             itertools.chain(events_before, (event,), events_after),
             user.to_string(),
         )
@@ -1475,6 +1476,7 @@ class RoomShutdownHandler:
         self.room_member_handler = hs.get_room_member_handler()
         self._room_creation_handler = hs.get_room_creation_handler()
         self._replication = hs.get_replication_data_handler()
+        self._third_party_rules = hs.get_third_party_event_rules()
         self.event_creation_handler = hs.get_event_creation_handler()
         self.store = hs.get_datastores().main
 
@@ -1548,6 +1550,13 @@ class RoomShutdownHandler:
         if not RoomID.is_valid(room_id):
             raise SynapseError(400, "%s is not a legal room ID" % (room_id,))
 
+        if not await self._third_party_rules.check_can_shutdown_room(
+            requester_user_id, room_id
+        ):
+            raise SynapseError(
+                403, "Shutdown of this room is forbidden", Codes.FORBIDDEN
+            )
+
         # Action the block first (even if the room doesn't exist yet)
         if block:
             # This will work even if the room is already blocked, but that is
diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py
index abbf7b7b27..a0255bd143 100644
--- a/synapse/handlers/room_batch.py
+++ b/synapse/handlers/room_batch.py
@@ -121,12 +121,11 @@ class RoomBatchHandler:
 
         return create_requester(user_id, app_service=app_service)
 
-    async def get_most_recent_auth_event_ids_from_event_id_list(
+    async def get_most_recent_full_state_ids_from_event_id_list(
         self, event_ids: List[str]
     ) -> List[str]:
-        """Find the most recent auth event ids (derived from state events) that
-        allowed that message to be sent. We will use this as a base
-        to auth our historical messages against.
+        """Find the most recent event_id and grab the full state at that event.
+        We will use this as a base to auth our historical messages against.
 
         Args:
             event_ids: List of event ID's to look at
@@ -136,38 +135,37 @@ class RoomBatchHandler:
         """
 
         (
-            most_recent_prev_event_id,
+            most_recent_event_id,
             _,
         ) = await self.store.get_max_depth_of(event_ids)
         # mapping from (type, state_key) -> state_event_id
         prev_state_map = await self.state_store.get_state_ids_for_event(
-            most_recent_prev_event_id
+            most_recent_event_id
         )
         # List of state event ID's
-        prev_state_ids = list(prev_state_map.values())
-        auth_event_ids = prev_state_ids
+        full_state_ids = list(prev_state_map.values())
 
-        return auth_event_ids
+        return full_state_ids
 
     async def persist_state_events_at_start(
         self,
         state_events_at_start: List[JsonDict],
         room_id: str,
-        initial_auth_event_ids: List[str],
+        initial_state_event_ids: List[str],
         app_service_requester: Requester,
     ) -> List[str]:
         """Takes all `state_events_at_start` event dictionaries and creates/persists
-        them as floating state events which don't resolve into the current room state.
-        They are floating because they reference a fake prev_event which doesn't connect
-        to the normal DAG at all.
+        them in a floating state event chain which doesn't resolve into the current room
+        state. They are floating because they reference no prev_events and are marked
+        as outliers, which disconnects them from the normal DAG.
 
         Args:
             state_events_at_start:
             room_id: Room where you want the events persisted in.
-            initial_auth_event_ids: These will be the auth_events for the first
-                state event created. Each event created afterwards will be
-                added to the list of auth events for the next state event
-                created.
+            initial_state_event_ids:
+                The base set of state for the historical batch which the floating
+                state chain will derive from. This should probably be the state
+                from the `prev_event` defined by `/batch_send?prev_event_id=$abc`.
             app_service_requester: The requester of an application service.
 
         Returns:
@@ -176,7 +174,7 @@ class RoomBatchHandler:
         assert app_service_requester.app_service
 
         state_event_ids_at_start = []
-        auth_event_ids = initial_auth_event_ids.copy()
+        state_event_ids = initial_state_event_ids.copy()
 
         # Make the state events float off on their own by specifying no
         # prev_events for the first one in the chain so we don't have a bunch of
@@ -189,9 +187,7 @@ class RoomBatchHandler:
             )
 
             logger.debug(
-                "RoomBatchSendEventRestServlet inserting state_event=%s, auth_event_ids=%s",
-                state_event,
-                auth_event_ids,
+                "RoomBatchSendEventRestServlet inserting state_event=%s", state_event
             )
 
             event_dict = {
@@ -217,16 +213,26 @@ class RoomBatchHandler:
                     room_id=room_id,
                     action=membership,
                     content=event_dict["content"],
+                    # Mark as an outlier to disconnect it from the normal DAG
+                    # and not show up between batches of history.
                     outlier=True,
                     historical=True,
-                    # Only the first event in the chain should be floating.
+                    # Only the first event in the state chain should be floating.
                     # The rest should hang off each other in a chain.
                     allow_no_prev_events=index == 0,
                     prev_event_ids=prev_event_ids_for_state_chain,
+                    # Since each state event is marked as an outlier, the
+                    # `EventContext.for_outlier()` won't have any `state_ids`
+                    # set and therefore can't derive any state even though the
+                    # prev_events are set. Also since the first event in the
+                    # state chain is floating with no `prev_events`, it can't
+                    # derive state from anywhere automatically. So we need to
+                    # set some state explicitly.
+                    #
                     # Make sure to use a copy of this list because we modify it
                     # later in the loop here. Otherwise it will be the same
                     # reference and also update in the event when we append later.
-                    auth_event_ids=auth_event_ids.copy(),
+                    state_event_ids=state_event_ids.copy(),
                 )
             else:
                 # TODO: Add some complement tests that adds state that is not member joins
@@ -240,21 +246,31 @@ class RoomBatchHandler:
                         state_event["sender"], app_service_requester.app_service
                     ),
                     event_dict,
+                    # Mark as an outlier to disconnect it from the normal DAG
+                    # and not show up between batches of history.
                     outlier=True,
                     historical=True,
-                    # Only the first event in the chain should be floating.
+                    # Only the first event in the state chain should be floating.
                     # The rest should hang off each other in a chain.
                     allow_no_prev_events=index == 0,
                     prev_event_ids=prev_event_ids_for_state_chain,
+                    # Since each state event is marked as an outlier, the
+                    # `EventContext.for_outlier()` won't have any `state_ids`
+                    # set and therefore can't derive any state even though the
+                    # prev_events are set. Also since the first event in the
+                    # state chain is floating with no `prev_events`, it can't
+                    # derive state from anywhere automatically. So we need to
+                    # set some state explicitly.
+                    #
                     # Make sure to use a copy of this list because we modify it
                     # later in the loop here. Otherwise it will be the same
                     # reference and also update in the event when we append later.
-                    auth_event_ids=auth_event_ids.copy(),
+                    state_event_ids=state_event_ids.copy(),
                 )
                 event_id = event.event_id
 
             state_event_ids_at_start.append(event_id)
-            auth_event_ids.append(event_id)
+            state_event_ids.append(event_id)
             # Connect all the state in a floating chain
             prev_event_ids_for_state_chain = [event_id]
 
@@ -265,7 +281,7 @@ class RoomBatchHandler:
         events_to_create: List[JsonDict],
         room_id: str,
         inherited_depth: int,
-        auth_event_ids: List[str],
+        initial_state_event_ids: List[str],
         app_service_requester: Requester,
     ) -> List[str]:
         """Create and persists all events provided sequentially. Handles the
@@ -281,8 +297,10 @@ class RoomBatchHandler:
             room_id: Room where you want the events persisted in.
             inherited_depth: The depth to create the events at (you will
                 probably get this by calling inherit_depth_from_prev_ids(...)).
-            auth_event_ids: Define which events allow you to create the given
-                event in the room.
+            initial_state_event_ids:
+                This is used to set explicit state for the insertion event at
+                the start of the historical batch since it's floating with no
+                prev_events to derive state from automatically.
             app_service_requester: The requester of an application service.
 
         Returns:
@@ -290,6 +308,11 @@ class RoomBatchHandler:
         """
         assert app_service_requester.app_service
 
+        # We expect the first event in a historical batch to be an insertion event
+        assert events_to_create[0]["type"] == EventTypes.MSC2716_INSERTION
+        # We expect the last event in a historical batch to be a batch event
+        assert events_to_create[-1]["type"] == EventTypes.MSC2716_BATCH
+
         # Make the historical event chain float off on its own by specifying no
         # prev_events for the first event in the chain which causes the HS to
         # ask for the state at the start of the batch later.
@@ -321,11 +344,16 @@ class RoomBatchHandler:
                     ev["sender"], app_service_requester.app_service
                 ),
                 event_dict,
-                # Only the first event in the chain should be floating.
-                # The rest should hang off each other in a chain.
+                # Only the first event (which is the insertion event) in the
+                # chain should be floating. The rest should hang off each other
+                # in a chain.
                 allow_no_prev_events=index == 0,
                 prev_event_ids=event_dict.get("prev_events"),
-                auth_event_ids=auth_event_ids,
+                # Since the first event (which is the insertion event) in the
+                # chain is floating with no `prev_events`, it can't derive state
+                # from anywhere automatically. So we need to set some state
+                # explicitly.
+                state_event_ids=initial_state_event_ids if index == 0 else None,
                 historical=True,
                 depth=inherited_depth,
             )
@@ -343,10 +371,9 @@ class RoomBatchHandler:
             )
 
             logger.debug(
-                "RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s, auth_event_ids=%s",
+                "RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s",
                 event,
                 prev_event_ids,
-                auth_event_ids,
             )
 
             events_to_persist.append((event, context))
@@ -376,12 +403,12 @@ class RoomBatchHandler:
         room_id: str,
         batch_id_to_connect_to: str,
         inherited_depth: int,
-        auth_event_ids: List[str],
+        initial_state_event_ids: List[str],
         app_service_requester: Requester,
     ) -> Tuple[List[str], str]:
         """
-        Handles creating and persisting all of the historical events as well
-        as insertion and batch meta events to make the batch navigable in the DAG.
+        Handles creating and persisting all of the historical events as well as
+        insertion and batch meta events to make the batch navigable in the DAG.
 
         Args:
             events_to_create: List of historical events to create in JSON
@@ -391,8 +418,13 @@ class RoomBatchHandler:
                 want this batch to connect to.
             inherited_depth: The depth to create the events at (you will
                 probably get this by calling inherit_depth_from_prev_ids(...)).
-            auth_event_ids: Define which events allow you to create the given
-                event in the room.
+            initial_state_event_ids:
+                This is used to set explicit state for the insertion event at
+                the start of the historical batch since it's floating with no
+                prev_events to derive state from automatically. This should
+                probably be the state from the `prev_event` defined by
+                `/batch_send?prev_event_id=$abc` plus the outcome of
+                `persist_state_events_at_start`.
             app_service_requester: The requester of an application service.
 
         Returns:
@@ -438,7 +470,7 @@ class RoomBatchHandler:
             events_to_create=events_to_create,
             room_id=room_id,
             inherited_depth=inherited_depth,
-            auth_event_ids=auth_event_ids,
+            initial_state_event_ids=initial_state_event_ids,
             app_service_requester=app_service_requester,
         )
 
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index a582837cf0..a33fa34aa8 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -272,6 +272,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         allow_no_prev_events: bool = False,
         prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
+        state_event_ids: Optional[List[str]] = None,
         txn_id: Optional[str] = None,
         ratelimit: bool = True,
         content: Optional[dict] = None,
@@ -298,6 +299,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                 The event ids to use as the auth_events for the new event.
                 Should normally be left as None, which will cause them to be calculated
                 based on the room state at the prev_events.
+            state_event_ids:
+                The full state at a given event. This is used particularly by the MSC2716
+                /batch_send endpoint. One use case is the historical `state_events_at_start`;
+                since each is marked as an `outlier`, the `EventContext.for_outlier()` won't
+                have any `state_ids` set and therefore can't derive any state even though the
+                prev_events are set, so we need to set them ourselves via this argument.
+                This should normally be left as None, which will cause the auth_event_ids
+                to be calculated based on the room state at the prev_events.
 
             txn_id:
             ratelimit:
@@ -353,6 +362,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             allow_no_prev_events=allow_no_prev_events,
             prev_event_ids=prev_event_ids,
             auth_event_ids=auth_event_ids,
+            state_event_ids=state_event_ids,
             require_consent=require_consent,
             outlier=outlier,
             historical=historical,
@@ -456,6 +466,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         allow_no_prev_events: bool = False,
         prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
+        state_event_ids: Optional[List[str]] = None,
     ) -> Tuple[str, int]:
         """Update a user's membership in a room.
 
@@ -487,6 +498,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                 The event ids to use as the auth_events for the new event.
                 Should normally be left as None, which will cause them to be calculated
                 based on the room state at the prev_events.
+            state_event_ids:
+                The full state at a given event. This is used particularly by the MSC2716
+                /batch_send endpoint. One use case is the historical `state_events_at_start`;
+                since each is marked as an `outlier`, the `EventContext.for_outlier()` won't
+                have any `state_ids` set and therefore can't derive any state even though the
+                prev_events are set, so we need to set them ourselves via this argument.
+                This should normally be left as None, which will cause the auth_event_ids
+                to be calculated based on the room state at the prev_events.
 
         Returns:
             A tuple of the new event ID and stream ID.
@@ -526,6 +545,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                     allow_no_prev_events=allow_no_prev_events,
                     prev_event_ids=prev_event_ids,
                     auth_event_ids=auth_event_ids,
+                    state_event_ids=state_event_ids,
                 )
 
         return result
@@ -548,6 +568,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         allow_no_prev_events: bool = False,
         prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
+        state_event_ids: Optional[List[str]] = None,
     ) -> Tuple[str, int]:
         """Helper for update_membership.
 
@@ -581,6 +602,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                 The event ids to use as the auth_events for the new event.
                 Should normally be left as None, which will cause them to be calculated
                 based on the room state at the prev_events.
+            state_event_ids:
+                The full state at a given event. This is used particularly by the MSC2716
+                /batch_send endpoint. One use case is the historical `state_events_at_start`;
+                since each is marked as an `outlier`, the `EventContext.for_outlier()` won't
+                have any `state_ids` set and therefore can't derive any state even though the
+                prev_events are set, so we need to set them ourselves via this argument.
+                This should normally be left as None, which will cause the auth_event_ids
+                to be calculated based on the room state at the prev_events.
 
         Returns:
             A tuple of the new event ID and stream ID.
@@ -708,6 +737,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                 allow_no_prev_events=allow_no_prev_events,
                 prev_event_ids=prev_event_ids,
                 auth_event_ids=auth_event_ids,
+                state_event_ids=state_event_ids,
                 content=content,
                 require_consent=require_consent,
                 outlier=outlier,
@@ -932,6 +962,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             ratelimit=ratelimit,
             prev_event_ids=latest_event_ids,
             auth_event_ids=auth_event_ids,
+            state_event_ids=state_event_ids,
             content=content,
             require_consent=require_consent,
             outlier=outlier,
@@ -1736,8 +1767,8 @@ class RoomMemberMasterHandler(RoomMemberHandler):
             txn_id=txn_id,
             prev_event_ids=prev_event_ids,
             auth_event_ids=auth_event_ids,
+            outlier=True,
         )
-        event.internal_metadata.outlier = True
         event.internal_metadata.out_of_band_membership = True
 
         result_event = await self.event_creation_handler.handle_new_client_event(
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index 3979cbba71..486145f48a 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -295,7 +295,7 @@ class RoomSummaryHandler:
             # inaccessible to the requesting user.
             if room_entry:
                 # Add the room (including the stripped m.space.child events).
-                rooms_result.append(room_entry.as_json())
+                rooms_result.append(room_entry.as_json(for_client=True))
 
                 # If this room is not at the max-depth, check if there are any
                 # children to process.
@@ -843,14 +843,25 @@ class _RoomEntry:
     # This may not include all children.
     children_state_events: Sequence[JsonDict] = ()
 
-    def as_json(self) -> JsonDict:
+    def as_json(self, for_client: bool = False) -> JsonDict:
         """
         Returns a JSON dictionary suitable for the room hierarchy endpoint.
 
         It returns the room summary including the stripped m.space.child events
         as a sub-key.
+
+        Args:
+            for_client: If true, any server-server only fields are stripped from
+                the result.
+
         """
         result = dict(self.room)
+
+        # Before returning to the client, remove the allowed_room_ids key, if it
+        # exists.
+        if for_client:
+            result.pop("allowed_room_ids", False)
+
         result["children_state"] = self.children_state_events
         return result
 
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index aa16e417eb..30eddda65f 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -54,6 +54,7 @@ class SearchHandler:
         self.clock = hs.get_clock()
         self.hs = hs
         self._event_serializer = hs.get_event_client_serializer()
+        self._relations_handler = hs.get_relations_handler()
         self.storage = hs.get_storage()
         self.state_store = self.storage.state
         self.auth = hs.get_auth()
@@ -354,7 +355,7 @@ class SearchHandler:
 
         aggregations = None
         if self._msc3666_enabled:
-            aggregations = await self.store.get_bundled_aggregations(
+            aggregations = await self._relations_handler.get_bundled_aggregations(
                 # Generate an iterable of EventBase for all the events that will be
                 # returned, including contextual events.
                 itertools.chain(
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index ff5b5169ca..4f02a060d9 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -132,6 +132,7 @@ class UserAttributes:
     # if `None`, the mapper has not picked a userid, and the user should be prompted to
     # enter one.
     localpart: Optional[str]
+    confirm_localpart: bool = False
     display_name: Optional[str] = None
     emails: Collection[str] = attr.Factory(list)
 
@@ -561,9 +562,10 @@ class SsoHandler:
         # Must provide either attributes or session, not both
         assert (attributes is not None) != (session is not None)
 
-        if (attributes and attributes.localpart is None) or (
-            session and session.chosen_localpart is None
-        ):
+        if (
+            attributes
+            and (attributes.localpart is None or attributes.confirm_localpart is True)
+        ) or (session and session.chosen_localpart is None):
             return b"/_synapse/client/pick_username/account_details"
         elif self._consent_at_registration and not (
             session and session.terms_accepted_version
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 0aa3052fd6..6c569cfb1c 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -28,16 +28,16 @@ from typing import (
 import attr
 from prometheus_client import Counter
 
-from synapse.api.constants import AccountDataTypes, EventTypes, Membership, ReceiptTypes
+from synapse.api.constants import EventTypes, Membership, ReceiptTypes
 from synapse.api.filtering import FilterCollection
 from synapse.api.presence import UserPresenceState
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import EventBase
+from synapse.handlers.relations import BundledAggregations
 from synapse.logging.context import current_context
 from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span
 from synapse.push.clientformat import format_push_rules_for_user
 from synapse.storage.databases.main.event_push_actions import NotifCounts
-from synapse.storage.databases.main.relations import BundledAggregations
 from synapse.storage.roommember import MemberSummary
 from synapse.storage.state import StateFilter
 from synapse.types import (
@@ -269,6 +269,7 @@ class SyncHandler:
         self.store = hs.get_datastores().main
         self.notifier = hs.get_notifier()
         self.presence_handler = hs.get_presence_handler()
+        self._relations_handler = hs.get_relations_handler()
         self.event_sources = hs.get_event_sources()
         self.clock = hs.get_clock()
         self.state = hs.get_state_handler()
@@ -638,8 +639,10 @@ class SyncHandler:
         # as clients will have all the necessary information.
         bundled_aggregations = None
         if limited or newly_joined_room:
-            bundled_aggregations = await self.store.get_bundled_aggregations(
-                recents, sync_config.user.to_string()
+            bundled_aggregations = (
+                await self._relations_handler.get_bundled_aggregations(
+                    recents, sync_config.user.to_string()
+                )
             )
 
         return TimelineBatch(
@@ -1601,7 +1604,7 @@ class SyncHandler:
                         return set(), set(), set(), set()
 
         # 3. Work out which rooms need reporting in the sync response.
-        ignored_users = await self._get_ignored_users(user_id)
+        ignored_users = await self.store.ignored_users(user_id)
         if since_token:
             room_changes = await self._get_rooms_changed(
                 sync_result_builder, ignored_users
@@ -1627,7 +1630,6 @@ class SyncHandler:
             logger.debug("Generating room entry for %s", room_entry.room_id)
             await self._generate_room_entry(
                 sync_result_builder,
-                ignored_users,
                 room_entry,
                 ephemeral=ephemeral_by_room.get(room_entry.room_id, []),
                 tags=tags_by_room.get(room_entry.room_id),
@@ -1657,29 +1659,6 @@ class SyncHandler:
             newly_left_users,
         )
 
-    async def _get_ignored_users(self, user_id: str) -> FrozenSet[str]:
-        """Retrieve the users ignored by the given user from their global account_data.
-
-        Returns an empty set if
-        - there is no global account_data entry for ignored_users
-        - there is such an entry, but it's not a JSON object.
-        """
-        # TODO: Can we `SELECT ignored_user_id FROM ignored_users WHERE ignorer_user_id=?;` instead?
-        ignored_account_data = (
-            await self.store.get_global_account_data_by_type_for_user(
-                user_id=user_id, data_type=AccountDataTypes.IGNORED_USER_LIST
-            )
-        )
-
-        # If there is ignored users account data and it matches the proper type,
-        # then use it.
-        ignored_users: FrozenSet[str] = frozenset()
-        if ignored_account_data:
-            ignored_users_data = ignored_account_data.get("ignored_users", {})
-            if isinstance(ignored_users_data, dict):
-                ignored_users = frozenset(ignored_users_data.keys())
-        return ignored_users
-
     async def _have_rooms_changed(
         self, sync_result_builder: "SyncResultBuilder"
     ) -> bool:
@@ -2022,7 +2001,6 @@ class SyncHandler:
     async def _generate_room_entry(
         self,
         sync_result_builder: "SyncResultBuilder",
-        ignored_users: FrozenSet[str],
         room_builder: "RoomSyncResultBuilder",
         ephemeral: List[JsonDict],
         tags: Optional[Dict[str, Dict[str, Any]]],
@@ -2051,7 +2029,6 @@ class SyncHandler:
 
         Args:
             sync_result_builder
-            ignored_users: Set of users ignored by user.
             room_builder
             ephemeral: List of new ephemeral events for room
             tags: List of *all* tags for room, or None if there has been
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 843c68eb0f..6854428b7c 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -160,8 +160,9 @@ class FollowerTypingHandler:
         """Should be called whenever we receive updates for typing stream."""
 
         if self._latest_room_serial > token:
-            # The master has gone backwards. To prevent inconsistent data, just
-            # clear everything.
+            # The typing worker has gone backwards (e.g. it may have restarted).
+            # To prevent inconsistent data, just clear everything.
+            logger.info("Typing handler stream went backwards; resetting")
             self._reset()
 
         # Set the latest serial token to whatever the server gave us.
@@ -486,9 +487,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]):
                 if handler._room_serials[room_id] <= from_key:
                     continue
 
-                if not await service.matches_user_in_member_list(
-                    room_id, self._main_store
-                ):
+                if not await service.is_interested_in_room(room_id, self._main_store):
                     continue
 
                 events.append(self._make_event_for(room_id))
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index d27ed2be6a..048fd4bb82 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -19,8 +19,8 @@ import synapse.metrics
 from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
 from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.databases.main.user_directory import SearchResult
 from synapse.storage.roommember import ProfileInfo
-from synapse.types import JsonDict
 from synapse.util.metrics import Measure
 
 if TYPE_CHECKING:
@@ -78,7 +78,7 @@ class UserDirectoryHandler(StateDeltasHandler):
 
     async def search_users(
         self, user_id: str, search_term: str, limit: int
-    ) -> JsonDict:
+    ) -> SearchResult:
         """Searches for users in directory
 
         Returns:
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 40bf1e06d6..6b98d865f5 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -120,7 +120,6 @@ class ByteParser(ByteWriteable, Generic[T], abc.ABC):
         """Called when response has finished streaming and the parser should
         return the final result (or error).
         """
-        pass
 
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -601,7 +600,6 @@ class MatrixFederationHttpClient:
                             response.code,
                             response_phrase,
                         )
-                        pass
                     else:
                         logger.info(
                             "{%s} [%s] Got response headers: %d %s",
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 09b4125489..31ca841889 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -233,7 +233,6 @@ class HttpServer(Protocol):
             servlet_classname (str): The name of the handler to be used in prometheus
                 and opentracing logs.
         """
-        pass
 
 
 class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index c31c2960ad..88cd8a9e1c 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -29,7 +29,6 @@ import warnings
 from types import TracebackType
 from typing import (
     TYPE_CHECKING,
-    Any,
     Awaitable,
     Callable,
     Optional,
@@ -41,7 +40,7 @@ from typing import (
 )
 
 import attr
-from typing_extensions import Literal
+from typing_extensions import Literal, ParamSpec
 
 from twisted.internet import defer, threads
 from twisted.python.threadpool import ThreadPool
@@ -719,32 +718,33 @@ def nested_logging_context(suffix: str) -> LoggingContext:
     )
 
 
+P = ParamSpec("P")
 R = TypeVar("R")
 
 
 @overload
 def preserve_fn(  # type: ignore[misc]
-    f: Callable[..., Awaitable[R]],
-) -> Callable[..., "defer.Deferred[R]"]:
+    f: Callable[P, Awaitable[R]],
+) -> Callable[P, "defer.Deferred[R]"]:
     # The `type: ignore[misc]` above suppresses
     # "Overloaded function signatures 1 and 2 overlap with incompatible return types"
     ...
 
 
 @overload
-def preserve_fn(f: Callable[..., R]) -> Callable[..., "defer.Deferred[R]"]:
+def preserve_fn(f: Callable[P, R]) -> Callable[P, "defer.Deferred[R]"]:
     ...
 
 
 def preserve_fn(
     f: Union[
-        Callable[..., R],
-        Callable[..., Awaitable[R]],
+        Callable[P, R],
+        Callable[P, Awaitable[R]],
     ]
-) -> Callable[..., "defer.Deferred[R]"]:
+) -> Callable[P, "defer.Deferred[R]"]:
     """Function decorator which wraps the function with run_in_background"""
 
-    def g(*args: Any, **kwargs: Any) -> "defer.Deferred[R]":
+    def g(*args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[R]":
         return run_in_background(f, *args, **kwargs)
 
     return g
@@ -752,7 +752,7 @@ def preserve_fn(
 
 @overload
 def run_in_background(  # type: ignore[misc]
-    f: Callable[..., Awaitable[R]], *args: Any, **kwargs: Any
+    f: Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs
 ) -> "defer.Deferred[R]":
     # The `type: ignore[misc]` above suppresses
     # "Overloaded function signatures 1 and 2 overlap with incompatible return types"
@@ -761,18 +761,22 @@ def run_in_background(  # type: ignore[misc]
 
 @overload
 def run_in_background(
-    f: Callable[..., R], *args: Any, **kwargs: Any
+    f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
 ) -> "defer.Deferred[R]":
     ...
 
 
-def run_in_background(
+def run_in_background(  # type: ignore[misc]
+    # The `type: ignore[misc]` above suppresses
+    # "Overloaded function implementation does not accept all possible arguments of signature 1"
+    # "Overloaded function implementation does not accept all possible arguments of signature 2"
+    # which seems like a bug in mypy.
     f: Union[
-        Callable[..., R],
-        Callable[..., Awaitable[R]],
+        Callable[P, R],
+        Callable[P, Awaitable[R]],
     ],
-    *args: Any,
-    **kwargs: Any,
+    *args: P.args,
+    **kwargs: P.kwargs,
 ) -> "defer.Deferred[R]":
     """Calls a function, ensuring that the current context is restored after
     return from the function, and that the sentinel context is set once the
@@ -872,7 +876,7 @@ def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT:
 
 
 def defer_to_thread(
-    reactor: "ISynapseReactor", f: Callable[..., R], *args: Any, **kwargs: Any
+    reactor: "ISynapseReactor", f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
 ) -> "defer.Deferred[R]":
     """
     Calls the function `f` using a thread from the reactor's default threadpool and
@@ -908,9 +912,9 @@ def defer_to_thread(
 def defer_to_threadpool(
     reactor: "ISynapseReactor",
     threadpool: ThreadPool,
-    f: Callable[..., R],
-    *args: Any,
-    **kwargs: Any,
+    f: Callable[P, R],
+    *args: P.args,
+    **kwargs: P.kwargs,
 ) -> "defer.Deferred[R]":
     """
     A wrapper for twisted.internet.threads.deferToThreadpool, which handles
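The hunks above replace Callable[..., R] with ParamSpec-typed callables so that wrappers such as preserve_fn, run_in_background and defer_to_thread keep the argument signature of the function they wrap instead of degrading it to "accepts anything". A minimal sketch of the same pattern outside Synapse; the decorator and function names are illustrative only, and ParamSpec needs typing-extensions 3.10.0 or newer, matching the requirements bump further down:

    from typing import Callable, TypeVar
    from typing_extensions import ParamSpec

    P = ParamSpec("P")
    R = TypeVar("R")

    def log_calls(f: Callable[P, R]) -> Callable[P, R]:
        # Re-using P.args/P.kwargs means mypy still checks call sites of the
        # wrapped function against f's real signature.
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            print(f"calling {f.__name__}")
            return f(*args, **kwargs)
        return wrapper

    @log_calls
    def scaled_add(a: int, b: int, *, scale: int = 1) -> int:
        return (a + b) * scale

    scaled_add(1, 2, scale=3)    # accepted by mypy
    # scaled_add("1", 2)         # now rejected, unlike with Callable[..., R]
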
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 9aeeabfe36..9504b96f8e 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -55,11 +55,15 @@ from synapse.events.spamcheck import (
     USER_MAY_SEND_3PID_INVITE_CALLBACK,
 )
 from synapse.events.third_party_rules import (
+    CHECK_CAN_DEACTIVATE_USER_CALLBACK,
+    CHECK_CAN_SHUTDOWN_ROOM_CALLBACK,
     CHECK_EVENT_ALLOWED_CALLBACK,
     CHECK_THREEPID_CAN_BE_INVITED_CALLBACK,
     CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK,
     ON_CREATE_ROOM_CALLBACK,
     ON_NEW_EVENT_CALLBACK,
+    ON_PROFILE_UPDATE_CALLBACK,
+    ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK,
 )
 from synapse.handlers.account_validity import (
     IS_USER_EXPIRED_CALLBACK,
@@ -108,6 +112,7 @@ from synapse.types import (
     StateMap,
     UserID,
     UserInfo,
+    UserProfile,
     create_requester,
 )
 from synapse.util import Clock
@@ -147,6 +152,7 @@ __all__ = [
     "EventBase",
     "StateMap",
     "ProfileInfo",
+    "UserProfile",
 ]
 
 logger = logging.getLogger(__name__)
@@ -282,6 +288,12 @@ class ModuleApi:
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = None,
         on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
+        check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None,
+        check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None,
+        on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
+        on_user_deactivation_status_changed: Optional[
+            ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
+        ] = None,
     ) -> None:
         """Registers callbacks for third party event rules capabilities.
 
@@ -293,6 +305,10 @@ class ModuleApi:
             check_threepid_can_be_invited=check_threepid_can_be_invited,
             check_visibility_can_be_modified=check_visibility_can_be_modified,
             on_new_event=on_new_event,
+            check_can_shutdown_room=check_can_shutdown_room,
+            check_can_deactivate_user=check_can_deactivate_user,
+            on_profile_update=on_profile_update,
+            on_user_deactivation_status_changed=on_user_deactivation_status_changed,
         )
 
     def register_presence_router_callbacks(
@@ -596,15 +612,18 @@ class ModuleApi:
         localpart: str,
         displayname: Optional[str] = None,
         emails: Optional[List[str]] = None,
+        admin: bool = False,
     ) -> "defer.Deferred[str]":
         """Registers a new user with given localpart and optional displayname, emails.
 
         Added in Synapse v1.2.0.
+        Changed in Synapse v1.56.0: add 'admin' argument to register the user as admin.
 
         Args:
             localpart: The localpart of the new user.
             displayname: The displayname of the new user.
             emails: Emails to bind to the new user.
+            admin: True if the user should be registered as a server admin.
 
         Raises:
             SynapseError if there is an error performing the registration. Check the
@@ -618,6 +637,7 @@ class ModuleApi:
                 localpart=localpart,
                 default_display_name=displayname,
                 bind_emails=emails or [],
+                admin=admin,
             )
         )
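For module authors, the new third-party rules hooks and the admin flag are wired up through the ModuleApi method whose new parameters appear above (register_third_party_rules_callbacks). A hedged sketch of a module using them; the exact signature of on_profile_update is not visible in this diff, so the argument list below is an assumption, and the module name is made up:

    from typing import Any, Dict

    from synapse.module_api import ModuleApi

    class ExampleModule:
        def __init__(self, config: Dict[str, Any], api: ModuleApi):
            self._api = api
            # One of the callbacks added by this change; the argument shape is
            # assumed for illustration only.
            api.register_third_party_rules_callbacks(
                on_profile_update=self.on_profile_update,
            )

        async def on_profile_update(self, user_id, new_profile, by_admin, deactivation):
            # React to profile changes here.
            pass

        async def create_support_admin(self) -> str:
            # register_user gained an `admin` argument in v1.56.0 (see above).
            return await self._api.register_user(
                localpart="support-bot",
                displayname="Support",
                admin=True,
            )
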
 
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index 832eaa34e9..f42f605f23 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -169,7 +169,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "content.msgtype",
                 "pattern": "m.notice",
-                "_id": "_suppress_notices",
+                "_cache_key": "_suppress_notices",
             }
         ],
         "actions": ["dont_notify"],
@@ -183,13 +183,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.member",
-                "_id": "_member",
+                "_cache_key": "_member",
             },
             {
                 "kind": "event_match",
                 "key": "content.membership",
                 "pattern": "invite",
-                "_id": "_invite_member",
+                "_cache_key": "_invite_member",
             },
             {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"},
         ],
@@ -212,7 +212,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.member",
-                "_id": "_member",
+                "_cache_key": "_member",
             }
         ],
         "actions": ["dont_notify"],
@@ -237,12 +237,12 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "content.body",
                 "pattern": "@room",
-                "_id": "_roomnotif_content",
+                "_cache_key": "_roomnotif_content",
             },
             {
                 "kind": "sender_notification_permission",
                 "key": "room",
-                "_id": "_roomnotif_pl",
+                "_cache_key": "_roomnotif_pl",
             },
         ],
         "actions": ["notify", {"set_tweak": "highlight", "value": True}],
@@ -254,13 +254,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.tombstone",
-                "_id": "_tombstone",
+                "_cache_key": "_tombstone",
             },
             {
                 "kind": "event_match",
                 "key": "state_key",
                 "pattern": "",
-                "_id": "_tombstone_statekey",
+                "_cache_key": "_tombstone_statekey",
             },
         ],
         "actions": ["notify", {"set_tweak": "highlight", "value": True}],
@@ -272,7 +272,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.reaction",
-                "_id": "_reaction",
+                "_cache_key": "_reaction",
             }
         ],
         "actions": ["dont_notify"],
@@ -288,7 +288,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.call.invite",
-                "_id": "_call",
+                "_cache_key": "_call",
             }
         ],
         "actions": [
@@ -302,12 +302,12 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
     {
         "rule_id": "global/underride/.m.rule.room_one_to_one",
         "conditions": [
-            {"kind": "room_member_count", "is": "2", "_id": "member_count"},
+            {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
             {
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.message",
-                "_id": "_message",
+                "_cache_key": "_message",
             },
         ],
         "actions": [
@@ -321,12 +321,12 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
     {
         "rule_id": "global/underride/.m.rule.encrypted_room_one_to_one",
         "conditions": [
-            {"kind": "room_member_count", "is": "2", "_id": "member_count"},
+            {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
             {
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.encrypted",
-                "_id": "_encrypted",
+                "_cache_key": "_encrypted",
             },
         ],
         "actions": [
@@ -342,7 +342,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.message",
-                "_id": "_message",
+                "_cache_key": "_message",
             }
         ],
         "actions": ["notify", {"set_tweak": "highlight", "value": False}],
@@ -356,7 +356,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.encrypted",
-                "_id": "_encrypted",
+                "_cache_key": "_encrypted",
             }
         ],
         "actions": ["notify", {"set_tweak": "highlight", "value": False}],
@@ -368,19 +368,19 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "im.vector.modular.widgets",
-                "_id": "_type_modular_widgets",
+                "_cache_key": "_type_modular_widgets",
             },
             {
                 "kind": "event_match",
                 "key": "content.type",
                 "pattern": "jitsi",
-                "_id": "_content_type_jitsi",
+                "_cache_key": "_content_type_jitsi",
             },
             {
                 "kind": "event_match",
                 "key": "state_key",
                 "pattern": "*",
-                "_id": "_is_state_event",
+                "_cache_key": "_is_state_event",
             },
         ],
         "actions": ["notify", {"set_tweak": "highlight", "value": False}],
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index fecf86034e..a402a3e403 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -24,6 +24,7 @@ from synapse.event_auth import get_user_power_level
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.state import POWER_KEY
+from synapse.storage.databases.main.roommember import EventIdMembership
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches import CacheMetric, register_cache
 from synapse.util.caches.descriptors import lru_cache
@@ -213,7 +214,7 @@ class BulkPushRuleEvaluator:
         if not event.is_state():
             ignorers = await self.store.ignored_by(event.sender)
         else:
-            ignorers = set()
+            ignorers = frozenset()
 
         for uid, rules in rules_by_user.items():
             if event.sender == uid:
@@ -274,17 +275,17 @@ def _condition_checker(
     cache: Dict[str, bool],
 ) -> bool:
     for cond in conditions:
-        _id = cond.get("_id", None)
-        if _id:
-            res = cache.get(_id, None)
+        _cache_key = cond.get("_cache_key", None)
+        if _cache_key:
+            res = cache.get(_cache_key, None)
             if res is False:
                 return False
             elif res is True:
                 continue
 
         res = evaluator.matches(cond, uid, display_name)
-        if _id:
-            cache[_id] = bool(res)
+        if _cache_key:
+            cache[_cache_key] = bool(res)
 
         if not res:
             return False
@@ -292,7 +293,7 @@ def _condition_checker(
     return True
 
 
-MemberMap = Dict[str, Tuple[str, str]]
+MemberMap = Dict[str, Optional[EventIdMembership]]
 Rule = Dict[str, dict]
 RulesByUser = Dict[str, List[Rule]]
 StateGroup = Union[object, int]
@@ -306,7 +307,7 @@ class RulesForRoomData:
     *only* include data, and not references to e.g. the data stores.
     """
 
-    # event_id -> (user_id, state)
+    # event_id -> EventIdMembership
     member_map: MemberMap = attr.Factory(dict)
     # user_id -> rules
     rules_by_user: RulesByUser = attr.Factory(dict)
@@ -447,11 +448,10 @@ class RulesForRoom:
 
                 res = self.data.member_map.get(event_id, None)
                 if res:
-                    user_id, state = res
-                    if state == Membership.JOIN:
-                        rules = self.data.rules_by_user.get(user_id, None)
+                    if res.membership == Membership.JOIN:
+                        rules = self.data.rules_by_user.get(res.user_id, None)
                         if rules:
-                            ret_rules_by_user[user_id] = rules
+                            ret_rules_by_user[res.user_id] = rules
                     continue
 
                 # If a user has left a room we remove their push rule. If they
@@ -502,24 +502,26 @@ class RulesForRoom:
         """
         sequence = self.data.sequence
 
-        rows = await self.store.get_membership_from_event_ids(member_event_ids.values())
-
-        members = {row["event_id"]: (row["user_id"], row["membership"]) for row in rows}
+        members = await self.store.get_membership_from_event_ids(
+            member_event_ids.values()
+        )
 
-        # If the event is a join event then it will be in current state evnts
+        # If the event is a join event then it will be in current state events
         # map but not in the DB, so we have to explicitly insert it.
         if event.type == EventTypes.Member:
             for event_id in member_event_ids.values():
                 if event_id == event.event_id:
-                    members[event_id] = (event.state_key, event.membership)
+                    members[event_id] = EventIdMembership(
+                        user_id=event.state_key, membership=event.membership
+                    )
 
         if logger.isEnabledFor(logging.DEBUG):
             logger.debug("Found members %r: %r", self.room_id, members.values())
 
         joined_user_ids = {
-            user_id
-            for user_id, membership in members.values()
-            if membership == Membership.JOIN
+            entry.user_id
+            for entry in members.values()
+            if entry and entry.membership == Membership.JOIN
         }
 
         logger.debug("Joined: %r", joined_user_ids)
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index c5708cd888..63b22d50ae 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -40,7 +40,7 @@ def format_push_rules_for_user(
 
         # Remove internal stuff.
         for c in r["conditions"]:
-            c.pop("_id", None)
+            c.pop("_cache_key", None)
 
             pattern_type = c.pop("pattern_type", None)
             if pattern_type == "user_id":
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 8f48a33936..79ae06ce5d 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -74,17 +74,19 @@ REQUIREMENTS = [
     # Note: 21.1.0 broke `/sync`, see #9936
     "attrs>=19.2.0,!=21.1.0",
     "netaddr>=0.7.18",
-    "Jinja2>=2.9",
+    # Jinja2 3.1.0 removes the deprecated jinja2.Markup class, which we rely on.
+    "Jinja2<3.1.0",
     "bleach>=1.4.3",
-    "typing-extensions>=3.7.4",
+    # We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0.
+    "typing-extensions>=3.10.0",
     # We enforce that we have a `cryptography` version that bundles an `openssl`
     # with the latest security patches.
     "cryptography>=3.4.7",
     # ijson 3.1.4 fixes a bug with "." in property names
     "ijson>=3.1.4",
     "matrix-common~=1.1.0",
-    # For runtime introspection of our dependencies
-    "packaging~=21.3",
+    # We need packaging.requirements.Requirement, added in 16.1.
+    "packaging>=16.1",
 ]
 
 CONDITIONAL_REQUIREMENTS = {
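Relaxing packaging~=21.3 to packaging>=16.1 works because the only thing needed at runtime is packaging.requirements.Requirement, used to introspect pin strings like the ones above. A quick illustration of that API; the requirement string is just an example:

    from packaging.requirements import Requirement
    from packaging.version import Version

    req = Requirement("Jinja2<3.1.0")
    print(req.name)                                  # Jinja2
    print(req.specifier.contains(Version("3.0.3")))  # True  - still allowed
    print(req.specifier.contains(Version("3.1.0")))  # False - the release that drops jinja2.Markup
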
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 2e697c74a6..f1abb98653 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -21,6 +21,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Tuple
 
 from prometheus_client import Counter, Gauge
 
+from twisted.internet.error import ConnectError, DNSLookupError
 from twisted.web.server import Request
 
 from synapse.api.errors import HttpResponseException, SynapseError
@@ -87,6 +88,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
             `_handle_request` must return a Deferred.
         RETRY_ON_TIMEOUT(bool): Whether or not to retry the request when a 504
             is received.
+        RETRY_ON_CONNECT_ERROR (bool): Whether or not to retry the request when
+            a connection error is received.
+        RETRY_ON_CONNECT_ERROR_ATTEMPTS (int): Number of retries to attempt when
+            receiving connection errors; each retry backs off exponentially longer.
     """
 
     NAME: str = abc.abstractproperty()  # type: ignore
@@ -94,6 +99,8 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
     METHOD = "POST"
     CACHE = True
     RETRY_ON_TIMEOUT = True
+    RETRY_ON_CONNECT_ERROR = True
+    RETRY_ON_CONNECT_ERROR_ATTEMPTS = 5  # =63s (2^6-1)
 
     def __init__(self, hs: "HomeServer"):
         if self.CACHE:
@@ -236,18 +243,20 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
                     "/".join(url_args),
                 )
 
+                headers: Dict[bytes, List[bytes]] = {}
+                # Add an authorization header, if configured.
+                if replication_secret:
+                    headers[b"Authorization"] = [b"Bearer " + replication_secret]
+                opentracing.inject_header_dict(headers, check_destination=False)
+
                 try:
+                    # Keep track of attempts made so we can bail if we don't manage to
+                    # connect to the target after N tries.
+                    attempts = 0
                     # We keep retrying the same request for timeouts. This is so that we
                     # have a good idea that the request has either succeeded or failed
                     # on the master, and so whether we should clean up or not.
                     while True:
-                        headers: Dict[bytes, List[bytes]] = {}
-                        # Add an authorization header, if configured.
-                        if replication_secret:
-                            headers[b"Authorization"] = [
-                                b"Bearer " + replication_secret
-                            ]
-                        opentracing.inject_header_dict(headers, check_destination=False)
                         try:
                             result = await request_func(uri, data, headers=headers)
                             break
@@ -255,11 +264,27 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
                             if not cls.RETRY_ON_TIMEOUT:
                                 raise
 
-                        logger.warning("%s request timed out; retrying", cls.NAME)
+                            logger.warning("%s request timed out; retrying", cls.NAME)
+
+                            # If we timed out we probably don't need to worry about backing
+                            # off too much, but let's just wait a little anyway.
+                            await clock.sleep(1)
+                        except (ConnectError, DNSLookupError) as e:
+                            if not cls.RETRY_ON_CONNECT_ERROR:
+                                raise
+                            if attempts > cls.RETRY_ON_CONNECT_ERROR_ATTEMPTS:
+                                raise
+
+                            delay = 2 ** attempts
+                            logger.warning(
+                                "%s request connection failed; retrying in %ds: %r",
+                                cls.NAME,
+                                delay,
+                                e,
+                            )
 
-                        # If we timed out we probably don't need to worry about backing
-                        # off too much, but lets just wait a little anyway.
-                        await clock.sleep(1)
+                            await clock.sleep(delay)
+                            attempts += 1
                 except HttpResponseException as e:
                     # We convert to SynapseError as we know that it was a SynapseError
                     # on the main process that we should send to the client. (And
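The new retry loop sleeps 2 ** attempts seconds between connection-error retries and gives up once attempts exceeds RETRY_ON_CONNECT_ERROR_ATTEMPTS, so the default of 5 allows six retries totalling 1+2+4+8+16+32 = 63 seconds of backoff, matching the "# =63s (2^6-1)" comment. A stripped-down asyncio sketch of the same backoff shape; it uses the builtin ConnectionError as a stand-in for Twisted's ConnectError/DNSLookupError:

    import asyncio

    RETRY_ON_CONNECT_ERROR_ATTEMPTS = 5   # 1+2+4+8+16+32 = 63s of backoff in total

    async def call_with_backoff(request_func):
        attempts = 0
        while True:
            try:
                return await request_func()
            except ConnectionError:
                if attempts > RETRY_ON_CONNECT_ERROR_ATTEMPTS:
                    raise
                delay = 2 ** attempts
                print(f"connection failed; retrying in {delay}s")
                await asyncio.sleep(delay)
                attempts += 1
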
diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py
index b5b84c09ae..14706a0817 100644
--- a/synapse/replication/slave/storage/client_ips.py
+++ b/synapse/replication/slave/storage/client_ips.py
@@ -54,6 +54,6 @@ class SlavedClientIpStore(BaseSlavedStore):
 
         self.client_ip_last_seen.set(key, now)
 
-        self.hs.get_tcp_replication().send_user_ip(
+        self.hs.get_replication_command_handler().send_user_ip(
             user_id, access_token, ip, user_agent, device_id, now
         )
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 1b8479b0b4..deeaaec4e6 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -380,7 +380,7 @@ class FederationSenderHandler:
             # changes.
             hosts = {row.entity for row in rows if not row.entity.startswith("@")}
             for host in hosts:
-                self.federation_sender.send_device_messages(host)
+                self.federation_sender.send_device_messages(host, immediate=False)
 
         elif stream_name == ToDeviceStream.NAME:
             # The to_device stream includes stuff to be pushed to both local
@@ -462,6 +462,8 @@ class FederationSenderHandler:
 
                 # We ACK this token over replication so that the master can drop
                 # its in memory queues
-                self._hs.get_tcp_replication().send_federation_ack(current_position)
+                self._hs.get_replication_command_handler().send_federation_ack(
+                    current_position
+                )
         except Exception:
             logger.exception("Error updating federation stream position")
diff --git a/synapse/replication/tcp/external_cache.py b/synapse/replication/tcp/external_cache.py
index aaf91e5e02..bf7d017968 100644
--- a/synapse/replication/tcp/external_cache.py
+++ b/synapse/replication/tcp/external_cache.py
@@ -21,7 +21,7 @@ from synapse.logging.context import make_deferred_yieldable
 from synapse.util import json_decoder, json_encoder
 
 if TYPE_CHECKING:
-    from txredisapi import RedisProtocol
+    from txredisapi import ConnectionHandler
 
     from synapse.server import HomeServer
 
@@ -63,7 +63,7 @@ class ExternalCache:
     def __init__(self, hs: "HomeServer"):
         if hs.config.redis.redis_enabled:
             self._redis_connection: Optional[
-                "RedisProtocol"
+                "ConnectionHandler"
             ] = hs.get_outbound_redis_connection()
         else:
             self._redis_connection = None
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index 0d2013a3cf..b217c35f99 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -295,9 +295,7 @@ class ReplicationCommandHandler:
             raise Exception("Unrecognised command %s in stream queue", cmd.NAME)
 
     def start_replication(self, hs: "HomeServer") -> None:
-        """Helper method to start a replication connection to the remote server
-        using TCP.
-        """
+        """Helper method to start replication."""
         if hs.config.redis.redis_enabled:
             from synapse.replication.tcp.redis import (
                 RedisDirectTcpReplicationClientFactory,
@@ -711,7 +709,7 @@ class ReplicationCommandHandler:
         self.send_command(RemoteServerUpCommand(server))
 
     def stream_update(self, stream_name: str, token: Optional[int], data: Any) -> None:
-        """Called when a new update is available to stream to clients.
+        """Called when a new update is available to stream to Redis subscribers.
 
         We need to check if the client is interested in the stream or not
         """
diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py
index 3170f7c59b..989c5be032 100644
--- a/synapse/replication/tcp/redis.py
+++ b/synapse/replication/tcp/redis.py
@@ -93,7 +93,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol):
 
     synapse_handler: "ReplicationCommandHandler"
     synapse_stream_name: str
-    synapse_outbound_redis_connection: txredisapi.RedisProtocol
+    synapse_outbound_redis_connection: txredisapi.ConnectionHandler
 
     def __init__(self, *args: Any, **kwargs: Any):
         super().__init__(*args, **kwargs)
@@ -313,7 +313,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory):
     protocol = RedisSubscriber
 
     def __init__(
-        self, hs: "HomeServer", outbound_redis_connection: txredisapi.RedisProtocol
+        self, hs: "HomeServer", outbound_redis_connection: txredisapi.ConnectionHandler
     ):
 
         super().__init__(
@@ -325,7 +325,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory):
             password=hs.config.redis.redis_password,
         )
 
-        self.synapse_handler = hs.get_tcp_replication()
+        self.synapse_handler = hs.get_replication_command_handler()
         self.synapse_stream_name = hs.hostname
 
         self.synapse_outbound_redis_connection = outbound_redis_connection
@@ -353,7 +353,7 @@ def lazyConnection(
     reconnect: bool = True,
     password: Optional[str] = None,
     replyTimeout: int = 30,
-) -> txredisapi.RedisProtocol:
+) -> txredisapi.ConnectionHandler:
     """Creates a connection to Redis that is lazily set up and reconnects if the
     connection is lost.
     """
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index 494e42a2be..c6870df8f9 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -44,7 +44,7 @@ class ReplicationStreamProtocolFactory(ServerFactory):
     """Factory for new replication connections."""
 
     def __init__(self, hs: "HomeServer"):
-        self.command_handler = hs.get_tcp_replication()
+        self.command_handler = hs.get_replication_command_handler()
         self.clock = hs.get_clock()
         self.server_name = hs.config.server.server_name
 
@@ -67,8 +67,8 @@ class ReplicationStreamProtocolFactory(ServerFactory):
 class ReplicationStreamer:
     """Handles replication connections.
 
-    This needs to be poked when new replication data may be available. When new
-    data is available it will propagate to all connected clients.
+    This needs to be poked when new replication data may be available.
+    When new data is available it will propagate to all Redis subscribers.
     """
 
     def __init__(self, hs: "HomeServer"):
@@ -85,7 +85,7 @@ class ReplicationStreamer:
         self.is_looping = False
         self.pending_updates = False
 
-        self.command_handler = hs.get_tcp_replication()
+        self.command_handler = hs.get_replication_command_handler()
 
         # Set of streams to replicate.
         self.streams = self.command_handler.get_streams_to_replicate()
@@ -109,7 +109,7 @@ class ReplicationStreamer:
 
     def on_notifier_poke(self) -> None:
         """Checks if there is actually any new data and sends it to the
-        connections if there are.
+        Redis subscribers if there are.
 
         This should get called each time new data is available, even if it
         is currently being executed, so that nothing gets missed
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 23d631a769..495f2f0285 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -316,7 +316,19 @@ class PresenceFederationStream(Stream):
 class TypingStream(Stream):
     @attr.s(slots=True, frozen=True, auto_attribs=True)
     class TypingStreamRow:
+        """
+        An entry in the typing stream.
+        Describes all the users that are 'typing' right now in one room.
+
+        When a user stops typing, it will be streamed as a new update with that
+        user absent; you can think of the `user_ids` list as overwriting the
+        entire list that was there previously.
+        """
+
+        # The room that this update is for.
         room_id: str
+
+        # All the users that are 'typing' right now in the specified room.
         user_ids: List[str]
 
     NAME = "typing"
diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html
index 00e1dcdbb8..1ba850369a 100644
--- a/synapse/res/templates/sso_auth_account_details.html
+++ b/synapse/res/templates/sso_auth_account_details.html
@@ -130,22 +130,22 @@
   </head>
   <body>
     <header>
-      <h1>Your account is nearly ready</h1>
-      <p>Check your details before creating an account on {{ server_name }}</p>
+      <h1>Create your account</h1>
+      <p>This is required. Continue to create your account on {{ server_name }}. You can't change this later.</p>
     </header>
     <main>
       <form method="post" class="form__input" id="form">
         <div class="username_input" id="username_input">
-          <label for="field-username">Username</label>
+          <label for="field-username">Username (required)</label>
           <div class="prefix">@</div>
-          <input type="text" name="username" id="field-username" autofocus>
+          <input type="text" name="username" id="field-username" value="{{ user_attributes.localpart }}" autofocus>
           <div class="postfix">:{{ server_name }}</div>
         </div>
         <output for="username_input" id="field-username-output"></output>
         <input type="submit" value="Continue" class="primary-button">
         {% if user_attributes.avatar_url or user_attributes.display_name or user_attributes.emails %}
         <section class="idp-pick-details">
-          <h2>{% if idp.idp_icon %}<img src="{{ idp.idp_icon | mxc_to_http(24, 24) }}"/>{% endif %}Information from {{ idp.idp_name }}</h2>
+          <h2>{% if idp.idp_icon %}<img src="{{ idp.idp_icon | mxc_to_http(24, 24) }}"/>{% endif %}Optional data from {{ idp.idp_name }}</h2>
           {% if user_attributes.avatar_url %}
           <label class="idp-detail idp-avatar" for="idp-avatar">
             <div class="check-row">
diff --git a/synapse/res/templates/sso_auth_account_details.js b/synapse/res/templates/sso_auth_account_details.js
index 3c45df9078..82438519a2 100644
--- a/synapse/res/templates/sso_auth_account_details.js
+++ b/synapse/res/templates/sso_auth_account_details.js
@@ -62,7 +62,7 @@ function validateUsername(username) {
     usernameField.parentElement.classList.remove("invalid");
     usernameOutput.classList.remove("error");
     if (!username) {
-        return reportError("Please provide a username");
+        return reportError("This is required. Please provide a username");
     }
     if (username.length > 255) {
         return reportError("Too long, please choose something shorter");
diff --git a/synapse/res/templates/sso_footer.html b/synapse/res/templates/sso_footer.html
index 588a3d508d..b46e0d83fe 100644
--- a/synapse/res/templates/sso_footer.html
+++ b/synapse/res/templates/sso_footer.html
@@ -15,5 +15,5 @@
               </g>
           </g>
       </svg>
-      <p>An open network for secure, decentralized communication.<br>© 2021 The Matrix.org Foundation C.I.C.</p>
+      <p>An open network for secure, decentralized communication.<br>© 2022 The Matrix.org Foundation C.I.C.</p>
 </footer>
\ No newline at end of file
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index cebdeecb81..57c4773edc 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -32,6 +32,7 @@ from synapse.rest.client import (
     knock,
     login as v1_login,
     logout,
+    mutual_rooms,
     notifications,
     openid,
     password_policy,
@@ -49,7 +50,6 @@ from synapse.rest.client import (
     room_keys,
     room_upgrade_rest_servlet,
     sendtodevice,
-    shared_rooms,
     sync,
     tags,
     thirdparty,
@@ -118,7 +118,8 @@ class ClientRestResource(JsonResource):
         thirdparty.register_servlets(hs, client_resource)
         sendtodevice.register_servlets(hs, client_resource)
         user_directory.register_servlets(hs, client_resource)
-        groups.register_servlets(hs, client_resource)
+        if hs.config.experimental.groups_enabled:
+            groups.register_servlets(hs, client_resource)
         room_upgrade_rest_servlet.register_servlets(hs, client_resource)
         room_batch.register_servlets(hs, client_resource)
         capabilities.register_servlets(hs, client_resource)
@@ -131,4 +132,4 @@ class ClientRestResource(JsonResource):
         admin.register_servlets_for_client_rest_resource(hs, client_resource)
 
         # unstable
-        shared_rooms.register_servlets(hs, client_resource)
+        mutual_rooms.register_servlets(hs, client_resource)
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 6de302f813..cb4d55c89d 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -293,7 +293,8 @@ def register_servlets_for_client_rest_resource(
     ResetPasswordRestServlet(hs).register(http_server)
     SearchUsersRestServlet(hs).register(http_server)
     UserRegisterServlet(hs).register(http_server)
-    DeleteGroupAdminRestServlet(hs).register(http_server)
+    if hs.config.experimental.groups_enabled:
+        DeleteGroupAdminRestServlet(hs).register(http_server)
     AccountValidityRenewServlet(hs).register(http_server)
 
     # Load the media repo ones if we're using them. Otherwise load the servlets which
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index f4736a3dad..356d6f74d7 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -67,6 +67,7 @@ class RoomRestV2Servlet(RestServlet):
         self._auth = hs.get_auth()
         self._store = hs.get_datastores().main
         self._pagination_handler = hs.get_pagination_handler()
+        self._third_party_rules = hs.get_third_party_event_rules()
 
     async def on_DELETE(
         self, request: SynapseRequest, room_id: str
@@ -106,6 +107,14 @@ class RoomRestV2Servlet(RestServlet):
                 HTTPStatus.BAD_REQUEST, "%s is not a legal room ID" % (room_id,)
             )
 
+        # Check this here, as otherwise we'll only fail after the background job has been started.
+        if not await self._third_party_rules.check_can_shutdown_room(
+            requester.user.to_string(), room_id
+        ):
+            raise SynapseError(
+                403, "Shutdown of this room is forbidden", Codes.FORBIDDEN
+            )
+
         delete_id = self._pagination_handler.start_shutdown_and_purge_room(
             room_id=room_id,
             new_room_user_id=content.get("new_room_user_id"),
diff --git a/synapse/rest/client/shared_rooms.py b/synapse/rest/client/mutual_rooms.py
index e669fa7890..27bfaf0b29 100644
--- a/synapse/rest/client/shared_rooms.py
+++ b/synapse/rest/client/mutual_rooms.py
@@ -28,13 +28,13 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
-class UserSharedRoomsServlet(RestServlet):
+class UserMutualRoomsServlet(RestServlet):
     """
-    GET /uk.half-shot.msc2666/user/shared_rooms/{user_id} HTTP/1.1
+    GET /uk.half-shot.msc2666/user/mutual_rooms/{user_id} HTTP/1.1
     """
 
     PATTERNS = client_patterns(
-        "/uk.half-shot.msc2666/user/shared_rooms/(?P<user_id>[^/]*)",
+        "/uk.half-shot.msc2666/user/mutual_rooms/(?P<user_id>[^/]*)",
         releases=(),  # This is an unstable feature
     )
 
@@ -42,17 +42,19 @@ class UserSharedRoomsServlet(RestServlet):
         super().__init__()
         self.auth = hs.get_auth()
         self.store = hs.get_datastores().main
-        self.user_directory_active = hs.config.server.update_user_directory
+        self.user_directory_search_enabled = (
+            hs.config.userdirectory.user_directory_search_enabled
+        )
 
     async def on_GET(
         self, request: SynapseRequest, user_id: str
     ) -> Tuple[int, JsonDict]:
 
-        if not self.user_directory_active:
+        if not self.user_directory_search_enabled:
             raise SynapseError(
                 code=400,
-                msg="The user directory is disabled on this server. Cannot determine shared rooms.",
-                errcode=Codes.FORBIDDEN,
+                msg="User directory searching is disabled. Cannot determine shared rooms.",
+                errcode=Codes.UNKNOWN,
             )
 
         UserID.from_string(user_id)
@@ -64,7 +66,8 @@ class UserSharedRoomsServlet(RestServlet):
                 msg="You cannot request a list of shared rooms with yourself",
                 errcode=Codes.FORBIDDEN,
             )
-        rooms = await self.store.get_shared_rooms_for_users(
+
+        rooms = await self.store.get_mutual_rooms_for_users(
             requester.user.to_string(), user_id
         )
 
@@ -72,4 +75,4 @@ class UserSharedRoomsServlet(RestServlet):
 
 
 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
-    UserSharedRoomsServlet(hs).register(http_server)
+    UserMutualRoomsServlet(hs).register(http_server)
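The servlet rename also renames the unstable endpoint path from .../shared_rooms/... to .../mutual_rooms/.... A hedged sketch of calling the renamed endpoint with a plain HTTP client; the homeserver URL and token are placeholders, and the /_matrix/client/unstable prefix is assumed since the servlet is registered with releases=():

    import requests

    HOMESERVER = "https://matrix.example.org"   # placeholder
    ACCESS_TOKEN = "secret"                     # placeholder

    resp = requests.get(
        f"{HOMESERVER}/_matrix/client/unstable"
        "/uk.half-shot.msc2666/user/mutual_rooms/@bob:example.org",
        headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    )
    # Returns a 400 with errcode M_UNKNOWN if user directory search is
    # disabled on the server (see the check above).
    print(resp.status_code, resp.json())
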
diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py
index 20377a9ac6..ff040de6b8 100644
--- a/synapse/rest/client/notifications.py
+++ b/synapse/rest/client/notifications.py
@@ -16,7 +16,10 @@ import logging
 from typing import TYPE_CHECKING, Tuple
 
 from synapse.api.constants import ReceiptTypes
-from synapse.events.utils import format_event_for_client_v2_without_room_id
+from synapse.events.utils import (
+    SerializeEventConfig,
+    format_event_for_client_v2_without_room_id,
+)
 from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
@@ -75,7 +78,9 @@ class NotificationsServlet(RestServlet):
                     self._event_serializer.serialize_event(
                         notif_events[pa.event_id],
                         self.clock.time_msec(),
-                        event_format=format_event_for_client_v2_without_room_id,
+                        config=SerializeEventConfig(
+                            event_format=format_event_for_client_v2_without_room_id
+                        ),
                     )
                 ),
             }
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py
index 487ea38b55..c16078b187 100644
--- a/synapse/rest/client/relations.py
+++ b/synapse/rest/client/relations.py
@@ -27,50 +27,15 @@ from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
 from synapse.rest.client._base import client_patterns
-from synapse.storage.relations import (
-    AggregationPaginationToken,
-    PaginationChunk,
-    RelationPaginationToken,
-)
-from synapse.types import JsonDict, RoomStreamToken, StreamToken
+from synapse.storage.relations import AggregationPaginationToken
+from synapse.types import JsonDict, StreamToken
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
-    from synapse.storage.databases.main import DataStore
 
 logger = logging.getLogger(__name__)
 
 
-async def _parse_token(
-    store: "DataStore", token: Optional[str]
-) -> Optional[StreamToken]:
-    """
-    For backwards compatibility support RelationPaginationToken, but new pagination
-    tokens are generated as full StreamTokens, to be compatible with /sync and /messages.
-    """
-    if not token:
-        return None
-    # Luckily the format for StreamToken and RelationPaginationToken differ enough
-    # that they can easily be separated. An "_" appears in the serialization of
-    # RoomStreamToken (as part of StreamToken), but RelationPaginationToken uses
-    # "-" only for separators.
-    if "_" in token:
-        return await StreamToken.from_string(store, token)
-    else:
-        relation_token = RelationPaginationToken.from_string(token)
-        return StreamToken(
-            room_key=RoomStreamToken(relation_token.topological, relation_token.stream),
-            presence_key=0,
-            typing_key=0,
-            receipt_key=0,
-            account_data_key=0,
-            push_rules_key=0,
-            to_device_key=0,
-            device_list_key=0,
-            groups_key=0,
-        )
-
-
 class RelationPaginationServlet(RestServlet):
     """API to paginate relations on an event by topological ordering, optionally
     filtered by relation type and event type.
@@ -86,9 +51,7 @@ class RelationPaginationServlet(RestServlet):
         super().__init__()
         self.auth = hs.get_auth()
         self.store = hs.get_datastores().main
-        self.clock = hs.get_clock()
-        self._event_serializer = hs.get_event_client_serializer()
-        self.event_handler = hs.get_event_handler()
+        self._relations_handler = hs.get_relations_handler()
 
     async def on_GET(
         self,
@@ -100,16 +63,6 @@ class RelationPaginationServlet(RestServlet):
     ) -> Tuple[int, JsonDict]:
         requester = await self.auth.get_user_by_req(request, allow_guest=True)
 
-        await self.auth.check_user_in_room_or_world_readable(
-            room_id, requester.user.to_string(), allow_departed_users=True
-        )
-
-        # This gets the original event and checks that a) the event exists and
-        # b) the user is allowed to view it.
-        event = await self.event_handler.get_event(requester.user, room_id, parent_id)
-        if event is None:
-            raise SynapseError(404, "Unknown parent event.")
-
         limit = parse_integer(request, "limit", default=5)
         direction = parse_string(
             request, "org.matrix.msc3715.dir", default="b", allowed_values=["f", "b"]
@@ -117,49 +70,27 @@ class RelationPaginationServlet(RestServlet):
         from_token_str = parse_string(request, "from")
         to_token_str = parse_string(request, "to")
 
-        if event.internal_metadata.is_redacted():
-            # If the event is redacted, return an empty list of relations
-            pagination_chunk = PaginationChunk(chunk=[])
-        else:
-            # Return the relations
-            from_token = await _parse_token(self.store, from_token_str)
-            to_token = await _parse_token(self.store, to_token_str)
-
-            pagination_chunk = await self.store.get_relations_for_event(
-                event_id=parent_id,
-                room_id=room_id,
-                relation_type=relation_type,
-                event_type=event_type,
-                limit=limit,
-                direction=direction,
-                from_token=from_token,
-                to_token=to_token,
-            )
+        # Return the relations
+        from_token = None
+        if from_token_str:
+            from_token = await StreamToken.from_string(self.store, from_token_str)
+        to_token = None
+        if to_token_str:
+            to_token = await StreamToken.from_string(self.store, to_token_str)
 
-        events = await self.store.get_events_as_list(
-            [c["event_id"] for c in pagination_chunk.chunk]
-        )
-
-        now = self.clock.time_msec()
-        # Do not bundle aggregations when retrieving the original event because
-        # we want the content before relations are applied to it.
-        original_event = self._event_serializer.serialize_event(
-            event, now, bundle_aggregations=None
-        )
-        # The relations returned for the requested event do include their
-        # bundled aggregations.
-        aggregations = await self.store.get_bundled_aggregations(
-            events, requester.user.to_string()
-        )
-        serialized_events = self._event_serializer.serialize_events(
-            events, now, bundle_aggregations=aggregations
+        result = await self._relations_handler.get_relations(
+            requester=requester,
+            event_id=parent_id,
+            room_id=room_id,
+            relation_type=relation_type,
+            event_type=event_type,
+            limit=limit,
+            direction=direction,
+            from_token=from_token,
+            to_token=to_token,
         )
 
-        return_value = await pagination_chunk.to_dict(self.store)
-        return_value["chunk"] = serialized_events
-        return_value["original_event"] = original_event
-
-        return 200, return_value
+        return 200, result
 
 
 class RelationAggregationPaginationServlet(RestServlet):
@@ -224,27 +155,23 @@ class RelationAggregationPaginationServlet(RestServlet):
         from_token_str = parse_string(request, "from")
         to_token_str = parse_string(request, "to")
 
-        if event.internal_metadata.is_redacted():
-            # If the event is redacted, return an empty list of relations
-            pagination_chunk = PaginationChunk(chunk=[])
-        else:
-            # Return the relations
-            from_token = None
-            if from_token_str:
-                from_token = AggregationPaginationToken.from_string(from_token_str)
-
-            to_token = None
-            if to_token_str:
-                to_token = AggregationPaginationToken.from_string(to_token_str)
-
-            pagination_chunk = await self.store.get_aggregation_groups_for_event(
-                event_id=parent_id,
-                room_id=room_id,
-                event_type=event_type,
-                limit=limit,
-                from_token=from_token,
-                to_token=to_token,
-            )
+        # Return the relations
+        from_token = None
+        if from_token_str:
+            from_token = AggregationPaginationToken.from_string(from_token_str)
+
+        to_token = None
+        if to_token_str:
+            to_token = AggregationPaginationToken.from_string(to_token_str)
+
+        pagination_chunk = await self.store.get_aggregation_groups_for_event(
+            event_id=parent_id,
+            room_id=room_id,
+            event_type=event_type,
+            limit=limit,
+            from_token=from_token,
+            to_token=to_token,
+        )
 
         return 200, await pagination_chunk.to_dict(self.store)
 
@@ -283,9 +210,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
         super().__init__()
         self.auth = hs.get_auth()
         self.store = hs.get_datastores().main
-        self.clock = hs.get_clock()
-        self._event_serializer = hs.get_event_client_serializer()
-        self.event_handler = hs.get_event_handler()
+        self._relations_handler = hs.get_relations_handler()
 
     async def on_GET(
         self,
@@ -298,18 +223,6 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
     ) -> Tuple[int, JsonDict]:
         requester = await self.auth.get_user_by_req(request, allow_guest=True)
 
-        await self.auth.check_user_in_room_or_world_readable(
-            room_id,
-            requester.user.to_string(),
-            allow_departed_users=True,
-        )
-
-        # This checks that a) the event exists and b) the user is allowed to
-        # view it.
-        event = await self.event_handler.get_event(requester.user, room_id, parent_id)
-        if event is None:
-            raise SynapseError(404, "Unknown parent event.")
-
         if relation_type != RelationTypes.ANNOTATION:
             raise SynapseError(400, "Relation type must be 'annotation'")
 
@@ -317,10 +230,15 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
         from_token_str = parse_string(request, "from")
         to_token_str = parse_string(request, "to")
 
-        from_token = await _parse_token(self.store, from_token_str)
-        to_token = await _parse_token(self.store, to_token_str)
+        from_token = None
+        if from_token_str:
+            from_token = await StreamToken.from_string(self.store, from_token_str)
+        to_token = None
+        if to_token_str:
+            to_token = await StreamToken.from_string(self.store, to_token_str)
 
-        result = await self.store.get_relations_for_event(
+        result = await self._relations_handler.get_relations(
+            requester=requester,
             event_id=parent_id,
             room_id=room_id,
             relation_type=relation_type,
@@ -331,17 +249,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
             to_token=to_token,
         )
 
-        events = await self.store.get_events_as_list(
-            [c["event_id"] for c in result.chunk]
-        )
-
-        now = self.clock.time_msec()
-        serialized_events = self._event_serializer.serialize_events(events, now)
-
-        return_value = await result.to_dict(self.store)
-        return_value["chunk"] = serialized_events
-
-        return 200, return_value
+        return 200, result
 
 
 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 8a06ab8c5f..47e152c8cc 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -645,6 +645,7 @@ class RoomEventServlet(RestServlet):
         self._store = hs.get_datastores().main
         self.event_handler = hs.get_event_handler()
         self._event_serializer = hs.get_event_client_serializer()
+        self._relations_handler = hs.get_relations_handler()
         self.auth = hs.get_auth()
 
     async def on_GET(
@@ -663,7 +664,7 @@ class RoomEventServlet(RestServlet):
 
         if event:
             # Ensure there are bundled aggregations available.
-            aggregations = await self._store.get_bundled_aggregations(
+            aggregations = await self._relations_handler.get_bundled_aggregations(
                 [event], requester.user.to_string()
             )
 
diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py
index 0048973e59..0780485322 100644
--- a/synapse/rest/client/room_batch.py
+++ b/synapse/rest/client/room_batch.py
@@ -124,14 +124,14 @@ class RoomBatchSendEventRestServlet(RestServlet):
                 )
 
         # For the event we are inserting next to (`prev_event_ids_from_query`),
-        # find the most recent auth events (derived from state events) that
-        # allowed that message to be sent. We will use that as a base
-        # to auth our historical messages against.
-        auth_event_ids = await self.room_batch_handler.get_most_recent_auth_event_ids_from_event_id_list(
+        # find the most recent state events that allowed that message to be
+        # sent. We will use that as a base to auth our historical messages
+        # against.
+        state_event_ids = await self.room_batch_handler.get_most_recent_full_state_ids_from_event_id_list(
             prev_event_ids_from_query
         )
 
-        if not auth_event_ids:
+        if not state_event_ids:
             raise SynapseError(
                 HTTPStatus.BAD_REQUEST,
                 "No auth events found for given prev_event query parameter. The prev_event=%s probably does not exist."
@@ -148,13 +148,13 @@ class RoomBatchSendEventRestServlet(RestServlet):
                 await self.room_batch_handler.persist_state_events_at_start(
                     state_events_at_start=body["state_events_at_start"],
                     room_id=room_id,
-                    initial_auth_event_ids=auth_event_ids,
+                    initial_state_event_ids=state_event_ids,
                     app_service_requester=requester,
                 )
             )
             # Update our ongoing auth event ID list with all of the new state we
             # just created
-            auth_event_ids.extend(state_event_ids_at_start)
+            state_event_ids.extend(state_event_ids_at_start)
 
         inherited_depth = await self.room_batch_handler.inherit_depth_from_prev_ids(
             prev_event_ids_from_query
@@ -196,7 +196,12 @@ class RoomBatchSendEventRestServlet(RestServlet):
                 ),
                 base_insertion_event_dict,
                 prev_event_ids=base_insertion_event_dict.get("prev_events"),
-                auth_event_ids=auth_event_ids,
+                # Also set the explicit state here because we want to resolve
+                # any `state_events_at_start` here too. It's not strictly
+                # necessary to accomplish anything but if someone asks for the
+                # state at this point, we probably want to show them the
+                # historical state that was part of this batch.
+                state_event_ids=state_event_ids,
                 historical=True,
                 depth=inherited_depth,
             )
@@ -212,7 +217,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
             room_id=room_id,
             batch_id_to_connect_to=batch_id_to_connect_to,
             inherited_depth=inherited_depth,
-            auth_event_ids=auth_event_ids,
+            initial_state_event_ids=state_event_ids,
             app_service_requester=requester,
         )
 
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index f3018ff690..53c385a86c 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -14,24 +14,14 @@
 import itertools
 import logging
 from collections import defaultdict
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Dict,
-    Iterable,
-    List,
-    Optional,
-    Tuple,
-    Union,
-)
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 
 from synapse.api.constants import Membership, PresenceState
 from synapse.api.errors import Codes, StoreError, SynapseError
 from synapse.api.filtering import FilterCollection
 from synapse.api.presence import UserPresenceState
-from synapse.events import EventBase
 from synapse.events.utils import (
+    SerializeEventConfig,
     format_event_for_client_v2_without_room_id,
     format_event_raw,
 )
@@ -48,7 +38,6 @@ from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
 from synapse.logging.opentracing import trace
-from synapse.storage.databases.main.relations import BundledAggregations
 from synapse.types import JsonDict, StreamToken
 from synapse.util import json_decoder
 
@@ -239,28 +228,31 @@ class SyncRestServlet(RestServlet):
         else:
             raise Exception("Unknown event format %s" % (filter.event_format,))
 
+        serialize_options = SerializeEventConfig(
+            event_format=event_formatter,
+            token_id=access_token_id,
+            only_event_fields=filter.event_fields,
+        )
+        stripped_serialize_options = SerializeEventConfig(
+            event_format=event_formatter,
+            token_id=access_token_id,
+            include_stripped_room_state=True,
+        )
+
         joined = await self.encode_joined(
-            sync_result.joined,
-            time_now,
-            access_token_id,
-            filter.event_fields,
-            event_formatter,
+            sync_result.joined, time_now, serialize_options
         )
 
         invited = await self.encode_invited(
-            sync_result.invited, time_now, access_token_id, event_formatter
+            sync_result.invited, time_now, stripped_serialize_options
         )
 
         knocked = await self.encode_knocked(
-            sync_result.knocked, time_now, access_token_id, event_formatter
+            sync_result.knocked, time_now, stripped_serialize_options
         )
 
         archived = await self.encode_archived(
-            sync_result.archived,
-            time_now,
-            access_token_id,
-            filter.event_fields,
-            event_formatter,
+            sync_result.archived, time_now, serialize_options
         )
 
         logger.debug("building sync response dict")
@@ -339,9 +331,7 @@ class SyncRestServlet(RestServlet):
         self,
         rooms: List[JoinedSyncResult],
         time_now: int,
-        token_id: Optional[int],
-        event_fields: List[str],
-        event_formatter: Callable[[JsonDict], JsonDict],
+        serialize_options: SerializeEventConfig,
     ) -> JsonDict:
         """
         Encode the joined rooms in a sync result
@@ -349,24 +339,14 @@ class SyncRestServlet(RestServlet):
         Args:
             rooms: list of sync results for rooms this user is joined to
             time_now: current time - used as a baseline for age calculations
-            token_id: ID of the user's auth token - used for namespacing
-                of transaction IDs
-            event_fields: List of event fields to include. If empty,
-                all fields will be returned.
-            event_formatter: function to convert from federation format
-                to client format
+            serialize_options: Event serializer options
         Returns:
             The joined rooms list, in our response format
         """
         joined = {}
         for room in rooms:
             joined[room.room_id] = await self.encode_room(
-                room,
-                time_now,
-                token_id,
-                joined=True,
-                only_fields=event_fields,
-                event_formatter=event_formatter,
+                room, time_now, joined=True, serialize_options=serialize_options
             )
 
         return joined
@@ -376,8 +356,7 @@ class SyncRestServlet(RestServlet):
         self,
         rooms: List[InvitedSyncResult],
         time_now: int,
-        token_id: Optional[int],
-        event_formatter: Callable[[JsonDict], JsonDict],
+        serialize_options: SerializeEventConfig,
     ) -> JsonDict:
         """
         Encode the invited rooms in a sync result
@@ -385,10 +364,7 @@ class SyncRestServlet(RestServlet):
         Args:
             rooms: list of sync results for rooms this user is invited to
             time_now: current time - used as a baseline for age calculations
-            token_id: ID of the user's auth token - used for namespacing
-                of transaction IDs
-            event_formatter: function to convert from federation format
-                to client format
+            serialize_options: Event serializer options
 
         Returns:
             The invited rooms list, in our response format
@@ -396,11 +372,7 @@ class SyncRestServlet(RestServlet):
         invited = {}
         for room in rooms:
             invite = self._event_serializer.serialize_event(
-                room.invite,
-                time_now,
-                token_id=token_id,
-                event_format=event_formatter,
-                include_stripped_room_state=True,
+                room.invite, time_now, config=serialize_options
             )
             unsigned = dict(invite.get("unsigned", {}))
             invite["unsigned"] = unsigned
@@ -415,8 +387,7 @@ class SyncRestServlet(RestServlet):
         self,
         rooms: List[KnockedSyncResult],
         time_now: int,
-        token_id: Optional[int],
-        event_formatter: Callable[[Dict], Dict],
+        serialize_options: SerializeEventConfig,
     ) -> Dict[str, Dict[str, Any]]:
         """
         Encode the rooms we've knocked on in a sync result.
@@ -424,8 +395,7 @@ class SyncRestServlet(RestServlet):
         Args:
             rooms: list of sync results for rooms this user is knocking on
             time_now: current time - used as a baseline for age calculations
-            token_id: ID of the user's auth token - used for namespacing of transaction IDs
-            event_formatter: function to convert from federation format to client format
+            serialize_options: Event serializer options
 
         Returns:
             The list of rooms the user has knocked on, in our response format.
@@ -433,11 +403,7 @@ class SyncRestServlet(RestServlet):
         knocked = {}
         for room in rooms:
             knock = self._event_serializer.serialize_event(
-                room.knock,
-                time_now,
-                token_id=token_id,
-                event_format=event_formatter,
-                include_stripped_room_state=True,
+                room.knock, time_now, config=serialize_options
             )
 
             # Extract the `unsigned` key from the knock event.
@@ -470,9 +436,7 @@ class SyncRestServlet(RestServlet):
         self,
         rooms: List[ArchivedSyncResult],
         time_now: int,
-        token_id: Optional[int],
-        event_fields: List[str],
-        event_formatter: Callable[[JsonDict], JsonDict],
+        serialize_options: SerializeEventConfig,
     ) -> JsonDict:
         """
         Encode the archived rooms in a sync result
@@ -480,23 +444,14 @@ class SyncRestServlet(RestServlet):
         Args:
             rooms: list of sync results for rooms this user is joined to
             time_now: current time - used as a baseline for age calculations
-            token_id: ID of the user's auth token - used for namespacing
-                of transaction IDs
-            event_fields: List of event fields to include. If empty,
-                all fields will be returned.
-            event_formatter: function to convert from federation format to client format
+            serialize_options: Event serializer options
         Returns:
             The archived rooms list, in our response format
         """
         joined = {}
         for room in rooms:
             joined[room.room_id] = await self.encode_room(
-                room,
-                time_now,
-                token_id,
-                joined=False,
-                only_fields=event_fields,
-                event_formatter=event_formatter,
+                room, time_now, joined=False, serialize_options=serialize_options
             )
 
         return joined
@@ -505,10 +460,8 @@ class SyncRestServlet(RestServlet):
         self,
         room: Union[JoinedSyncResult, ArchivedSyncResult],
         time_now: int,
-        token_id: Optional[int],
         joined: bool,
-        only_fields: Optional[List[str]],
-        event_formatter: Callable[[JsonDict], JsonDict],
+        serialize_options: SerializeEventConfig,
     ) -> JsonDict:
         """
         Args:
@@ -524,20 +477,6 @@ class SyncRestServlet(RestServlet):
         Returns:
             The room, encoded in our response format
         """
-
-        def serialize(
-            events: Iterable[EventBase],
-            aggregations: Optional[Dict[str, BundledAggregations]] = None,
-        ) -> List[JsonDict]:
-            return self._event_serializer.serialize_events(
-                events,
-                time_now=time_now,
-                bundle_aggregations=aggregations,
-                token_id=token_id,
-                event_format=event_formatter,
-                only_event_fields=only_fields,
-            )
-
         state_dict = room.state
         timeline_events = room.timeline.events
 
@@ -554,9 +493,14 @@ class SyncRestServlet(RestServlet):
                     event.room_id,
                 )
 
-        serialized_state = serialize(state_events)
-        serialized_timeline = serialize(
-            timeline_events, room.timeline.bundled_aggregations
+        serialized_state = self._event_serializer.serialize_events(
+            state_events, time_now, config=serialize_options
+        )
+        serialized_timeline = self._event_serializer.serialize_events(
+            timeline_events,
+            time_now,
+            config=serialize_options,
+            bundle_aggregations=room.timeline.bundled_aggregations,
         )
 
         account_data = room.account_data
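
Editor's note: the sync.py changes above collapse several per-call parameters (token_id, event_fields, event_formatter) into one serialization options object that is built once and passed everywhere. A minimal sketch of that parameter-object pattern, using illustrative names rather than Synapse's actual classes:

    from dataclasses import dataclass
    from typing import Callable, List, Optional

    JsonDict = dict

    @dataclass(frozen=True)
    class SerializeOptions:
        event_format: Callable[[JsonDict], JsonDict]
        token_id: Optional[int] = None
        only_event_fields: Optional[List[str]] = None
        include_stripped_room_state: bool = False

    def serialize_event(event: JsonDict, time_now: int, config: SerializeOptions) -> JsonDict:
        # One options object replaces a long, error-prone positional list.
        out = config.event_format(dict(event))
        if config.only_event_fields is not None:
            out = {k: v for k, v in out.items() if k in config.only_event_fields}
        return out

    # opts = SerializeOptions(event_format=lambda e: e, token_id=42)
    # serialize_event({"type": "m.room.message"}, 0, opts)
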
diff --git a/synapse/rest/client/user_directory.py b/synapse/rest/client/user_directory.py
index a47d9bd01d..116c982ce6 100644
--- a/synapse/rest/client/user_directory.py
+++ b/synapse/rest/client/user_directory.py
@@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError
 from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.http.site import SynapseRequest
-from synapse.types import JsonDict
+from synapse.types import JsonMapping
 
 from ._base import client_patterns
 
@@ -38,7 +38,7 @@ class UserDirectorySearchRestServlet(RestServlet):
         self.auth = hs.get_auth()
         self.user_directory_handler = hs.get_user_directory_handler()
 
-    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonMapping]:
         """Searches for users in directory
 
         Returns:
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 2e5d0e4e22..9a65aa4843 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -101,6 +101,7 @@ class VersionsRestServlet(RestServlet):
                     "org.matrix.msc3030": self.config.experimental.msc3030_enabled,
                     # Adds support for thread relations, per MSC3440.
                     "org.matrix.msc3440": self.config.experimental.msc3440_enabled,
+                    "org.matrix.msc3440.stable": True,  # TODO: remove when "v1.3" is added above
                 },
             },
         )
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index 9b40fd8a6c..c35d42fab8 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -298,7 +298,6 @@ class Responder:
         Returns:
             Resolves once the response has finished being written
         """
-        pass
 
     def __enter__(self) -> None:
         pass
diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py
index 872a9e72e8..4cc9c66fbe 100644
--- a/synapse/rest/media/v1/preview_html.py
+++ b/synapse/rest/media/v1/preview_html.py
@@ -16,7 +16,6 @@ import itertools
 import logging
 import re
 from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Set, Union
-from urllib import parse as urlparse
 
 if TYPE_CHECKING:
     from lxml import etree
@@ -144,9 +143,7 @@ def decode_body(
     return etree.fromstring(body, parser)
 
 
-def parse_html_to_open_graph(
-    tree: "etree.Element", media_uri: str
-) -> Dict[str, Optional[str]]:
+def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]:
     """
     Parse the HTML document into an Open Graph response.
 
@@ -155,7 +152,6 @@ def parse_html_to_open_graph(
 
     Args:
         tree: The parsed HTML document.
-        media_url: The URI used to download the body.
 
     Returns:
         The Open Graph response as a dictionary.
@@ -209,7 +205,7 @@ def parse_html_to_open_graph(
             "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image']/@content"
         )
         if meta_image:
-            og["og:image"] = rebase_url(meta_image[0], media_uri)
+            og["og:image"] = meta_image[0]
         else:
             # TODO: consider inlined CSS styles as well as width & height attribs
             images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
@@ -320,37 +316,6 @@ def _iterate_over_text(
             )
 
 
-def rebase_url(url: str, base: str) -> str:
-    """
-    Resolves a potentially relative `url` against an absolute `base` URL.
-
-    For example:
-
-        >>> rebase_url("subpage", "https://example.com/foo/")
-        'https://example.com/foo/subpage'
-        >>> rebase_url("sibling", "https://example.com/foo")
-        'https://example.com/sibling'
-        >>> rebase_url("/bar", "https://example.com/foo/")
-        'https://example.com/bar'
-        >>> rebase_url("https://alice.com/a/", "https://example.com/foo/")
-        'https://alice.com/a'
-    """
-    base_parts = urlparse.urlparse(base)
-    # Convert the parsed URL to a list for (potential) modification.
-    url_parts = list(urlparse.urlparse(url))
-    # Add a scheme, if one does not exist.
-    if not url_parts[0]:
-        url_parts[0] = base_parts.scheme or "http"
-    # Fix up the hostname, if this is not a data URL.
-    if url_parts[0] != "data" and not url_parts[1]:
-        url_parts[1] = base_parts.netloc
-        # If the path does not start with a /, nest it under the base path's last
-        # directory.
-        if not url_parts[2].startswith("/"):
-            url_parts[2] = re.sub(r"/[^/]+$", "/", base_parts.path) + url_parts[2]
-    return urlparse.urlunparse(url_parts)
-
-
 def summarize_paragraphs(
     text_nodes: Iterable[str], min_size: int = 200, max_size: int = 500
 ) -> Optional[str]:
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 14ea88b240..d47af8ead6 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -22,7 +22,7 @@ import shutil
 import sys
 import traceback
 from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple
-from urllib import parse as urlparse
+from urllib.parse import urljoin, urlparse, urlsplit
 from urllib.request import urlopen
 
 import attr
@@ -44,11 +44,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.rest.media.v1._base import get_filename_from_headers
 from synapse.rest.media.v1.media_storage import MediaStorage
 from synapse.rest.media.v1.oembed import OEmbedProvider
-from synapse.rest.media.v1.preview_html import (
-    decode_body,
-    parse_html_to_open_graph,
-    rebase_url,
-)
+from synapse.rest.media.v1.preview_html import decode_body, parse_html_to_open_graph
 from synapse.types import JsonDict, UserID
 from synapse.util import json_encoder
 from synapse.util.async_helpers import ObservableDeferred
@@ -187,7 +183,7 @@ class PreviewUrlResource(DirectServeJsonResource):
             ts = self.clock.time_msec()
 
         # XXX: we could move this into _do_preview if we wanted.
-        url_tuple = urlparse.urlsplit(url)
+        url_tuple = urlsplit(url)
         for entry in self.url_preview_url_blacklist:
             match = True
             for attrib in entry:
@@ -322,7 +318,7 @@ class PreviewUrlResource(DirectServeJsonResource):
 
                 # Parse Open Graph information from the HTML in case the oEmbed
                 # response failed or is incomplete.
-                og_from_html = parse_html_to_open_graph(tree, media_info.uri)
+                og_from_html = parse_html_to_open_graph(tree)
 
                 # Compile the Open Graph response by using the scraped
                 # information from the HTML and overlaying any information
@@ -588,12 +584,17 @@ class PreviewUrlResource(DirectServeJsonResource):
         if "og:image" not in og or not og["og:image"]:
             return
 
+        # The image URL from the HTML might be relative to the previewed page;
+        # convert it to a URL which can be requested directly.
+        image_url = og["og:image"]
+        url_parts = urlparse(image_url)
+        if url_parts.scheme != "data":
+            image_url = urljoin(media_info.uri, image_url)
+
         # FIXME: it might be cleaner to use the same flow as the main /preview_url
         # request itself and benefit from the same caching etc.  But for now we
         # just rely on the caching on the master request to speed things up.
-        image_info = await self._handle_url(
-            rebase_url(og["og:image"], media_info.uri), user, allow_data_urls=True
-        )
+        image_info = await self._handle_url(image_url, user, allow_data_urls=True)
 
         if _is_media(image_info.media_type):
             # TODO: make sure we don't choke on white-on-transparent images
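
Editor's note: the og:image handling above now resolves a possibly-relative image URL against the previewed page with urljoin, leaving data: URLs untouched. A standalone illustration of that resolution step (the helper name is illustrative):

    from urllib.parse import urljoin, urlparse

    def resolve_image_url(page_url: str, image_url: str) -> str:
        # data: URLs carry their content inline and must not be rebased.
        if urlparse(image_url).scheme == "data":
            return image_url
        return urljoin(page_url, image_url)

    # resolve_image_url("https://example.com/articles/42", "/img/cover.png")
    #   -> "https://example.com/img/cover.png"
    # resolve_image_url("https://example.com/articles/42", "cover.png")
    #   -> "https://example.com/articles/cover.png"
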
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index 18bf977d3d..1c9b71d69c 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -16,7 +16,7 @@ import abc
 import logging
 import os
 import shutil
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Callable, Optional
 
 from synapse.config._base import Config
 from synapse.logging.context import defer_to_thread, run_in_background
@@ -150,8 +150,13 @@ class FileStorageProviderBackend(StorageProvider):
         dirname = os.path.dirname(backup_fname)
         os.makedirs(dirname, exist_ok=True)
 
+        # mypy needs help inferring the type of the second parameter, which is generic
+        shutil_copyfile: Callable[[str, str], str] = shutil.copyfile
         await defer_to_thread(
-            self.hs.get_reactor(), shutil.copyfile, primary_fname, backup_fname
+            self.hs.get_reactor(),
+            shutil_copyfile,
+            primary_fname,
+            backup_fname,
         )
 
     async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
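
Editor's note: the annotation above pins shutil.copyfile to a concrete Callable type so mypy can infer the generic parameter of defer_to_thread. The same trick in isolation (the forwarding helper here is hypothetical, not Synapse's API):

    import shutil
    from typing import Callable, TypeVar

    R = TypeVar("R")

    def call_later(f: Callable[[str, str], R], a: str, b: str) -> R:
        # Stand-in for a generic "run this on another thread" helper.
        return f(a, b)

    # Naming the signature up front removes the ambiguity mypy would otherwise
    # face when inferring R for the generic call below.
    copy_file: Callable[[str, str], str] = shutil.copyfile
    # call_later(copy_file, "primary/file", "backup/file")
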
diff --git a/synapse/rest/synapse/client/pick_username.py b/synapse/rest/synapse/client/pick_username.py
index 28ae083497..6338fbaaa9 100644
--- a/synapse/rest/synapse/client/pick_username.py
+++ b/synapse/rest/synapse/client/pick_username.py
@@ -92,12 +92,20 @@ class AccountDetailsResource(DirectServeHtmlResource):
             self._sso_handler.render_error(request, "bad_session", e.msg, code=e.code)
             return
 
+        # The configuration might mandate going through this step to validate an
+        # automatically generated localpart, so session.chosen_localpart might already
+        # be set.
+        localpart = ""
+        if session.chosen_localpart is not None:
+            localpart = session.chosen_localpart
+
         idp_id = session.auth_provider_id
         template_params = {
             "idp": self._sso_handler.get_identity_providers()[idp_id],
             "user_attributes": {
                 "display_name": session.display_name,
                 "emails": session.emails,
+                "localpart": localpart,
             },
         }
 
diff --git a/synapse/server.py b/synapse/server.py
index b5e2a319bc..380369db92 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -94,6 +94,7 @@ from synapse.handlers.profile import ProfileHandler
 from synapse.handlers.read_marker import ReadMarkerHandler
 from synapse.handlers.receipts import ReceiptsHandler
 from synapse.handlers.register import RegistrationHandler
+from synapse.handlers.relations import RelationsHandler
 from synapse.handlers.room import (
     RoomContextHandler,
     RoomCreationHandler,
@@ -145,7 +146,7 @@ from synapse.util.stringutils import random_string
 logger = logging.getLogger(__name__)
 
 if TYPE_CHECKING:
-    from txredisapi import RedisProtocol
+    from txredisapi import ConnectionHandler
 
     from synapse.handlers.oidc import OidcHandler
     from synapse.handlers.saml import SamlHandler
@@ -328,7 +329,6 @@ class HomeServer(metaclass=abc.ABCMeta):
         Does nothing in this base class; overridden in derived classes to start the
         appropriate listeners.
         """
-        pass
 
     def setup_background_tasks(self) -> None:
         """
@@ -639,7 +639,7 @@ class HomeServer(metaclass=abc.ABCMeta):
         return ReadMarkerHandler(self)
 
     @cache_in_self
-    def get_tcp_replication(self) -> ReplicationCommandHandler:
+    def get_replication_command_handler(self) -> ReplicationCommandHandler:
         return ReplicationCommandHandler(self)
 
     @cache_in_self
@@ -721,6 +721,10 @@ class HomeServer(metaclass=abc.ABCMeta):
         return PaginationHandler(self)
 
     @cache_in_self
+    def get_relations_handler(self) -> RelationsHandler:
+        return RelationsHandler(self)
+
+    @cache_in_self
     def get_room_context_handler(self) -> RoomContextHandler:
         return RoomContextHandler(self)
 
@@ -754,7 +758,7 @@ class HomeServer(metaclass=abc.ABCMeta):
 
     @cache_in_self
     def get_event_client_serializer(self) -> EventClientSerializer:
-        return EventClientSerializer()
+        return EventClientSerializer(self)
 
     @cache_in_self
     def get_password_policy_handler(self) -> PasswordPolicyHandler:
@@ -807,7 +811,7 @@ class HomeServer(metaclass=abc.ABCMeta):
         return AccountHandler(self)
 
     @cache_in_self
-    def get_outbound_redis_connection(self) -> "RedisProtocol":
+    def get_outbound_redis_connection(self) -> "ConnectionHandler":
         """
         The Redis connection used for replication.
 
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 6babd5963c..21888cc8c5 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -194,7 +194,7 @@ class StateHandler:
         }
 
     async def get_current_state_ids(
-        self, room_id: str, latest_event_ids: Optional[Iterable[str]] = None
+        self, room_id: str, latest_event_ids: Optional[Collection[str]] = None
     ) -> StateMap[str]:
         """Get the current state, or the state at a set of events, for a room
 
@@ -243,7 +243,7 @@ class StateHandler:
         return await self.get_hosts_in_room_at_events(room_id, event_ids)
 
     async def get_hosts_in_room_at_events(
-        self, room_id: str, event_ids: Iterable[str]
+        self, room_id: str, event_ids: Collection[str]
     ) -> Set[str]:
         """Get the hosts that were in a room at the given event ids
 
@@ -404,7 +404,7 @@ class StateHandler:
 
     @measure_func()
     async def resolve_state_groups_for_events(
-        self, room_id: str, event_ids: Iterable[str]
+        self, room_id: str, event_ids: Collection[str]
     ) -> _StateCacheEntry:
         """Given a list of event_ids this method fetches the state at each
         event, resolves conflicts between them and returns them.
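
Editor's note: the signature changes above narrow Iterable to Collection. In typing terms a Collection is sized, containment-testable and safely re-iterable, while a bare Iterable may be a one-shot generator; a small illustration:

    from typing import Collection, Iterable

    def summarise(ids: Collection[str]) -> str:
        # Safe with Collection: len() plus a second pass over the same values.
        return f"{len(ids)} ids, first={sorted(ids)[:1]}"

    def peek(ids: Iterable[str]) -> str:
        # With a plain Iterable, assume it can only be consumed once.
        return next(iter(ids), "<none>")
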
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index d64910aded..08c6eabc6d 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -60,18 +60,19 @@ class _BackgroundUpdateHandler:
 
 
 class _BackgroundUpdateContextManager:
-    BACKGROUND_UPDATE_INTERVAL_MS = 1000
-    BACKGROUND_UPDATE_DURATION_MS = 100
-
-    def __init__(self, sleep: bool, clock: Clock):
+    def __init__(
+        self, sleep: bool, clock: Clock, sleep_duration_ms: int, update_duration: int
+    ):
         self._sleep = sleep
         self._clock = clock
+        self._sleep_duration_ms = sleep_duration_ms
+        self._update_duration_ms = update_duration
 
     async def __aenter__(self) -> int:
         if self._sleep:
-            await self._clock.sleep(self.BACKGROUND_UPDATE_INTERVAL_MS / 1000)
+            await self._clock.sleep(self._sleep_duration_ms / 1000)
 
-        return self.BACKGROUND_UPDATE_DURATION_MS
+        return self._update_duration_ms
 
     async def __aexit__(self, *exc) -> None:
         pass
@@ -102,10 +103,12 @@ class BackgroundUpdatePerformance:
         Returns:
             A duration in ms as a float
         """
-        if self.avg_duration_ms == 0:
-            return 0
-        elif self.total_item_count == 0:
+        # We want to return None if this is the first background update item
+        if self.total_item_count == 0:
             return None
+        # Avoid dividing by zero
+        elif self.avg_duration_ms == 0:
+            return 0
         else:
             # Use the exponential moving average so that we can adapt to
             # changes in how long the update process takes.
@@ -131,9 +134,6 @@ class BackgroundUpdater:
     process and autotuning the batch size.
     """
 
-    MINIMUM_BACKGROUND_BATCH_SIZE = 1
-    DEFAULT_BACKGROUND_BATCH_SIZE = 100
-
     def __init__(self, hs: "HomeServer", database: "DatabasePool"):
         self._clock = hs.get_clock()
         self.db_pool = database
@@ -158,6 +158,14 @@ class BackgroundUpdater:
         # enable/disable background updates via the admin API.
         self.enabled = True
 
+        self.minimum_background_batch_size = hs.config.background_updates.min_batch_size
+        self.default_background_batch_size = (
+            hs.config.background_updates.default_batch_size
+        )
+        self.update_duration_ms = hs.config.background_updates.update_duration_ms
+        self.sleep_duration_ms = hs.config.background_updates.sleep_duration_ms
+        self.sleep_enabled = hs.config.background_updates.sleep_enabled
+
     def register_update_controller_callbacks(
         self,
         on_update: ON_UPDATE_CALLBACK,
@@ -214,7 +222,9 @@ class BackgroundUpdater:
         if self._on_update_callback is not None:
             return self._on_update_callback(update_name, database_name, oneshot)
 
-        return _BackgroundUpdateContextManager(sleep, self._clock)
+        return _BackgroundUpdateContextManager(
+            sleep, self._clock, self.sleep_duration_ms, self.update_duration_ms
+        )
 
     async def _default_batch_size(self, update_name: str, database_name: str) -> int:
         """The batch size to use for the first iteration of a new background
@@ -223,7 +233,7 @@ class BackgroundUpdater:
         if self._default_batch_size_callback is not None:
             return await self._default_batch_size_callback(update_name, database_name)
 
-        return self.DEFAULT_BACKGROUND_BATCH_SIZE
+        return self.default_background_batch_size
 
     async def _min_batch_size(self, update_name: str, database_name: str) -> int:
         """A lower bound on the batch size of a new background update.
@@ -233,7 +243,7 @@ class BackgroundUpdater:
         if self._min_batch_size_callback is not None:
             return await self._min_batch_size_callback(update_name, database_name)
 
-        return self.MINIMUM_BACKGROUND_BATCH_SIZE
+        return self.minimum_background_batch_size
 
     def get_current_update(self) -> Optional[BackgroundUpdatePerformance]:
         """Returns the current background update, if any."""
@@ -252,9 +262,12 @@ class BackgroundUpdater:
         if self.enabled:
             # if we start a new background update, not all updates are done.
             self._all_done = False
-            run_as_background_process("background_updates", self.run_background_updates)
+            sleep = self.sleep_enabled
+            run_as_background_process(
+                "background_updates", self.run_background_updates, sleep
+            )
 
-    async def run_background_updates(self, sleep: bool = True) -> None:
+    async def run_background_updates(self, sleep: bool) -> None:
         if self._running or not self.enabled:
             return
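
Editor's note: the refactor above replaces the class-level constants with values read from the background_updates config section, and the context manager now sleeps for a configurable interval before each batch and hands back a configurable target duration for batch sizing. A rough asyncio sketch of that throttling pattern (illustrative, not Synapse's Twisted-based implementation):

    import asyncio

    class UpdateThrottle:
        def __init__(self, sleep: bool, sleep_duration_ms: int, update_duration_ms: int):
            self._sleep = sleep
            self._sleep_duration_ms = sleep_duration_ms
            self._update_duration_ms = update_duration_ms

        async def __aenter__(self) -> int:
            # Back off before each batch so foreground work keeps priority.
            if self._sleep:
                await asyncio.sleep(self._sleep_duration_ms / 1000)
            # The caller sizes its next batch to roughly fit this duration.
            return self._update_duration_ms

        async def __aexit__(self, *exc) -> None:
            pass
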
 
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 99802228c9..367709a1a7 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -41,6 +41,7 @@ from prometheus_client import Histogram
 from typing_extensions import Literal
 
 from twisted.enterprise import adbapi
+from twisted.internet import defer
 
 from synapse.api.errors import StoreError
 from synapse.config.database import DatabaseConnectionConfig
@@ -55,6 +56,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.background_updates import BackgroundUpdater
 from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
 from synapse.storage.types import Connection, Cursor
+from synapse.util.async_helpers import delay_cancellation
 from synapse.util.iterutils import batch_iter
 
 if TYPE_CHECKING:
@@ -286,7 +288,7 @@ class LoggingTransaction:
         """
 
         if isinstance(self.database_engine, PostgresEngine):
-            from psycopg2.extras import execute_batch  # type: ignore
+            from psycopg2.extras import execute_batch
 
             self._do_execute(lambda *x: execute_batch(self.txn, *x), sql, args)
         else:
@@ -300,10 +302,18 @@ class LoggingTransaction:
         rows (e.g. INSERTs).
         """
         assert isinstance(self.database_engine, PostgresEngine)
-        from psycopg2.extras import execute_values  # type: ignore
+        from psycopg2.extras import execute_values
 
         return self._do_execute(
-            lambda *x: execute_values(self.txn, *x, fetch=fetch), sql, *args
+            # Type ignore: mypy is unhappy because if `x` is a 5-tuple, then there will
+            # be two values for `fetch`: one given positionally, and another given
+            # as a keyword argument. We might be able to fix this by
+            # - propagating the signature of psycopg2.extras.execute_values to this
+            #   function, or
+            # - changing `*args: Any` to `values: T` for some appropriate T.
+            lambda *x: execute_values(self.txn, *x, fetch=fetch),  # type: ignore[misc]
+            sql,
+            *args,
         )
 
     def execute(self, sql: str, *args: Any) -> None:
@@ -732,34 +742,45 @@ class DatabasePool:
         Returns:
             The result of func
         """
-        after_callbacks: List[_CallbackListEntry] = []
-        exception_callbacks: List[_CallbackListEntry] = []
 
-        if not current_context():
-            logger.warning("Starting db txn '%s' from sentinel context", desc)
+        async def _runInteraction() -> R:
+            after_callbacks: List[_CallbackListEntry] = []
+            exception_callbacks: List[_CallbackListEntry] = []
 
-        try:
-            with opentracing.start_active_span(f"db.{desc}"):
-                result = await self.runWithConnection(
-                    self.new_transaction,
-                    desc,
-                    after_callbacks,
-                    exception_callbacks,
-                    func,
-                    *args,
-                    db_autocommit=db_autocommit,
-                    isolation_level=isolation_level,
-                    **kwargs,
-                )
+            if not current_context():
+                logger.warning("Starting db txn '%s' from sentinel context", desc)
 
-            for after_callback, after_args, after_kwargs in after_callbacks:
-                after_callback(*after_args, **after_kwargs)
-        except Exception:
-            for after_callback, after_args, after_kwargs in exception_callbacks:
-                after_callback(*after_args, **after_kwargs)
-            raise
+            try:
+                with opentracing.start_active_span(f"db.{desc}"):
+                    result = await self.runWithConnection(
+                        self.new_transaction,
+                        desc,
+                        after_callbacks,
+                        exception_callbacks,
+                        func,
+                        *args,
+                        db_autocommit=db_autocommit,
+                        isolation_level=isolation_level,
+                        **kwargs,
+                    )
 
-        return cast(R, result)
+                for after_callback, after_args, after_kwargs in after_callbacks:
+                    after_callback(*after_args, **after_kwargs)
+
+                return cast(R, result)
+            except Exception:
+                for after_callback, after_args, after_kwargs in exception_callbacks:
+                    after_callback(*after_args, **after_kwargs)
+                raise
+
+        # To handle cancellation, we ensure that `after_callback`s and
+        # `exception_callback`s are always run, since the transaction will complete
+        # on another thread regardless of cancellation.
+        #
+        # We also wait until everything above is done before releasing the
+        # `CancelledError`, so that logging contexts won't get used after they have been
+        # finished.
+        return await delay_cancellation(defer.ensureDeferred(_runInteraction()))
 
     async def runWithConnection(
         self,
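
Editor's note: the comment above explains why the interaction is wrapped in delay_cancellation: the database transaction finishes on another thread regardless of cancellation, so the after/exception callbacks must still run and the logging context must not be torn down early. Synapse's helper lives in synapse.util.async_helpers and operates on Twisted Deferreds; a rough asyncio analogue of the idea (simplified, and repeated cancellation is not handled):

    import asyncio
    from typing import Awaitable, TypeVar

    T = TypeVar("T")

    async def delay_cancellation(awaitable: Awaitable[T]) -> T:
        # Run the work as its own task; if the caller is cancelled while
        # waiting, let the task finish (so its callbacks and cleanup run)
        # and only then propagate the cancellation.
        task = asyncio.ensure_future(awaitable)
        try:
            return await asyncio.shield(task)
        except asyncio.CancelledError:
            await task
            raise
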
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index 52146aacc8..9af9f4f18e 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -14,7 +14,17 @@
 # limitations under the License.
 
 import logging
-from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, cast
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    FrozenSet,
+    Iterable,
+    List,
+    Optional,
+    Tuple,
+    cast,
+)
 
 from synapse.api.constants import AccountDataTypes
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
@@ -365,7 +375,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
         )
 
     @cached(max_entries=5000, iterable=True)
-    async def ignored_by(self, user_id: str) -> Set[str]:
+    async def ignored_by(self, user_id: str) -> FrozenSet[str]:
         """
         Get users which ignore the given user.
 
@@ -375,7 +385,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
         Return:
             The user IDs which ignore the given user.
         """
-        return set(
+        return frozenset(
             await self.db_pool.simple_select_onecol(
                 table="ignored_users",
                 keyvalues={"ignored_user_id": user_id},
@@ -384,6 +394,26 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
             )
         )
 
+    @cached(max_entries=5000, iterable=True)
+    async def ignored_users(self, user_id: str) -> FrozenSet[str]:
+        """
+        Get users which the given user ignores.
+
+        Params:
+            user_id: The user ID which is making the request.
+
+        Return:
+            The user IDs which are ignored by the given user.
+        """
+        return frozenset(
+            await self.db_pool.simple_select_onecol(
+                table="ignored_users",
+                keyvalues={"ignorer_user_id": user_id},
+                retcol="ignored_user_id",
+                desc="ignored_users",
+            )
+        )
+
     def process_replication_rows(
         self,
         stream_name: str,
@@ -529,6 +559,10 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
         else:
             currently_ignored_users = set()
 
+        # If the data has not changed, nothing to do.
+        if previously_ignored_users == currently_ignored_users:
+            return
+
         # Delete entries which are no longer ignored.
         self.db_pool.simple_delete_many_txn(
             txn,
@@ -551,6 +585,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
         # Invalidate the cache for any ignored users which were added or removed.
         for ignored_user_id in previously_ignored_users ^ currently_ignored_users:
             self._invalidate_cache_and_stream(txn, self.ignored_by, (ignored_user_id,))
+        self._invalidate_cache_and_stream(txn, self.ignored_users, (user_id,))
 
     async def purge_account_data_for_user(self, user_id: str) -> None:
         """
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index c428dd5596..dd4e83a2ad 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -23,6 +23,7 @@ from synapse.replication.tcp.streams.events import (
     EventsStream,
     EventsStreamCurrentStateRow,
     EventsStreamEventRow,
+    EventsStreamRow,
 )
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.database import (
@@ -31,6 +32,7 @@ from synapse.storage.database import (
     LoggingTransaction,
 )
 from synapse.storage.engines import PostgresEngine
+from synapse.util.caches.descriptors import _CachedFunction
 from synapse.util.iterutils import batch_iter
 
 if TYPE_CHECKING:
@@ -82,7 +84,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         if last_id == current_id:
             return [], current_id, False
 
-        def get_all_updated_caches_txn(txn):
+        def get_all_updated_caches_txn(
+            txn: LoggingTransaction,
+        ) -> Tuple[List[Tuple[int, tuple]], int, bool]:
             # We purposefully don't bound by the current token, as we want to
             # send across cache invalidations as quickly as possible. Cache
             # invalidations are idempotent, so duplicates are fine.
@@ -107,7 +111,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             "get_all_updated_caches", get_all_updated_caches_txn
         )
 
-    def process_replication_rows(self, stream_name, instance_name, token, rows):
+    def process_replication_rows(
+        self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
+    ) -> None:
         if stream_name == EventsStream.NAME:
             for row in rows:
                 self._process_event_stream_row(token, row)
@@ -142,10 +148,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
 
         super().process_replication_rows(stream_name, instance_name, token, rows)
 
-    def _process_event_stream_row(self, token, row):
+    def _process_event_stream_row(self, token: int, row: EventsStreamRow) -> None:
         data = row.data
 
         if row.type == EventsStreamEventRow.TypeId:
+            assert isinstance(data, EventsStreamEventRow)
             self._invalidate_caches_for_event(
                 token,
                 data.event_id,
@@ -157,9 +164,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
                 backfilled=False,
             )
         elif row.type == EventsStreamCurrentStateRow.TypeId:
-            self._curr_state_delta_stream_cache.entity_has_changed(
-                row.data.room_id, token
-            )
+            assert isinstance(data, EventsStreamCurrentStateRow)
+            self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token)
 
             if data.type == EventTypes.Member:
                 self.get_rooms_for_user_with_stream_ordering.invalidate(
@@ -170,15 +176,15 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
 
     def _invalidate_caches_for_event(
         self,
-        stream_ordering,
-        event_id,
-        room_id,
-        etype,
-        state_key,
-        redacts,
-        relates_to,
-        backfilled,
-    ):
+        stream_ordering: int,
+        event_id: str,
+        room_id: str,
+        etype: str,
+        state_key: Optional[str],
+        redacts: Optional[str],
+        relates_to: Optional[str],
+        backfilled: bool,
+    ) -> None:
         self._invalidate_get_event_cache(event_id)
         self.have_seen_event.invalidate((room_id, event_id))
 
@@ -186,11 +192,19 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
 
         self.get_unread_event_push_actions_by_room_for_user.invalidate((room_id,))
 
+        # The `_get_membership_from_event_id` cache is immutable, except for the
+        # case where we look up an event *before* persisting it.
+        self._get_membership_from_event_id.invalidate((event_id,))
+
         if not backfilled:
             self._events_stream_cache.entity_has_changed(room_id, stream_ordering)
 
         if redacts:
             self._invalidate_get_event_cache(redacts)
+            # Caches which might leak edits must be invalidated for the event being
+            # redacted.
+            self.get_relations_for_event.invalidate((redacts,))
+            self.get_applicable_edit.invalidate((redacts,))
 
         if etype == EventTypes.Member:
             self._membership_stream_cache.entity_has_changed(state_key, stream_ordering)
@@ -200,8 +214,12 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             self.get_relations_for_event.invalidate((relates_to,))
             self.get_aggregation_groups_for_event.invalidate((relates_to,))
             self.get_applicable_edit.invalidate((relates_to,))
+            self.get_thread_summary.invalidate((relates_to,))
+            self.get_thread_participated.invalidate((relates_to,))
 
-    async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]):
+    async def invalidate_cache_and_stream(
+        self, cache_name: str, keys: Tuple[Any, ...]
+    ) -> None:
         """Invalidates the cache and adds it to the cache stream so slaves
         will know to invalidate their caches.
 
@@ -221,7 +239,12 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             keys,
         )
 
-    def _invalidate_cache_and_stream(self, txn, cache_func, keys):
+    def _invalidate_cache_and_stream(
+        self,
+        txn: LoggingTransaction,
+        cache_func: _CachedFunction,
+        keys: Tuple[Any, ...],
+    ) -> None:
         """Invalidates the cache and adds it to the cache stream so slaves
         will know to invalidate their caches.
 
@@ -232,7 +255,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         txn.call_after(cache_func.invalidate, keys)
         self._send_invalidation_to_replication(txn, cache_func.__name__, keys)
 
-    def _invalidate_all_cache_and_stream(self, txn, cache_func):
+    def _invalidate_all_cache_and_stream(
+        self, txn: LoggingTransaction, cache_func: _CachedFunction
+    ) -> None:
         """Invalidates the entire cache and adds it to the cache stream so slaves
         will know to invalidate their caches.
         """
@@ -273,8 +298,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             )
 
     def _send_invalidation_to_replication(
-        self, txn, cache_name: str, keys: Optional[Iterable[Any]]
-    ):
+        self, txn: LoggingTransaction, cache_name: str, keys: Optional[Iterable[Any]]
+    ) -> None:
         """Notifies replication that given cache has been invalidated.
 
         Note that this does *not* invalidate the cache locally.
@@ -309,7 +334,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
                     "instance_name": self._instance_name,
                     "cache_func": cache_name,
                     "keys": keys,
-                    "invalidation_ts": self.clock.time_msec(),
+                    "invalidation_ts": self._clock.time_msec(),
                 },
             )
 
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 1392363de1..b4a1b041b1 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -298,6 +298,9 @@ class DeviceInboxWorkerStore(SQLBaseStore):
                 # This user has new messages sent to them. Query messages for them
                 user_ids_to_query.add(user_id)
 
+        if not user_ids_to_query:
+            return {}, to_stream_id
+
         def get_device_messages_txn(txn: LoggingTransaction):
             # Build a query to select messages from any of the given devices that
             # are between the given stream id bounds.
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index ca2a9ba9d1..d253243125 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1518,7 +1518,7 @@ class PersistEventsStore:
                 )
 
                 # Remove from relations table.
-                self._handle_redaction(txn, event.redacts)
+                self._handle_redact_relations(txn, event.redacts)
 
         # Update the event_forward_extremities, event_backward_extremities and
         # event_edges tables.
@@ -1619,9 +1619,12 @@ class PersistEventsStore:
 
         txn.call_after(prefill)
 
-    def _store_redaction(self, txn, event):
-        # invalidate the cache for the redacted event
+    def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None:
+        # Invalidate the caches for the redacted event, note that these caches
+        # are also cleared as part of event replication in _invalidate_caches_for_event.
         txn.call_after(self.store._invalidate_get_event_cache, event.redacts)
+        txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,))
+        txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,))
 
         self.db_pool.simple_upsert_txn(
             txn,
@@ -1742,6 +1745,13 @@ class PersistEventsStore:
                 (event.state_key,),
             )
 
+            # The `_get_membership_from_event_id` cache is immutable, except for the
+            # case where we look up an event *before* persisting it.
+            txn.call_after(
+                self.store._get_membership_from_event_id.invalidate,
+                (event.event_id,),
+            )
+
             # We update the local_current_membership table only if the event is
             # "current", i.e., its something that has just happened.
             #
@@ -1811,10 +1821,11 @@ class PersistEventsStore:
         if rel_type == RelationTypes.REPLACE:
             txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,))
 
-        if rel_type == RelationTypes.THREAD:
-            txn.call_after(
-                self.store.get_thread_summary.invalidate, (parent_id, event.room_id)
-            )
+        if (
+            rel_type == RelationTypes.THREAD
+            or rel_type == RelationTypes.UNSTABLE_THREAD
+        ):
+            txn.call_after(self.store.get_thread_summary.invalidate, (parent_id,))
             # It should be safe to only invalidate the cache if the user has not
             # previously participated in the thread, but that's difficult (and
             # potentially error-prone) so it is always invalidated.
@@ -1943,15 +1954,43 @@ class PersistEventsStore:
 
         txn.execute(sql, (batch_id,))
 
-    def _handle_redaction(self, txn, redacted_event_id):
-        """Handles receiving a redaction and checking whether we need to remove
-        any redacted relations from the database.
+    def _handle_redact_relations(
+        self, txn: LoggingTransaction, redacted_event_id: str
+    ) -> None:
+        """Handles receiving a redaction and checking whether the redacted event
+        has any relations which must be removed from the database.
 
         Args:
             txn
-            redacted_event_id (str): The event that was redacted.
+            redacted_event_id: The event that was redacted.
         """
 
+        # Fetch the current relation of the event being redacted.
+        redacted_relates_to = self.db_pool.simple_select_one_onecol_txn(
+            txn,
+            table="event_relations",
+            keyvalues={"event_id": redacted_event_id},
+            retcol="relates_to_id",
+            allow_none=True,
+        )
+        # Any relation information for the related event must be cleared.
+        if redacted_relates_to is not None:
+            self.store._invalidate_cache_and_stream(
+                txn, self.store.get_relations_for_event, (redacted_relates_to,)
+            )
+            self.store._invalidate_cache_and_stream(
+                txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,)
+            )
+            self.store._invalidate_cache_and_stream(
+                txn, self.store.get_applicable_edit, (redacted_relates_to,)
+            )
+            self.store._invalidate_cache_and_stream(
+                txn, self.store.get_thread_summary, (redacted_relates_to,)
+            )
+            self.store._invalidate_cache_and_stream(
+                txn, self.store.get_thread_participated, (redacted_relates_to,)
+            )
+
         self.db_pool.simple_delete_txn(
             txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
         )
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 26784f755e..59454a47df 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -1286,7 +1286,7 @@ class EventsWorkerStore(SQLBaseStore):
         )
         return {eid for ((_rid, eid), have_event) in res.items() if have_event}
 
-    @cachedList("have_seen_event", "keys")
+    @cachedList(cached_method_name="have_seen_event", list_name="keys")
     async def _have_seen_events_dict(
         self, keys: Iterable[Tuple[str, str]]
     ) -> Dict[Tuple[str, str], bool]:
@@ -1954,7 +1954,7 @@ class EventsWorkerStore(SQLBaseStore):
             get_event_id_for_timestamp_txn,
         )
 
-    @cachedList("is_partial_state_event", list_name="event_ids")
+    @cachedList(cached_method_name="is_partial_state_event", list_name="event_ids")
     async def get_partial_state_events(
         self, event_ids: Collection[str]
     ) -> Dict[str, bool]:
diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py
index 3f6086050b..0aef121d83 100644
--- a/synapse/storage/databases/main/group_server.py
+++ b/synapse/storage/databases/main/group_server.py
@@ -13,13 +13,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, cast
 
 from typing_extensions import TypedDict
 
 from synapse.api.errors import SynapseError
 from synapse.storage._base import SQLBaseStore, db_to_json
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import (
+    DatabasePool,
+    LoggingDatabaseConnection,
+    LoggingTransaction,
+)
 from synapse.types import JsonDict
 from synapse.util import json_encoder
 
@@ -75,7 +79,7 @@ class GroupServerWorkerStore(SQLBaseStore):
     ) -> List[Dict[str, Any]]:
         # TODO: Pagination
 
-        keyvalues = {"group_id": group_id}
+        keyvalues: JsonDict = {"group_id": group_id}
         if not include_private:
             keyvalues["is_public"] = True
 
@@ -117,7 +121,7 @@ class GroupServerWorkerStore(SQLBaseStore):
 
         # TODO: Pagination
 
-        def _get_rooms_in_group_txn(txn):
+        def _get_rooms_in_group_txn(txn: LoggingTransaction) -> List[_RoomInGroup]:
             sql = """
             SELECT room_id, is_public FROM group_rooms
                 WHERE group_id = ?
@@ -176,8 +180,10 @@ class GroupServerWorkerStore(SQLBaseStore):
                         * "order": int, the sort order of rooms in this category
         """
 
-        def _get_rooms_for_summary_txn(txn):
-            keyvalues = {"group_id": group_id}
+        def _get_rooms_for_summary_txn(
+            txn: LoggingTransaction,
+        ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
+            keyvalues: JsonDict = {"group_id": group_id}
             if not include_private:
                 keyvalues["is_public"] = True
 
@@ -241,7 +247,7 @@ class GroupServerWorkerStore(SQLBaseStore):
             "get_rooms_for_summary", _get_rooms_for_summary_txn
         )
 
-    async def get_group_categories(self, group_id):
+    async def get_group_categories(self, group_id: str) -> JsonDict:
         rows = await self.db_pool.simple_select_list(
             table="group_room_categories",
             keyvalues={"group_id": group_id},
@@ -257,7 +263,7 @@ class GroupServerWorkerStore(SQLBaseStore):
             for row in rows
         }
 
-    async def get_group_category(self, group_id, category_id):
+    async def get_group_category(self, group_id: str, category_id: str) -> JsonDict:
         category = await self.db_pool.simple_select_one(
             table="group_room_categories",
             keyvalues={"group_id": group_id, "category_id": category_id},
@@ -269,7 +275,7 @@ class GroupServerWorkerStore(SQLBaseStore):
 
         return category
 
-    async def get_group_roles(self, group_id):
+    async def get_group_roles(self, group_id: str) -> JsonDict:
         rows = await self.db_pool.simple_select_list(
             table="group_roles",
             keyvalues={"group_id": group_id},
@@ -285,7 +291,7 @@ class GroupServerWorkerStore(SQLBaseStore):
             for row in rows
         }
 
-    async def get_group_role(self, group_id, role_id):
+    async def get_group_role(self, group_id: str, role_id: str) -> JsonDict:
         role = await self.db_pool.simple_select_one(
             table="group_roles",
             keyvalues={"group_id": group_id, "role_id": role_id},
@@ -311,15 +317,19 @@ class GroupServerWorkerStore(SQLBaseStore):
             desc="get_local_groups_for_room",
         )
 
-    async def get_users_for_summary_by_role(self, group_id, include_private=False):
+    async def get_users_for_summary_by_role(
+        self, group_id: str, include_private: bool = False
+    ) -> Tuple[List[JsonDict], JsonDict]:
         """Get the users and roles that should be included in a summary request
 
         Returns:
             ([users], [roles])
         """
 
-        def _get_users_for_summary_txn(txn):
-            keyvalues = {"group_id": group_id}
+        def _get_users_for_summary_txn(
+            txn: LoggingTransaction,
+        ) -> Tuple[List[JsonDict], JsonDict]:
+            keyvalues: JsonDict = {"group_id": group_id}
             if not include_private:
                 keyvalues["is_public"] = True
 
@@ -406,7 +416,9 @@ class GroupServerWorkerStore(SQLBaseStore):
             allow_none=True,
         )
 
-    async def get_users_membership_info_in_group(self, group_id, user_id):
+    async def get_users_membership_info_in_group(
+        self, group_id: str, user_id: str
+    ) -> JsonDict:
         """Get a dict describing the membership of a user in a group.
 
         Example if joined:
@@ -421,7 +433,7 @@ class GroupServerWorkerStore(SQLBaseStore):
              An empty dict if the user is not join/invite/etc
         """
 
-        def _get_users_membership_in_group_txn(txn):
+        def _get_users_membership_in_group_txn(txn: LoggingTransaction) -> JsonDict:
             row = self.db_pool.simple_select_one_txn(
                 txn,
                 table="group_users",
@@ -463,10 +475,14 @@ class GroupServerWorkerStore(SQLBaseStore):
             desc="get_publicised_groups_for_user",
         )
 
-    async def get_attestations_need_renewals(self, valid_until_ms):
+    async def get_attestations_need_renewals(
+        self, valid_until_ms: int
+    ) -> List[Dict[str, Any]]:
         """Get all attestations that need to be renewed until givent time"""
 
-        def _get_attestations_need_renewals_txn(txn):
+        def _get_attestations_need_renewals_txn(
+            txn: LoggingTransaction,
+        ) -> List[Dict[str, Any]]:
             sql = """
                 SELECT group_id, user_id FROM group_attestations_renewals
                 WHERE valid_until_ms <= ?
@@ -478,7 +494,9 @@ class GroupServerWorkerStore(SQLBaseStore):
             "get_attestations_need_renewals", _get_attestations_need_renewals_txn
         )
 
-    async def get_remote_attestation(self, group_id, user_id):
+    async def get_remote_attestation(
+        self, group_id: str, user_id: str
+    ) -> Optional[JsonDict]:
         """Get the attestation that proves the remote agrees that the user is
         in the group.
         """
@@ -504,8 +522,8 @@ class GroupServerWorkerStore(SQLBaseStore):
             desc="get_joined_groups",
         )
 
-    async def get_all_groups_for_user(self, user_id, now_token):
-        def _get_all_groups_for_user_txn(txn):
+    async def get_all_groups_for_user(self, user_id, now_token) -> List[JsonDict]:
+        def _get_all_groups_for_user_txn(txn: LoggingTransaction) -> List[JsonDict]:
             sql = """
                 SELECT group_id, type, membership, u.content
                 FROM local_group_updates AS u
@@ -528,15 +546,16 @@ class GroupServerWorkerStore(SQLBaseStore):
             "get_all_groups_for_user", _get_all_groups_for_user_txn
         )
 
-    async def get_groups_changes_for_user(self, user_id, from_token, to_token):
-        from_token = int(from_token)
-        has_changed = self._group_updates_stream_cache.has_entity_changed(
+    async def get_groups_changes_for_user(
+        self, user_id: str, from_token: int, to_token: int
+    ) -> List[JsonDict]:
+        has_changed = self._group_updates_stream_cache.has_entity_changed(  # type: ignore[attr-defined]
             user_id, from_token
         )
         if not has_changed:
             return []
 
-        def _get_groups_changes_for_user_txn(txn):
+        def _get_groups_changes_for_user_txn(txn: LoggingTransaction) -> List[JsonDict]:
             sql = """
                 SELECT group_id, membership, type, u.content
                 FROM local_group_updates AS u
@@ -583,12 +602,14 @@ class GroupServerWorkerStore(SQLBaseStore):
         """
 
         last_id = int(last_id)
-        has_changed = self._group_updates_stream_cache.has_any_entity_changed(last_id)
+        has_changed = self._group_updates_stream_cache.has_any_entity_changed(last_id)  # type: ignore[attr-defined]
 
         if not has_changed:
             return [], current_id, False
 
-        def _get_all_groups_changes_txn(txn):
+        def _get_all_groups_changes_txn(
+            txn: LoggingTransaction,
+        ) -> Tuple[List[Tuple[int, tuple]], int, bool]:
             sql = """
                 SELECT stream_id, group_id, user_id, type, content
                 FROM local_group_updates
@@ -596,10 +617,13 @@ class GroupServerWorkerStore(SQLBaseStore):
                 LIMIT ?
             """
             txn.execute(sql, (last_id, current_id, limit))
-            updates = [
-                (stream_id, (group_id, user_id, gtype, db_to_json(content_json)))
-                for stream_id, group_id, user_id, gtype, content_json in txn
-            ]
+            updates = cast(
+                List[Tuple[int, tuple]],
+                [
+                    (stream_id, (group_id, user_id, gtype, db_to_json(content_json)))
+                    for stream_id, group_id, user_id, gtype, content_json in txn
+                ],
+            )
 
             limited = False
             upto_token = current_id
@@ -633,8 +657,8 @@ class GroupServerStore(GroupServerWorkerStore):
         self,
         group_id: str,
         room_id: str,
-        category_id: str,
-        order: int,
+        category_id: Optional[str],
+        order: Optional[int],
         is_public: Optional[bool],
     ) -> None:
         """Add (or update) room's entry in summary.
@@ -661,11 +685,11 @@ class GroupServerStore(GroupServerWorkerStore):
 
     def _add_room_to_summary_txn(
         self,
-        txn,
+        txn: LoggingTransaction,
         group_id: str,
         room_id: str,
-        category_id: str,
-        order: int,
+        category_id: Optional[str],
+        order: Optional[int],
         is_public: Optional[bool],
     ) -> None:
         """Add (or update) room's entry in summary.
@@ -750,7 +774,7 @@ class GroupServerStore(GroupServerWorkerStore):
                 WHERE group_id = ? AND category_id = ?
             """
             txn.execute(sql, (group_id, category_id))
-            (order,) = txn.fetchone()
+            (order,) = cast(Tuple[int], txn.fetchone())
 
         if existing:
             to_update = {}
@@ -766,7 +790,7 @@ class GroupServerStore(GroupServerWorkerStore):
                     "category_id": category_id,
                     "room_id": room_id,
                 },
-                values=to_update,
+                updatevalues=to_update,
             )
         else:
             if is_public is None:
@@ -785,7 +809,7 @@ class GroupServerStore(GroupServerWorkerStore):
             )
 
     async def remove_room_from_summary(
-        self, group_id: str, room_id: str, category_id: str
+        self, group_id: str, room_id: str, category_id: Optional[str]
     ) -> int:
         if category_id is None:
             category_id = _DEFAULT_CATEGORY_ID
@@ -808,8 +832,8 @@ class GroupServerStore(GroupServerWorkerStore):
         is_public: Optional[bool],
     ) -> None:
         """Add/update room category for group"""
-        insertion_values = {}
-        update_values = {"category_id": category_id}  # This cannot be empty
+        insertion_values: JsonDict = {}
+        update_values: JsonDict = {"category_id": category_id}  # This cannot be empty
 
         if profile is None:
             insertion_values["profile"] = "{}"
@@ -844,8 +868,8 @@ class GroupServerStore(GroupServerWorkerStore):
         is_public: Optional[bool],
     ) -> None:
         """Add/remove user role"""
-        insertion_values = {}
-        update_values = {"role_id": role_id}  # This cannot be empty
+        insertion_values: JsonDict = {}
+        update_values: JsonDict = {"role_id": role_id}  # This cannot be empty
 
         if profile is None:
             insertion_values["profile"] = "{}"
@@ -876,8 +900,8 @@ class GroupServerStore(GroupServerWorkerStore):
         self,
         group_id: str,
         user_id: str,
-        role_id: str,
-        order: int,
+        role_id: Optional[str],
+        order: Optional[int],
         is_public: Optional[bool],
     ) -> None:
         """Add (or update) user's entry in summary.
@@ -904,13 +928,13 @@ class GroupServerStore(GroupServerWorkerStore):
 
     def _add_user_to_summary_txn(
         self,
-        txn,
+        txn: LoggingTransaction,
         group_id: str,
         user_id: str,
-        role_id: str,
-        order: int,
+        role_id: Optional[str],
+        order: Optional[int],
         is_public: Optional[bool],
-    ):
+    ) -> None:
         """Add (or update) user's entry in summary.
 
         Args:
@@ -989,7 +1013,7 @@ class GroupServerStore(GroupServerWorkerStore):
                 WHERE group_id = ? AND role_id = ?
             """
             txn.execute(sql, (group_id, role_id))
-            (order,) = txn.fetchone()
+            (order,) = cast(Tuple[int], txn.fetchone())
 
         if existing:
             to_update = {}
@@ -1005,7 +1029,7 @@ class GroupServerStore(GroupServerWorkerStore):
                     "role_id": role_id,
                     "user_id": user_id,
                 },
-                values=to_update,
+                updatevalues=to_update,
             )
         else:
             if is_public is None:
@@ -1024,7 +1048,7 @@ class GroupServerStore(GroupServerWorkerStore):
             )
 
     async def remove_user_from_summary(
-        self, group_id: str, user_id: str, role_id: str
+        self, group_id: str, user_id: str, role_id: Optional[str]
     ) -> int:
         if role_id is None:
             role_id = _DEFAULT_ROLE_ID
@@ -1065,7 +1089,7 @@ class GroupServerStore(GroupServerWorkerStore):
                 Optional if the user and group are on the same server
         """
 
-        def _add_user_to_group_txn(txn):
+        def _add_user_to_group_txn(txn: LoggingTransaction) -> None:
             self.db_pool.simple_insert_txn(
                 txn,
                 table="group_users",
@@ -1108,7 +1132,7 @@ class GroupServerStore(GroupServerWorkerStore):
         await self.db_pool.runInteraction("add_user_to_group", _add_user_to_group_txn)
 
     async def remove_user_from_group(self, group_id: str, user_id: str) -> None:
-        def _remove_user_from_group_txn(txn):
+        def _remove_user_from_group_txn(txn: LoggingTransaction) -> None:
             self.db_pool.simple_delete_txn(
                 txn,
                 table="group_users",
@@ -1159,7 +1183,7 @@ class GroupServerStore(GroupServerWorkerStore):
         )
 
     async def remove_room_from_group(self, group_id: str, room_id: str) -> None:
-        def _remove_room_from_group_txn(txn):
+        def _remove_room_from_group_txn(txn: LoggingTransaction) -> None:
             self.db_pool.simple_delete_txn(
                 txn,
                 table="group_rooms",
@@ -1216,7 +1240,9 @@ class GroupServerStore(GroupServerWorkerStore):
 
         content = content or {}
 
-        def _register_user_group_membership_txn(txn, next_id):
+        def _register_user_group_membership_txn(
+            txn: LoggingTransaction, next_id: int
+        ) -> int:
             # TODO: Upsert?
             self.db_pool.simple_delete_txn(
                 txn,
@@ -1249,7 +1275,7 @@ class GroupServerStore(GroupServerWorkerStore):
                     ),
                 },
             )
-            self._group_updates_stream_cache.entity_has_changed(user_id, next_id)
+            self._group_updates_stream_cache.entity_has_changed(user_id, next_id)  # type: ignore[attr-defined]
 
             # TODO: Insert profile to ensure it comes down stream if its a join.
 
@@ -1289,7 +1315,7 @@ class GroupServerStore(GroupServerWorkerStore):
 
             return next_id
 
-        async with self._group_updates_id_gen.get_next() as next_id:
+        async with self._group_updates_id_gen.get_next() as next_id:  # type: ignore[attr-defined]
             res = await self.db_pool.runInteraction(
                 "register_user_group_membership",
                 _register_user_group_membership_txn,
@@ -1298,7 +1324,13 @@ class GroupServerStore(GroupServerWorkerStore):
         return res
 
     async def create_group(
-        self, group_id, user_id, name, avatar_url, short_description, long_description
+        self,
+        group_id: str,
+        user_id: str,
+        name: str,
+        avatar_url: str,
+        short_description: str,
+        long_description: str,
     ) -> None:
         await self.db_pool.simple_insert(
             table="groups",
@@ -1313,7 +1345,7 @@ class GroupServerStore(GroupServerWorkerStore):
             desc="create_group",
         )
 
-    async def update_group_profile(self, group_id, profile):
+    async def update_group_profile(self, group_id: str, profile: JsonDict) -> None:
         await self.db_pool.simple_update_one(
             table="groups",
             keyvalues={"group_id": group_id},
@@ -1361,8 +1393,8 @@ class GroupServerStore(GroupServerWorkerStore):
             desc="remove_attestation_renewal",
         )
 
-    def get_group_stream_token(self):
-        return self._group_updates_id_gen.get_current_token()
+    def get_group_stream_token(self) -> int:
+        return self._group_updates_id_gen.get_current_token()  # type: ignore[attr-defined]
 
     async def delete_group(self, group_id: str) -> None:
         """Deletes a group fully from the database.
@@ -1371,7 +1403,7 @@ class GroupServerStore(GroupServerWorkerStore):
             group_id: The group ID to delete.
         """
 
-        def _delete_group_txn(txn):
+        def _delete_group_txn(txn: LoggingTransaction) -> None:
             tables = [
                 "groups",
                 "group_users",
diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
index e9a0cdc6be..216622964a 100644
--- a/synapse/storage/databases/main/monthly_active_users.py
+++ b/synapse/storage/databases/main/monthly_active_users.py
@@ -12,15 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Dict, List, Optional
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast
 
 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.database import (
     DatabasePool,
     LoggingDatabaseConnection,
+    LoggingTransaction,
     make_in_list_sql_clause,
 )
+from synapse.storage.databases.main.registration import RegistrationWorkerStore
 from synapse.util.caches.descriptors import cached
 from synapse.util.threepids import canonicalise_email
 
@@ -56,7 +58,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
             Number of current monthly active users
         """
 
-        def _count_users(txn):
+        def _count_users(txn: LoggingTransaction) -> int:
             # Exclude app service users
             sql = """
                 SELECT COUNT(*)
@@ -66,7 +68,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
                 WHERE (users.appservice_id IS NULL OR users.appservice_id = '');
             """
             txn.execute(sql)
-            (count,) = txn.fetchone()
+            (count,) = cast(Tuple[int], txn.fetchone())
             return count
 
         return await self.db_pool.runInteraction("count_users", _count_users)
@@ -84,7 +86,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
 
         """
 
-        def _count_users_by_service(txn):
+        def _count_users_by_service(txn: LoggingTransaction) -> Dict[str, int]:
             sql = """
                 SELECT COALESCE(appservice_id, 'native'), COUNT(*)
                 FROM monthly_active_users
@@ -93,7 +95,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
             """
 
             txn.execute(sql)
-            result = txn.fetchall()
+            result = cast(List[Tuple[str, int]], txn.fetchall())
             return dict(result)
 
         return await self.db_pool.runInteraction(
@@ -141,12 +143,12 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
         )
 
     @wrap_as_background_process("reap_monthly_active_users")
-    async def reap_monthly_active_users(self):
+    async def reap_monthly_active_users(self) -> None:
         """Cleans out monthly active user table to ensure that no stale
         entries exist.
         """
 
-        def _reap_users(txn, reserved_users):
+        def _reap_users(txn: LoggingTransaction, reserved_users: List[str]) -> None:
             """
             Args:
                 reserved_users (tuple): reserved users to preserve
@@ -210,10 +212,10 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
             # is racy.
             # Have resolved to invalidate the whole cache for now and do
             # something about it if and when the perf becomes significant
-            self._invalidate_all_cache_and_stream(
+            self._invalidate_all_cache_and_stream(  # type: ignore[attr-defined]
                 txn, self.user_last_seen_monthly_active
             )
-            self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ())
+            self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ())  # type: ignore[attr-defined]
 
         reserved_users = await self.get_registered_reserved_users()
         await self.db_pool.runInteraction(
@@ -221,7 +223,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
         )
 
 
-class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
+class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore, RegistrationWorkerStore):
     def __init__(
         self,
         database: DatabasePool,
@@ -242,13 +244,15 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
             hs.config.server.mau_limits_reserved_threepids[: self._max_mau_value],
         )
 
-    def _initialise_reserved_users(self, txn, threepids):
+    def _initialise_reserved_users(
+        self, txn: LoggingTransaction, threepids: List[dict]
+    ) -> None:
         """Ensures that reserved threepids are accounted for in the MAU table, should
         be called on start up.
 
         Args:
-            txn (cursor):
-            threepids (list[dict]): List of threepid dicts to reserve
+            txn:
+            threepids: List of threepid dicts to reserve
         """
 
         # XXX what is this function trying to achieve?  It upserts into
@@ -299,7 +303,9 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
             "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id
         )
 
-    def upsert_monthly_active_user_txn(self, txn, user_id):
+    def upsert_monthly_active_user_txn(
+        self, txn: LoggingTransaction, user_id: str
+    ) -> None:
         """Updates or inserts monthly active user member
 
         We consciously do not call is_support_txn from this method because it
@@ -336,7 +342,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
             txn, self.user_last_seen_monthly_active, (user_id,)
         )
 
-    async def populate_monthly_active_users(self, user_id):
+    async def populate_monthly_active_users(self, user_id: str) -> None:
         """Checks on the state of monthly active user limits and optionally
         add the user to the monthly active tables
 
@@ -345,7 +351,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
         """
         if self._limit_usage_by_mau or self._mau_stats_only:
             # Trial users and guests should not be included as part of MAU group
-            is_guest = await self.is_guest(user_id)
+            is_guest = await self.is_guest(user_id)  # type: ignore[attr-defined]
             if is_guest:
                 return
             is_trial = await self.is_trial_user(user_id)
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index dc6665237a..a698d10cc5 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -48,8 +48,6 @@ class ExternalIDReuseException(Exception):
     """Exception if writing an external id for a user fails,
     because this external id is given to an other user."""
 
-    pass
-
 
 @attr.s(frozen=True, slots=True, auto_attribs=True)
 class TokenLookupResult:
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index 36aa1092f6..b2295fd51f 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -27,7 +27,6 @@ from typing import (
 )
 
 import attr
-from frozendict import frozendict
 
 from synapse.api.constants import RelationTypes
 from synapse.events import EventBase
@@ -41,45 +40,15 @@ from synapse.storage.database import (
 from synapse.storage.databases.main.stream import generate_pagination_where_clause
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.relations import AggregationPaginationToken, PaginationChunk
-from synapse.types import JsonDict, RoomStreamToken, StreamToken
+from synapse.types import RoomStreamToken, StreamToken
 from synapse.util.caches.descriptors import cached, cachedList
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
-    from synapse.storage.databases.main import DataStore
 
 logger = logging.getLogger(__name__)
 
 
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class _ThreadAggregation:
-    # The latest event in the thread.
-    latest_event: EventBase
-    # The latest edit to the latest event in the thread.
-    latest_edit: Optional[EventBase]
-    # The total number of events in the thread.
-    count: int
-    # True if the current user has sent an event to the thread.
-    current_user_participated: bool
-
-
-@attr.s(slots=True, auto_attribs=True)
-class BundledAggregations:
-    """
-    The bundled aggregations for an event.
-
-    Some values require additional processing during serialization.
-    """
-
-    annotations: Optional[JsonDict] = None
-    references: Optional[JsonDict] = None
-    replace: Optional[EventBase] = None
-    thread: Optional[_ThreadAggregation] = None
-
-    def __bool__(self) -> bool:
-        return bool(self.annotations or self.references or self.replace or self.thread)
-
-
 class RelationsWorkerStore(SQLBaseStore):
     def __init__(
         self,
@@ -91,10 +60,11 @@ class RelationsWorkerStore(SQLBaseStore):
 
         self._msc3440_enabled = hs.config.experimental.msc3440_enabled
 
-    @cached(tree=True)
+    @cached(uncached_args=("event",), tree=True)
     async def get_relations_for_event(
         self,
         event_id: str,
+        event: EventBase,
         room_id: str,
         relation_type: Optional[str] = None,
         event_type: Optional[str] = None,
@@ -108,6 +78,7 @@ class RelationsWorkerStore(SQLBaseStore):
 
         Args:
             event_id: Fetch events that relate to this event ID.
+            event: The EventBase corresponding to event_id.
             room_id: The room the event belongs to.
             relation_type: Only fetch events with this relation type, if given.
             event_type: Only fetch events with this event type, if given.
@@ -122,9 +93,13 @@ class RelationsWorkerStore(SQLBaseStore):
             List of event IDs that match relations requested. The rows are of
             the form `{"event_id": "..."}`.
         """
+        # We don't use `event_id`, it's there so that we can cache based on
+        # it. The `event_id` must match the `event.event_id`.
+        assert event.event_id == event_id
 
         where_clause = ["relates_to_id = ?", "room_id = ?"]
-        where_args: List[Union[str, int]] = [event_id, room_id]
+        where_args: List[Union[str, int]] = [event.event_id, room_id]
+        is_redacted = event.internal_metadata.is_redacted()
 
         if relation_type is not None:
             where_clause.append("relation_type = ?")
@@ -157,7 +132,7 @@ class RelationsWorkerStore(SQLBaseStore):
             order = "ASC"
 
         sql = """
-            SELECT event_id, topological_ordering, stream_ordering
+            SELECT event_id, relation_type, topological_ordering, stream_ordering
             FROM event_relations
             INNER JOIN events USING (event_id)
             WHERE %s
@@ -178,9 +153,12 @@ class RelationsWorkerStore(SQLBaseStore):
             last_stream_id = None
             events = []
             for row in txn:
-                events.append({"event_id": row[0]})
-                last_topo_id = row[1]
-                last_stream_id = row[2]
+                # Do not include edits for redacted events as they leak event
+                # content.
+                if not is_redacted or row[1] != RelationTypes.REPLACE:
+                    events.append({"event_id": row[0]})
+                last_topo_id = row[2]
+                last_stream_id = row[3]
 
             # If there are more events, generate the next pagination key.
             next_token = None
@@ -375,7 +353,7 @@ class RelationsWorkerStore(SQLBaseStore):
         raise NotImplementedError()
 
     @cachedList(cached_method_name="get_applicable_edit", list_name="event_ids")
-    async def _get_applicable_edits(
+    async def get_applicable_edits(
         self, event_ids: Collection[str]
     ) -> Dict[str, Optional[EventBase]]:
         """Get the most recent edit (if any) that has happened for the given
@@ -464,7 +442,7 @@ class RelationsWorkerStore(SQLBaseStore):
         raise NotImplementedError()
 
     @cachedList(cached_method_name="get_thread_summary", list_name="event_ids")
-    async def _get_thread_summaries(
+    async def get_thread_summaries(
         self, event_ids: Collection[str]
     ) -> Dict[str, Optional[Tuple[int, EventBase, Optional[EventBase]]]]:
         """Get the number of threaded replies, the latest reply (if any), and the latest edit for that reply for the given event.
@@ -499,7 +477,7 @@ class RelationsWorkerStore(SQLBaseStore):
                         AND parent.room_id = child.room_id
                     WHERE
                         %s
-                        AND relation_type = ?
+                        AND %s
                     ORDER BY parent.event_id, child.topological_ordering DESC, child.stream_ordering DESC
                 """
             else:
@@ -514,16 +492,22 @@ class RelationsWorkerStore(SQLBaseStore):
                         AND parent.room_id = child.room_id
                     WHERE
                         %s
-                        AND relation_type = ?
+                        AND %s
                     ORDER BY child.topological_ordering DESC, child.stream_ordering DESC
                 """
 
             clause, args = make_in_list_sql_clause(
                 txn.database_engine, "relates_to_id", event_ids
             )
-            args.append(RelationTypes.THREAD)
 
-            txn.execute(sql % (clause,), args)
+            if self._msc3440_enabled:
+                relations_clause = "(relation_type = ? OR relation_type = ?)"
+                args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
+            else:
+                relations_clause = "relation_type = ?"
+                args.append(RelationTypes.THREAD)
+
+            txn.execute(sql % (clause, relations_clause), args)
             latest_event_ids = {}
             for parent_event_id, child_event_id in txn:
                 # Only consider the latest threaded reply (by topological ordering).
@@ -543,7 +527,7 @@ class RelationsWorkerStore(SQLBaseStore):
                     AND parent.room_id = child.room_id
                 WHERE
                     %s
-                    AND relation_type = ?
+                    AND %s
                 GROUP BY parent.event_id
             """
 
@@ -552,9 +536,15 @@ class RelationsWorkerStore(SQLBaseStore):
             clause, args = make_in_list_sql_clause(
                 txn.database_engine, "relates_to_id", latest_event_ids.keys()
             )
-            args.append(RelationTypes.THREAD)
 
-            txn.execute(sql % (clause,), args)
+            if self._msc3440_enabled:
+                relations_clause = "(relation_type = ? OR relation_type = ?)"
+                args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
+            else:
+                relations_clause = "relation_type = ?"
+                args.append(RelationTypes.THREAD)
+
+            txn.execute(sql % (clause, relations_clause), args)
             counts = dict(cast(List[Tuple[str, int]], txn.fetchall()))
 
             return counts, latest_event_ids
@@ -566,7 +556,7 @@ class RelationsWorkerStore(SQLBaseStore):
         latest_events = await self.get_events(latest_event_ids.values())  # type: ignore[attr-defined]
 
         # Check to see if any of those events are edited.
-        latest_edits = await self._get_applicable_edits(latest_event_ids.values())
+        latest_edits = await self.get_applicable_edits(latest_event_ids.values())
 
         # Map to the event IDs to the thread summary.
         #
@@ -589,7 +579,7 @@ class RelationsWorkerStore(SQLBaseStore):
         raise NotImplementedError()
 
     @cachedList(cached_method_name="get_thread_participated", list_name="event_ids")
-    async def _get_threads_participated(
+    async def get_threads_participated(
         self, event_ids: Collection[str], user_id: str
     ) -> Dict[str, bool]:
         """Get whether the requesting user participated in the given threads.
@@ -617,16 +607,24 @@ class RelationsWorkerStore(SQLBaseStore):
                     AND parent.room_id = child.room_id
                 WHERE
                     %s
-                    AND relation_type = ?
+                    AND %s
                     AND child.sender = ?
             """
 
             clause, args = make_in_list_sql_clause(
                 txn.database_engine, "relates_to_id", event_ids
             )
-            args.extend((RelationTypes.THREAD, user_id))
 
-            txn.execute(sql % (clause,), args)
+            if self._msc3440_enabled:
+                relations_clause = "(relation_type = ? OR relation_type = ?)"
+                args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
+            else:
+                relations_clause = "relation_type = ?"
+                args.append(RelationTypes.THREAD)
+
+            args.append(user_id)
+
+            txn.execute(sql % (clause, relations_clause), args)
             return {row[0] for row in txn.fetchall()}
 
         participated_threads = await self.db_pool.runInteraction(
@@ -737,122 +735,6 @@ class RelationsWorkerStore(SQLBaseStore):
             "get_if_user_has_annotated_event", _get_if_user_has_annotated_event
         )
 
-    async def _get_bundled_aggregation_for_event(
-        self, event: EventBase, user_id: str
-    ) -> Optional[BundledAggregations]:
-        """Generate bundled aggregations for an event.
-
-        Note that this does not use a cache, but depends on cached methods.
-
-        Args:
-            event: The event to calculate bundled aggregations for.
-            user_id: The user requesting the bundled aggregations.
-
-        Returns:
-            The bundled aggregations for an event, if bundled aggregations are
-            enabled and the event can have bundled aggregations.
-        """
-
-        # Do not bundle aggregations for an event which represents an edit or an
-        # annotation. It does not make sense for them to have related events.
-        relates_to = event.content.get("m.relates_to")
-        if isinstance(relates_to, (dict, frozendict)):
-            relation_type = relates_to.get("rel_type")
-            if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE):
-                return None
-
-        event_id = event.event_id
-        room_id = event.room_id
-
-        # The bundled aggregations to include, a mapping of relation type to a
-        # type-specific value. Some types include the direct return type here
-        # while others need more processing during serialization.
-        aggregations = BundledAggregations()
-
-        annotations = await self.get_aggregation_groups_for_event(event_id, room_id)
-        if annotations.chunk:
-            aggregations.annotations = await annotations.to_dict(
-                cast("DataStore", self)
-            )
-
-        references = await self.get_relations_for_event(
-            event_id, room_id, RelationTypes.REFERENCE, direction="f"
-        )
-        if references.chunk:
-            aggregations.references = await references.to_dict(cast("DataStore", self))
-
-        # Store the bundled aggregations in the event metadata for later use.
-        return aggregations
-
-    async def get_bundled_aggregations(
-        self, events: Iterable[EventBase], user_id: str
-    ) -> Dict[str, BundledAggregations]:
-        """Generate bundled aggregations for events.
-
-        Args:
-            events: The iterable of events to calculate bundled aggregations for.
-            user_id: The user requesting the bundled aggregations.
-
-        Returns:
-            A map of event ID to the bundled aggregation for the event. Not all
-            events may have bundled aggregations in the results.
-        """
-        # The already processed event IDs. Tracked separately from the result
-        # since the result omits events which do not have bundled aggregations.
-        seen_event_ids = set()
-
-        # State events and redacted events do not get bundled aggregations.
-        events = [
-            event
-            for event in events
-            if not event.is_state() and not event.internal_metadata.is_redacted()
-        ]
-
-        # event ID -> bundled aggregation in non-serialized form.
-        results: Dict[str, BundledAggregations] = {}
-
-        # Fetch other relations per event.
-        for event in events:
-            # De-duplicate events by ID to handle the same event requested multiple
-            # times. The caches that _get_bundled_aggregation_for_event use should
-            # capture this, but best to reduce work.
-            if event.event_id in seen_event_ids:
-                continue
-            seen_event_ids.add(event.event_id)
-
-            event_result = await self._get_bundled_aggregation_for_event(event, user_id)
-            if event_result:
-                results[event.event_id] = event_result
-
-        # Fetch any edits.
-        edits = await self._get_applicable_edits(seen_event_ids)
-        for event_id, edit in edits.items():
-            results.setdefault(event_id, BundledAggregations()).replace = edit
-
-        # Fetch thread summaries.
-        if self._msc3440_enabled:
-            summaries = await self._get_thread_summaries(seen_event_ids)
-            # Only fetch participated for a limited selection based on what had
-            # summaries.
-            participated = await self._get_threads_participated(
-                summaries.keys(), user_id
-            )
-            for event_id, summary in summaries.items():
-                if summary:
-                    thread_count, latest_thread_event, edit = summary
-                    results.setdefault(
-                        event_id, BundledAggregations()
-                    ).thread = _ThreadAggregation(
-                        latest_event=latest_thread_event,
-                        latest_edit=edit,
-                        count=thread_count,
-                        # If there's a thread summary it must also exist in the
-                        # participated dictionary.
-                        current_user_participated=participated[event_id],
-                    )
-
-        return results
-
 
 class RelationsStore(RelationsWorkerStore):
     pass
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index e48ec5f495..3248da5356 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -46,7 +46,7 @@ from synapse.storage.roommember import (
     ProfileInfo,
     RoomsForUser,
 )
-from synapse.types import PersistedEventPosition, StateMap, get_domain_from_id
+from synapse.types import PersistedEventPosition, get_domain_from_id
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches import intern_string
 from synapse.util.caches.descriptors import _CacheContext, cached, cachedList
@@ -63,6 +63,14 @@ _MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update"
 _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME = "current_state_events_membership"
 
 
+@attr.s(frozen=True, slots=True, auto_attribs=True)
+class EventIdMembership:
+    """Returned by `get_membership_from_event_ids`"""
+
+    user_id: str
+    membership: str
+
+
 class RoomMemberWorkerStore(EventsWorkerStore):
     def __init__(
         self,
@@ -273,7 +281,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             txn.execute(sql, (room_id,))
             res = {}
             for count, membership in txn:
-                summary = res.setdefault(membership, MemberSummary([], count))
+                res.setdefault(membership, MemberSummary([], count))
 
             # we order by membership and then fairly arbitrarily by event_id so
             # heroes are consistent
@@ -772,7 +780,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             retcols=("user_id", "display_name", "avatar_url", "event_id"),
             keyvalues={"membership": Membership.JOIN},
             batch_size=500,
-            desc="_get_membership_from_event_ids",
+            desc="_get_joined_profiles_from_event_ids",
         )
 
         return {
@@ -839,18 +847,14 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
         with Measure(self._clock, "get_joined_hosts"):
             return await self._get_joined_hosts(
-                room_id, state_group, state_entry.state, state_entry=state_entry
+                room_id, state_group, state_entry=state_entry
             )
 
     @cached(num_args=2, max_entries=10000, iterable=True)
     async def _get_joined_hosts(
-        self,
-        room_id: str,
-        state_group: int,
-        current_state_ids: StateMap[str],
-        state_entry: "_StateCacheEntry",
+        self, room_id: str, state_group: int, state_entry: "_StateCacheEntry"
     ) -> FrozenSet[str]:
-        # We don't use `state_group`, its there so that we can cache based on
+        # We don't use `state_group`, it's there so that we can cache based on
         # it. However, it's important that it's never None, since two
         # current_state's with a state_group of None are likely to be different.
         #
@@ -1004,12 +1008,26 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
         return set(room_ids)
 
+    @cached(max_entries=5000)
+    async def _get_membership_from_event_id(
+        self, member_event_id: str
+    ) -> Optional[EventIdMembership]:
+        raise NotImplementedError()
+
+    @cachedList(
+        cached_method_name="_get_membership_from_event_id", list_name="member_event_ids"
+    )
     async def get_membership_from_event_ids(
         self, member_event_ids: Iterable[str]
-    ) -> List[dict]:
-        """Get user_id and membership of a set of event IDs."""
+    ) -> Dict[str, Optional[EventIdMembership]]:
+        """Get user_id and membership of a set of event IDs.
 
-        return await self.db_pool.simple_select_many_batch(
+        Returns:
+            Mapping from event ID to `EventIdMembership` if the event is a
+            membership event, otherwise the value is None.
+        """
+
+        rows = await self.db_pool.simple_select_many_batch(
             table="room_memberships",
             column="event_id",
             iterable=member_event_ids,
@@ -1019,6 +1037,13 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             desc="get_membership_from_event_ids",
         )
 
+        return {
+            row["event_id"]: EventIdMembership(
+                membership=row["membership"], user_id=row["user_id"]
+            )
+            for row in rows
+        }
+
     async def is_local_host_in_room_ignoring_users(
         self, room_id: str, ignore_users: Collection[str]
     ) -> bool:
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index e23b119072..c5e9010c83 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -125,9 +125,6 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
     ):
         super().__init__(database, db_conn, hs)
 
-        if not hs.config.server.enable_search:
-            return
-
         self.db_pool.updates.register_background_update_handler(
             self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search
         )
@@ -243,9 +240,13 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
 
             return len(event_search_rows)
 
-        result = await self.db_pool.runInteraction(
-            self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn
-        )
+        if self.hs.config.server.enable_search:
+            result = await self.db_pool.runInteraction(
+                self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn
+            )
+        else:
+            # Don't index anything if search is not enabled.
+            result = 0
 
         if not result:
             await self.db_pool.updates._end_background_update(
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index a898f847e7..39e1efe373 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -325,21 +325,23 @@ def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]:
         args.extend(event_filter.labels)
 
     # Filter on relation_senders / relation types from the joined tables.
-    if event_filter.relation_senders:
+    if event_filter.related_by_senders:
         clauses.append(
             "(%s)"
             % " OR ".join(
-                "related_event.sender = ?" for _ in event_filter.relation_senders
+                "related_event.sender = ?" for _ in event_filter.related_by_senders
             )
         )
-        args.extend(event_filter.relation_senders)
+        args.extend(event_filter.related_by_senders)
 
-    if event_filter.relation_types:
+    if event_filter.related_by_rel_types:
         clauses.append(
             "(%s)"
-            % " OR ".join("relation_type = ?" for _ in event_filter.relation_types)
+            % " OR ".join(
+                "relation_type = ?" for _ in event_filter.related_by_rel_types
+            )
         )
-        args.extend(event_filter.relation_types)
+        args.extend(event_filter.related_by_rel_types)
 
     return " AND ".join(clauses), args
 
@@ -1203,7 +1205,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         # If there is a filter on relation_senders and relation_types join to the
         # relations table.
         if event_filter and (
-            event_filter.relation_senders or event_filter.relation_types
+            event_filter.related_by_senders or event_filter.related_by_rel_types
         ):
             # Filtering by relations could cause the same event to appear multiple
             # times (since there's no limit on the number of relations to an event).
@@ -1211,7 +1213,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             join_clause += """
                 LEFT JOIN event_relations AS relation ON (event.event_id = relation.relates_to_id)
             """
-            if event_filter.relation_senders:
+            if event_filter.related_by_senders:
                 join_clause += """
                     LEFT JOIN events AS related_event ON (relation.event_id = related_event.event_id)
                 """
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index e7fddd2426..0595df01d3 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -26,6 +26,8 @@ from typing import (
     cast,
 )
 
+from typing_extensions import TypedDict
+
 from synapse.api.errors import StoreError
 
 if TYPE_CHECKING:
@@ -40,7 +42,12 @@ from synapse.storage.database import (
 from synapse.storage.databases.main.state import StateFilter
 from synapse.storage.databases.main.state_deltas import StateDeltasStore
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
-from synapse.types import JsonDict, get_domain_from_id, get_localpart_from_id
+from synapse.types import (
+    JsonDict,
+    UserProfile,
+    get_domain_from_id,
+    get_localpart_from_id,
+)
 from synapse.util.caches.descriptors import cached
 
 logger = logging.getLogger(__name__)
@@ -591,6 +598,11 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         )
 
 
+class SearchResult(TypedDict):
+    limited: bool
+    results: List[UserProfile]
+
+
 class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
     # How many records do we calculate before sending it to
     # add_users_who_share_private_rooms?
@@ -718,7 +730,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
         users.update(rows)
         return list(users)
 
-    async def get_shared_rooms_for_users(
+    async def get_mutual_rooms_for_users(
         self, user_id: str, other_user_id: str
     ) -> Set[str]:
         """
@@ -732,7 +744,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
             A set of room ID's that the users share.
         """
 
-        def _get_shared_rooms_for_users_txn(
+        def _get_mutual_rooms_for_users_txn(
             txn: LoggingTransaction,
         ) -> List[Dict[str, str]]:
             txn.execute(
@@ -756,7 +768,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
             return rows
 
         rows = await self.db_pool.runInteraction(
-            "get_shared_rooms_for_users", _get_shared_rooms_for_users_txn
+            "get_mutual_rooms_for_users", _get_mutual_rooms_for_users_txn
         )
 
         return {row["room_id"] for row in rows}
@@ -777,7 +789,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
 
     async def search_user_dir(
         self, user_id: str, search_term: str, limit: int
-    ) -> JsonDict:
+    ) -> SearchResult:
         """Searches for users in directory
 
         Returns:
@@ -910,8 +922,11 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
             # This should be unreachable.
             raise Exception("Unrecognized database engine")
 
-        results = await self.db_pool.execute(
-            "search_user_dir", self.db_pool.cursor_to_dict, sql, *args
+        results = cast(
+            List[UserProfile],
+            await self.db_pool.execute(
+                "search_user_dir", self.db_pool.cursor_to_dict, sql, *args
+            ),
         )
 
         limited = len(results) > limit
diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py
index 9abc02046e..afb7d5054d 100644
--- a/synapse/storage/engines/__init__.py
+++ b/synapse/storage/engines/__init__.py
@@ -27,7 +27,7 @@ def create_engine(database_config) -> BaseDatabaseEngine:
 
     if name == "psycopg2":
         # Note that psycopg2cffi-compat provides the psycopg2 module on pypy.
-        import psycopg2  # type: ignore
+        import psycopg2
 
         return PostgresEngine(psycopg2, database_config)
 
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 808342fafb..e8d29e2870 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -47,17 +47,26 @@ class PostgresEngine(BaseDatabaseEngine):
         self.default_isolation_level = (
             self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
         )
+        self.config = database_config
 
     @property
     def single_threaded(self) -> bool:
         return False
 
+    def get_db_locale(self, txn):
+        txn.execute(
+            "SELECT datcollate, datctype FROM pg_database WHERE datname = current_database()"
+        )
+        collation, ctype = txn.fetchone()
+        return collation, ctype
+
     def check_database(self, db_conn, allow_outdated_version: bool = False):
         # Get the version of PostgreSQL that we're using. As per the psycopg2
         # docs: The number is formed by converting the major, minor, and
         # revision numbers into two-decimal-digit numbers and appending them
         # together. For example, version 8.1.5 will be returned as 80105
         self._version = db_conn.server_version
+        allow_unsafe_locale = self.config.get("allow_unsafe_locale", False)
 
         # Are we on a supported PostgreSQL version?
         if not allow_outdated_version and self._version < 100000:
@@ -72,33 +81,39 @@ class PostgresEngine(BaseDatabaseEngine):
                     "See docs/postgres.md for more information." % (rows[0][0],)
                 )
 
-            txn.execute(
-                "SELECT datcollate, datctype FROM pg_database WHERE datname = current_database()"
-            )
-            collation, ctype = txn.fetchone()
+            collation, ctype = self.get_db_locale(txn)
             if collation != "C":
                 logger.warning(
-                    "Database has incorrect collation of %r. Should be 'C'\n"
-                    "See docs/postgres.md for more information.",
+                    "Database has incorrect collation of %r. Should be 'C'",
                     collation,
                 )
+                if not allow_unsafe_locale:
+                    raise IncorrectDatabaseSetup(
+                        "Database has incorrect collation of %r. Should be 'C'\n"
+                        "See docs/postgres.md for more information. You can override this check by"
+                        "setting 'allow_unsafe_locale' to true in the database config.",
+                        collation,
+                    )
 
             if ctype != "C":
-                logger.warning(
-                    "Database has incorrect ctype of %r. Should be 'C'\n"
-                    "See docs/postgres.md for more information.",
-                    ctype,
-                )
+                if not allow_unsafe_locale:
+                    logger.warning(
+                        "Database has incorrect ctype of %r. Should be 'C'",
+                        ctype,
+                    )
+                    raise IncorrectDatabaseSetup(
+                        "Database has incorrect ctype of %r. Should be 'C'\n"
+                        "See docs/postgres.md for more information. You can override this check by"
+                        "setting 'allow_unsafe_locale' to true in the database config.",
+                        ctype,
+                    )
 
     def check_new_database(self, txn):
         """Gets called when setting up a brand new database. This allows us to
         apply stricter checks on new databases versus existing database.
         """
 
-        txn.execute(
-            "SELECT datcollate, datctype FROM pg_database WHERE datname = current_database()"
-        )
-        collation, ctype = txn.fetchone()
+        collation, ctype = self.get_db_locale(txn)
 
         errors = []
 
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index 7d543fdbe0..b402922817 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -1023,8 +1023,13 @@ class EventsPersistenceStorage:
 
         # Check if any of the changes that we don't have events for are joins.
         if events_to_check:
-            rows = await self.main_store.get_membership_from_event_ids(events_to_check)
-            is_still_joined = any(row["membership"] == Membership.JOIN for row in rows)
+            members = await self.main_store.get_membership_from_event_ids(
+                events_to_check
+            )
+            is_still_joined = any(
+                member and member.membership == Membership.JOIN
+                for member in members.values()
+            )
             if is_still_joined:
                 return True
 
@@ -1060,9 +1065,11 @@ class EventsPersistenceStorage:
             ), event_id in current_state.items()
             if typ == EventTypes.Member and not self.is_mine_id(state_key)
         ]
-        rows = await self.main_store.get_membership_from_event_ids(remote_event_ids)
+        members = await self.main_store.get_membership_from_event_ids(remote_event_ids)
         potentially_left_users.update(
-            row["user_id"] for row in rows if row["membership"] == Membership.JOIN
+            member.user_id
+            for member in members.values()
+            if member and member.membership == Membership.JOIN
         )
 
         return False
diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py
index 36ca2b8273..fba270150b 100644
--- a/synapse/storage/relations.py
+++ b/synapse/storage/relations.py
@@ -55,37 +55,6 @@ class PaginationChunk:
 
 
 @attr.s(frozen=True, slots=True, auto_attribs=True)
-class RelationPaginationToken:
-    """Pagination token for relation pagination API.
-
-    As the results are in topological order, we can use the
-    `topological_ordering` and `stream_ordering` fields of the events at the
-    boundaries of the chunk as pagination tokens.
-
-    Attributes:
-        topological: The topological ordering of the boundary event
-        stream: The stream ordering of the boundary event.
-    """
-
-    topological: int
-    stream: int
-
-    @staticmethod
-    def from_string(string: str) -> "RelationPaginationToken":
-        try:
-            t, s = string.split("-")
-            return RelationPaginationToken(int(t), int(s))
-        except ValueError:
-            raise SynapseError(400, "Invalid relation pagination token")
-
-    async def to_string(self, store: "DataStore") -> str:
-        return "%d-%d" % (self.topological, self.stream)
-
-    def as_tuple(self) -> Tuple[Any, ...]:
-        return attr.astuple(self)
-
-
-@attr.s(frozen=True, slots=True, auto_attribs=True)
 class AggregationPaginationToken:
     """Pagination token for relation aggregation pagination API.
 
diff --git a/synapse/storage/schema/main/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py
index 22a7901e15..4b4b166e37 100644
--- a/synapse/storage/schema/main/delta/30/as_users.py
+++ b/synapse/storage/schema/main/delta/30/as_users.py
@@ -36,7 +36,6 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs):
         config_files = config.appservice.app_service_config_files
     except AttributeError:
         logger.warning("Could not get app_service_config_files from config")
-        pass
 
     appservices = load_appservices(config.server.server_name, config_files)
 
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index e79ecf64a0..86f1a5373b 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -561,7 +561,7 @@ class StateGroupStorage:
         return state_group_delta.prev_group, state_group_delta.delta_ids
 
     async def get_state_groups_ids(
-        self, _room_id: str, event_ids: Iterable[str]
+        self, _room_id: str, event_ids: Collection[str]
     ) -> Dict[int, MutableStateMap[str]]:
         """Get the event IDs of all the state for the state groups for the given events
 
@@ -596,7 +596,7 @@ class StateGroupStorage:
         return group_to_state[state_group]
 
     async def get_state_groups(
-        self, room_id: str, event_ids: Iterable[str]
+        self, room_id: str, event_ids: Collection[str]
     ) -> Dict[int, List[EventBase]]:
         """Get the state groups for the given list of event_ids
 
@@ -648,7 +648,7 @@ class StateGroupStorage:
         return self.stores.state._get_state_groups_from_groups(groups, state_filter)
 
     async def get_state_for_events(
-        self, event_ids: Iterable[str], state_filter: Optional[StateFilter] = None
+        self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None
     ) -> Dict[str, StateMap[EventBase]]:
         """Given a list of event_ids and type tuples, return a list of state
         dicts for each event.
@@ -684,7 +684,7 @@ class StateGroupStorage:
         return {event: event_to_state[event] for event in event_ids}
 
     async def get_state_ids_for_events(
-        self, event_ids: Iterable[str], state_filter: Optional[StateFilter] = None
+        self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None
     ) -> Dict[str, StateMap[str]]:
         """
         Get the state dicts corresponding to a list of events, containing the event_ids
diff --git a/synapse/types.py b/synapse/types.py
index 53be3583a0..5ce2a5b0a5 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -34,6 +34,7 @@ from typing import (
 import attr
 from frozendict import frozendict
 from signedjson.key import decode_verify_key_bytes
+from typing_extensions import TypedDict
 from unpaddedbase64 import decode_base64
 from zope.interface import Interface
 
@@ -63,6 +64,10 @@ MutableStateMap = MutableMapping[StateKey, T]
 # JSON types. These could be made stronger, but will do for now.
 # A JSON-serialisable dict.
 JsonDict = Dict[str, Any]
+# A JSON-serialisable mapping; roughly speaking an immutable JSONDict.
+# Useful when you have a TypedDict which isn't going to be mutated and you don't want
+# to cast to JsonDict everywhere.
+JsonMapping = Mapping[str, Any]
 # A JSON-serialisable object.
 JsonSerializable = object
 
@@ -791,3 +796,9 @@ class UserInfo:
     is_deactivated: bool
     is_guest: bool
     is_shadow_banned: bool
+
+
+class UserProfile(TypedDict):
+    user_id: str
+    display_name: Optional[str]
+    avatar_url: Optional[str]
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 58b4220ff3..d8046b7553 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -31,13 +31,6 @@ from synapse.logging import context
 if typing.TYPE_CHECKING:
     pass
 
-# FIXME Mjolnir imports glob_to_regex from this file, but it was moved to
-#       matrix_common.
-#       As a temporary workaround, we import glob_to_regex here for
-#       compatibility with current versions of Mjolnir.
-# See https://github.com/matrix-org/mjolnir/pull/174
-from matrix_common.regex import glob_to_regex  # noqa
-
 logger = logging.getLogger(__name__)
 
 
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 60c03a66fd..6a8e844d63 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -18,9 +18,10 @@ import collections
 import inspect
 import itertools
 import logging
-from contextlib import contextmanager
+from contextlib import asynccontextmanager, contextmanager
 from typing import (
     Any,
+    AsyncIterator,
     Awaitable,
     Callable,
     Collection,
@@ -40,7 +41,7 @@ from typing import (
 )
 
 import attr
-from typing_extensions import ContextManager
+from typing_extensions import AsyncContextManager, Literal
 
 from twisted.internet import defer
 from twisted.internet.defer import CancelledError
@@ -96,6 +97,10 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]):
 
     __slots__ = ["_deferred", "_observers", "_result"]
 
+    _deferred: "defer.Deferred[_T]"
+    _observers: Union[List["defer.Deferred[_T]"], Tuple[()]]
+    _result: Union[None, Tuple[Literal[True], _T], Tuple[Literal[False], Failure]]
+
     def __init__(self, deferred: "defer.Deferred[_T]", consumeErrors: bool = False):
         object.__setattr__(self, "_deferred", deferred)
         object.__setattr__(self, "_result", None)
@@ -158,12 +163,14 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]):
         affect the underlying deferred.
         """
         if not self._result:
+            assert isinstance(self._observers, list)
             d: "defer.Deferred[_T]" = defer.Deferred()
             self._observers.append(d)
             return d
+        elif self._result[0]:
+            return defer.succeed(self._result[1])
         else:
-            success, res = self._result
-            return defer.succeed(res) if success else defer.fail(res)
+            return defer.fail(self._result[1])
 
     def observers(self) -> "Collection[defer.Deferred[_T]]":
         return self._observers
@@ -175,6 +182,8 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]):
         return self._result is not None and self._result[0] is True
 
     def get_result(self) -> Union[_T, Failure]:
+        if self._result is None:
+            raise ValueError(f"{self!r} has no result yet")
         return self._result[1]
 
     def __getattr__(self, name: str) -> Any:
@@ -483,7 +492,7 @@ class ReadWriteLock:
 
     Example:
 
-        with await read_write_lock.read("test_key"):
+        async with read_write_lock.read("test_key"):
             # do some work
     """
 
@@ -506,22 +515,24 @@ class ReadWriteLock:
         # Latest writer queued
         self.key_to_current_writer: Dict[str, defer.Deferred] = {}
 
-    async def read(self, key: str) -> ContextManager:
-        new_defer: "defer.Deferred[None]" = defer.Deferred()
-
-        curr_readers = self.key_to_current_readers.setdefault(key, set())
-        curr_writer = self.key_to_current_writer.get(key, None)
+    def read(self, key: str) -> AsyncContextManager:
+        @asynccontextmanager
+        async def _ctx_manager() -> AsyncIterator[None]:
+            new_defer: "defer.Deferred[None]" = defer.Deferred()
 
-        curr_readers.add(new_defer)
+            curr_readers = self.key_to_current_readers.setdefault(key, set())
+            curr_writer = self.key_to_current_writer.get(key, None)
 
-        # We wait for the latest writer to finish writing. We can safely ignore
-        # any existing readers... as they're readers.
-        if curr_writer:
-            await make_deferred_yieldable(curr_writer)
+            curr_readers.add(new_defer)
 
-        @contextmanager
-        def _ctx_manager() -> Iterator[None]:
             try:
+                # We wait for the latest writer to finish writing. We can safely ignore
+                # any existing readers... as they're readers.
+                # May raise a `CancelledError` if the `Deferred` wrapping us is
+                # cancelled. The `Deferred` we are waiting on must not be cancelled,
+                # since we do not own it.
+                if curr_writer:
+                    await make_deferred_yieldable(stop_cancellation(curr_writer))
                 yield
             finally:
                 with PreserveLoggingContext():
@@ -530,29 +541,35 @@ class ReadWriteLock:
 
         return _ctx_manager()
 
-    async def write(self, key: str) -> ContextManager:
-        new_defer: "defer.Deferred[None]" = defer.Deferred()
+    def write(self, key: str) -> AsyncContextManager:
+        @asynccontextmanager
+        async def _ctx_manager() -> AsyncIterator[None]:
+            new_defer: "defer.Deferred[None]" = defer.Deferred()
 
-        curr_readers = self.key_to_current_readers.get(key, set())
-        curr_writer = self.key_to_current_writer.get(key, None)
+            curr_readers = self.key_to_current_readers.get(key, set())
+            curr_writer = self.key_to_current_writer.get(key, None)
 
-        # We wait on all latest readers and writer.
-        to_wait_on = list(curr_readers)
-        if curr_writer:
-            to_wait_on.append(curr_writer)
+            # We wait on all latest readers and writer.
+            to_wait_on = list(curr_readers)
+            if curr_writer:
+                to_wait_on.append(curr_writer)
 
-        # We can clear the list of current readers since the new writer waits
-        # for them to finish.
-        curr_readers.clear()
-        self.key_to_current_writer[key] = new_defer
+            # We can clear the list of current readers since `new_defer` waits
+            # for them to finish.
+            curr_readers.clear()
+            self.key_to_current_writer[key] = new_defer
 
-        await make_deferred_yieldable(defer.gatherResults(to_wait_on))
-
-        @contextmanager
-        def _ctx_manager() -> Iterator[None]:
+            to_wait_on_defer = defer.gatherResults(to_wait_on)
             try:
+                # Wait for all current readers and the latest writer to finish.
+                # May raise a `CancelledError` immediately after the wait if the
+                # `Deferred` wrapping us is cancelled. We must only release the lock
+                # once we have acquired it, hence the use of `delay_cancellation`
+                # rather than `stop_cancellation`.
+                await make_deferred_yieldable(delay_cancellation(to_wait_on_defer))
                 yield
             finally:
+                # Release the lock.
                 with PreserveLoggingContext():
                     new_defer.callback(None)
                 # `self.key_to_current_writer[key]` may be missing if there was another
@@ -678,12 +695,48 @@ def stop_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
             Synapse logcontext rules.
 
     Returns:
-        A new `Deferred`, which will contain the result of the original `Deferred`,
-        but will not propagate cancellation through to the original. When cancelled,
-        the new `Deferred` will fail with a `CancelledError` and will not follow the
-        Synapse logcontext rules. `make_deferred_yieldable` should be used to wrap
-        the new `Deferred`.
+        A new `Deferred`, which will contain the result of the original `Deferred`.
+        The new `Deferred` will not propagate cancellation through to the original.
+        When cancelled, the new `Deferred` will fail with a `CancelledError`.
+
+        The new `Deferred` will not follow the Synapse logcontext rules and should be
+        wrapped with `make_deferred_yieldable`.
     """
-    new_deferred: defer.Deferred[T] = defer.Deferred()
+    new_deferred: "defer.Deferred[T]" = defer.Deferred()
+    deferred.chainDeferred(new_deferred)
+    return new_deferred
+
+
+def delay_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
+    """Delay cancellation of a `Deferred` until it resolves.
+
+    Has the same effect as `stop_cancellation`, but the returned `Deferred` will not
+    resolve with a `CancelledError` until the original `Deferred` resolves.
+
+    Args:
+        deferred: The `Deferred` to protect against cancellation. May optionally follow
+            the Synapse logcontext rules.
+
+    Returns:
+        A new `Deferred`, which will contain the result of the original `Deferred`.
+        The new `Deferred` will not propagate cancellation through to the original.
+        When cancelled, the new `Deferred` will wait until the original `Deferred`
+        resolves before failing with a `CancelledError`.
+
+        The new `Deferred` will follow the Synapse logcontext rules if `deferred`
+        follows the Synapse logcontext rules. Otherwise the new `Deferred` should be
+        wrapped with `make_deferred_yieldable`.
+    """
+
+    def handle_cancel(new_deferred: "defer.Deferred[T]") -> None:
+        # before the new deferred is cancelled, we `pause` it to stop the cancellation
+        # propagating. we then `unpause` it once the wrapped deferred completes, to
+        # propagate the exception.
+        new_deferred.pause()
+        new_deferred.errback(Failure(CancelledError()))
+
+        deferred.addBoth(lambda _: new_deferred.unpause())
+
+    new_deferred: "defer.Deferred[T]" = defer.Deferred(handle_cancel)
     deferred.chainDeferred(new_deferred)
     return new_deferred
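
As a usage sketch (hypothetical caller, not part of the patch): `read()` and `write()` now return async context managers directly, so callers enter them with `async with` rather than awaiting them first.

    from synapse.util.async_helpers import ReadWriteLock

    async def read_then_write(lock: ReadWriteLock) -> None:
        async with lock.read("some-key"):
            ...  # runs concurrently with other readers of "some-key"
        async with lock.write("some-key"):
            ...  # exclusive: waits for current readers and the previous writer
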
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 1cdead02f1..eda92d864d 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -20,6 +20,7 @@ from typing import (
     Any,
     Awaitable,
     Callable,
+    Collection,
     Dict,
     Generic,
     Hashable,
@@ -40,6 +41,7 @@ from twisted.python.failure import Failure
 
 from synapse.logging.context import make_deferred_yieldable, preserve_fn
 from synapse.util import unwrapFirstError
+from synapse.util.async_helpers import delay_cancellation
 from synapse.util.caches.deferred_cache import DeferredCache
 from synapse.util.caches.lrucache import LruCache
 
@@ -69,6 +71,7 @@ class _CacheDescriptorBase:
         self,
         orig: Callable[..., Any],
         num_args: Optional[int],
+        uncached_args: Optional[Collection[str]] = None,
         cache_context: bool = False,
     ):
         self.orig = orig
@@ -76,6 +79,13 @@ class _CacheDescriptorBase:
         arg_spec = inspect.getfullargspec(orig)
         all_args = arg_spec.args
 
+        # There's no reason that keyword-only arguments couldn't be supported,
+        # but right now they're buggy so do not allow them.
+        if arg_spec.kwonlyargs:
+            raise ValueError(
+                "_CacheDescriptorBase does not support keyword-only arguments."
+            )
+
         if "cache_context" in all_args:
             if not cache_context:
                 raise ValueError(
@@ -88,6 +98,9 @@ class _CacheDescriptorBase:
                 " named `cache_context`"
             )
 
+        if num_args is not None and uncached_args is not None:
+            raise ValueError("Cannot provide both num_args and uncached_args")
+
         if num_args is None:
             num_args = len(all_args) - 1
             if cache_context:
@@ -105,6 +118,12 @@ class _CacheDescriptorBase:
         # list of the names of the args used as the cache key
         self.arg_names = all_args[1 : num_args + 1]
 
+        # If there are args to not cache on, filter them out (and fix the size of num_args).
+        if uncached_args is not None:
+            include_arg_in_cache_key = [n not in uncached_args for n in self.arg_names]
+        else:
+            include_arg_in_cache_key = [True] * len(self.arg_names)
+
         # self.arg_defaults is a map of arg name to its default value for each
         # argument that has a default value
         if arg_spec.defaults:
@@ -119,8 +138,8 @@ class _CacheDescriptorBase:
 
         self.add_cache_context = cache_context
 
-        self.cache_key_builder = get_cache_key_builder(
-            self.arg_names, self.arg_defaults
+        self.cache_key_builder = _get_cache_key_builder(
+            self.arg_names, include_arg_in_cache_key, self.arg_defaults
         )
 
 
@@ -130,8 +149,7 @@ class _LruCachedFunction(Generic[F]):
 
 
 def lru_cache(
-    max_entries: int = 1000,
-    cache_context: bool = False,
+    *, max_entries: int = 1000, cache_context: bool = False
 ) -> Callable[[F], _LruCachedFunction[F]]:
     """A method decorator that applies a memoizing cache around the function.
 
@@ -186,7 +204,9 @@ class LruCacheDescriptor(_CacheDescriptorBase):
         max_entries: int = 1000,
         cache_context: bool = False,
     ):
-        super().__init__(orig, num_args=None, cache_context=cache_context)
+        super().__init__(
+            orig, num_args=None, uncached_args=None, cache_context=cache_context
+        )
         self.max_entries = max_entries
 
     def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]:
@@ -260,6 +280,9 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
         num_args: number of positional arguments (excluding ``self`` and
             ``cache_context``) to use as cache keys. Defaults to all named
             args of the function.
+        uncached_args: a list of argument names to not use as the cache key.
+            (``self`` and ``cache_context`` are always ignored.) Cannot be used
+            with num_args.
         tree:
         cache_context:
         iterable:
@@ -273,12 +296,18 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
         orig: Callable[..., Any],
         max_entries: int = 1000,
         num_args: Optional[int] = None,
+        uncached_args: Optional[Collection[str]] = None,
         tree: bool = False,
         cache_context: bool = False,
         iterable: bool = False,
         prune_unread_entries: bool = True,
     ):
-        super().__init__(orig, num_args=num_args, cache_context=cache_context)
+        super().__init__(
+            orig,
+            num_args=num_args,
+            uncached_args=uncached_args,
+            cache_context=cache_context,
+        )
 
         if tree and self.num_args < 2:
             raise RuntimeError(
@@ -322,6 +351,11 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
                 ret = defer.maybeDeferred(preserve_fn(self.orig), obj, *args, **kwargs)
                 ret = cache.set(cache_key, ret, callback=invalidate_callback)
 
+                # We started a new call to `self.orig`, so we must always wait for it to
+                # complete. Otherwise we might mark our current logging context as
+                # finished while `self.orig` is still using it in the background.
+                ret = delay_cancellation(ret)
+
             return make_deferred_yieldable(ret)
 
         wrapped = cast(_CachedFunction, _wrapped)
@@ -369,7 +403,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase):
                 but including list_name) to use as cache keys. Defaults to all
                 named args of the function.
         """
-        super().__init__(orig, num_args=num_args)
+        super().__init__(orig, num_args=num_args, uncached_args=None)
 
         self.list_name = list_name
 
@@ -482,6 +516,11 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase):
                 d = defer.gatherResults(cached_defers, consumeErrors=True).addCallbacks(
                     lambda _: results, unwrapFirstError
                 )
+                if missing:
+                    # We started a new call to `self.orig`, so we must always wait for it to
+                    # complete. Otherwise we might mark our current logging context as
+                    # finished while `self.orig` is still using it in the background.
+                    d = delay_cancellation(d)
                 return make_deferred_yieldable(d)
             else:
                 return defer.succeed(results)
@@ -530,8 +569,10 @@ class _CacheContext:
 
 
 def cached(
+    *,
     max_entries: int = 1000,
     num_args: Optional[int] = None,
+    uncached_args: Optional[Collection[str]] = None,
     tree: bool = False,
     cache_context: bool = False,
     iterable: bool = False,
@@ -541,6 +582,7 @@ def cached(
         orig,
         max_entries=max_entries,
         num_args=num_args,
+        uncached_args=uncached_args,
         tree=tree,
         cache_context=cache_context,
         iterable=iterable,
@@ -551,7 +593,7 @@ def cached(
 
 
 def cachedList(
-    cached_method_name: str, list_name: str, num_args: Optional[int] = None
+    *, cached_method_name: str, list_name: str, num_args: Optional[int] = None
 ) -> Callable[[F], _CachedFunction[F]]:
     """Creates a descriptor that wraps a function in a `CacheListDescriptor`.
 
@@ -590,13 +632,16 @@ def cachedList(
     return cast(Callable[[F], _CachedFunction[F]], func)
 
 
-def get_cache_key_builder(
-    param_names: Sequence[str], param_defaults: Mapping[str, Any]
+def _get_cache_key_builder(
+    param_names: Sequence[str],
+    include_params: Sequence[bool],
+    param_defaults: Mapping[str, Any],
 ) -> Callable[[Sequence[Any], Mapping[str, Any]], CacheKey]:
     """Construct a function which will build cache keys suitable for a cached function
 
     Args:
         param_names: list of formal parameter names for the cached function
+        include_params: list of bools indicating whether each parameter should be included in the cache key
         param_defaults: a mapping from parameter name to default value for that param
 
     Returns:
@@ -608,6 +653,7 @@ def get_cache_key_builder(
 
     if len(param_names) == 1:
         nm = param_names[0]
+        assert include_params[0] is True
 
         def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey:
             if nm in kwargs:
@@ -620,13 +666,18 @@ def get_cache_key_builder(
     else:
 
         def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey:
-            return tuple(_get_cache_key_gen(param_names, param_defaults, args, kwargs))
+            return tuple(
+                _get_cache_key_gen(
+                    param_names, include_params, param_defaults, args, kwargs
+                )
+            )
 
     return get_cache_key
 
 
 def _get_cache_key_gen(
     param_names: Iterable[str],
+    include_params: Iterable[bool],
     param_defaults: Mapping[str, Any],
     args: Sequence[Any],
     kwargs: Mapping[str, Any],
@@ -637,16 +688,18 @@ def _get_cache_key_gen(
     This is essentially the same operation as `inspect.getcallargs`, but optimised so
     that we don't need to inspect the target function for each call.
     """
-
     # We loop through each arg name, looking up if its in the `kwargs`,
     # otherwise using the next argument in `args`. If there are no more
     # args then we try looking the arg name up in the defaults.
     pos = 0
-    for nm in param_names:
+    for nm, inc in zip(param_names, include_params):
         if nm in kwargs:
-            yield kwargs[nm]
+            if inc:
+                yield kwargs[nm]
         elif pos < len(args):
-            yield args[pos]
+            if inc:
+                yield args[pos]
             pos += 1
         else:
-            yield param_defaults[nm]
+            if inc:
+                yield param_defaults[nm]
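
A hedged sketch of the new `uncached_args` option (the store class and method below are hypothetical): arguments named there are still passed to the wrapped function but are left out of the cache key.

    from synapse.util.caches.descriptors import cached

    class ExampleStore:
        @cached(uncached_args=("hint",))
        async def get_widget(self, widget_id: str, hint: bool = False) -> dict:
            # Only `widget_id` contributes to the cache key; calls differing only
            # in `hint` share the same cache entry.
            ...
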
diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py
index 563845f867..e78305f787 100644
--- a/synapse/util/caches/treecache.py
+++ b/synapse/util/caches/treecache.py
@@ -22,8 +22,6 @@ class TreeCacheNode(dict):
     leaves.
     """
 
-    pass
-
 
 class TreeCache:
     """
diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py
index 3a1f6b3c75..66f1da7502 100644
--- a/synapse/util/check_dependencies.py
+++ b/synapse/util/check_dependencies.py
@@ -1,3 +1,25 @@
+#  Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+"""
+This module exposes a single function which checks synapse's dependencies are present
+and correctly versioned. It makes use of `importlib.metadata` to do so. The details
+are a bit murky: there's no easy way to get a map from "extras" to the packages they
+require. But this is probably just symptomatic of Python's package management.
+"""
+
 import logging
 from typing import Iterable, NamedTuple, Optional
 
@@ -10,6 +32,8 @@ try:
 except ImportError:
     import importlib_metadata as metadata  # type: ignore[no-redef]
 
+__all__ = ["check_requirements"]
+
 
 class DependencyException(Exception):
     @property
@@ -29,7 +53,17 @@ class DependencyException(Exception):
             yield '"' + i + '"'
 
 
-EXTRAS = set(metadata.metadata(DISTRIBUTION_NAME).get_all("Provides-Extra"))
+DEV_EXTRAS = {"lint", "mypy", "test", "dev"}
+RUNTIME_EXTRAS = (
+    set(metadata.metadata(DISTRIBUTION_NAME).get_all("Provides-Extra")) - DEV_EXTRAS
+)
+VERSION = metadata.version(DISTRIBUTION_NAME)
+
+
+def _is_dev_dependency(req: Requirement) -> bool:
+    return req.marker is not None and any(
+        req.marker.evaluate({"extra": e}) for e in DEV_EXTRAS
+    )
 
 
 class Dependency(NamedTuple):
@@ -43,6 +77,9 @@ def _generic_dependencies() -> Iterable[Dependency]:
     assert requirements is not None
     for raw_requirement in requirements:
         req = Requirement(raw_requirement)
+        if _is_dev_dependency(req):
+            continue
+
         # https://packaging.pypa.io/en/latest/markers.html#usage notes that
         #   > Evaluating an extra marker with no environment is an error
         # so we pass in a dummy empty extra value here.
@@ -56,6 +93,8 @@ def _dependencies_for_extra(extra: str) -> Iterable[Dependency]:
     assert requirements is not None
     for raw_requirement in requirements:
         req = Requirement(raw_requirement)
+        if _is_dev_dependency(req):
+            continue
         # Exclude mandatory deps by only selecting deps needed with this extra.
         if (
             req.marker is not None
@@ -67,18 +106,39 @@ def _dependencies_for_extra(extra: str) -> Iterable[Dependency]:
 
 def _not_installed(requirement: Requirement, extra: Optional[str] = None) -> str:
     if extra:
-        return f"Need {requirement.name} for {extra}, but it is not installed"
+        return (
+            f"Synapse {VERSION} needs {requirement.name} for {extra}, "
+            f"but it is not installed"
+        )
     else:
-        return f"Need {requirement.name}, but it is not installed"
+        return f"Synapse {VERSION} needs {requirement.name}, but it is not installed"
 
 
 def _incorrect_version(
     requirement: Requirement, got: str, extra: Optional[str] = None
 ) -> str:
     if extra:
-        return f"Need {requirement} for {extra}, but got {requirement.name}=={got}"
+        return (
+            f"Synapse {VERSION} needs {requirement} for {extra}, "
+            f"but got {requirement.name}=={got}"
+        )
+    else:
+        return (
+            f"Synapse {VERSION} needs {requirement}, but got {requirement.name}=={got}"
+        )
+
+
+def _no_reported_version(requirement: Requirement, extra: Optional[str] = None) -> str:
+    if extra:
+        return (
+            f"Synapse {VERSION} needs {requirement} for {extra}, "
+            f"but can't determine {requirement.name}'s version"
+        )
     else:
-        return f"Need {requirement}, but got {requirement.name}=={got}"
+        return (
+            f"Synapse {VERSION} needs {requirement}, "
+            f"but can't determine {requirement.name}'s version"
+        )
 
 
 def check_requirements(extra: Optional[str] = None) -> None:
@@ -100,10 +160,10 @@ def check_requirements(extra: Optional[str] = None) -> None:
     # First work out which dependencies are required, and which are optional.
     if extra is None:
         dependencies = _generic_dependencies()
-    elif extra in EXTRAS:
+    elif extra in RUNTIME_EXTRAS:
         dependencies = _dependencies_for_extra(extra)
     else:
-        raise ValueError(f"Synapse does not provide the feature '{extra}'")
+        raise ValueError(f"Synapse {VERSION} does not provide the feature '{extra}'")
 
     deps_unfulfilled = []
     errors = []
@@ -116,7 +176,17 @@ def check_requirements(extra: Optional[str] = None) -> None:
                 deps_unfulfilled.append(requirement.name)
                 errors.append(_not_installed(requirement, extra))
         else:
-            if not requirement.specifier.contains(dist.version):
+            if dist.version is None:
+                # This shouldn't happen---it suggests a borked virtualenv. (See #12223)
+                # Try to give a vaguely helpful error message anyway.
+                # Type-ignore: the annotations don't reflect reality: see
+                #     https://github.com/python/typeshed/issues/7513
+                #     https://bugs.python.org/issue47060
+                deps_unfulfilled.append(requirement.name)  # type: ignore[unreachable]
+                errors.append(_no_reported_version(requirement, extra))
+
+            # We specify prereleases=True to allow prereleases such as RCs.
+            elif not requirement.specifier.contains(dist.version, prereleases=True):
                 deps_unfulfilled.append(requirement.name)
                 errors.append(_incorrect_version(requirement, dist.version, extra))
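
A small aside, using the `packaging` library this module already depends on, to show why `prereleases=True` matters in the check above: without it, pre-release versions are rejected by default.

    from packaging.requirements import Requirement

    req = Requirement("attrs>=20.3.0")
    # Pre-releases such as RCs do not satisfy a specifier by default...
    assert not req.specifier.contains("21.1.0rc1")
    # ...but they do once prereleases=True is passed, matching the code above.
    assert req.specifier.contains("21.1.0rc1", prereleases=True)
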
 
diff --git a/synapse/util/templates.py b/synapse/util/templates.py
index 12941065ca..fb758b7180 100644
--- a/synapse/util/templates.py
+++ b/synapse/util/templates.py
@@ -64,6 +64,7 @@ def build_jinja_env(
         {
             "format_ts": _format_ts_filter,
             "mxc_to_http": _create_mxc_to_http_filter(config.server.public_baseurl),
+            "localpart_from_email": _localpart_from_email_filter,
         }
     )
 
@@ -112,3 +113,7 @@ def _create_mxc_to_http_filter(
 
 def _format_ts_filter(value: int, format: str) -> str:
     return time.strftime(format, time.localtime(value / 1000))
+
+
+def _localpart_from_email_filter(address: str) -> str:
+    return address.rsplit("@", 1)[0]
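
The new filter keeps everything before the final `@`; a hypothetical Jinja template might invoke it as `{{ user_email | localpart_from_email }}`. A tiny sketch of the behaviour:

    def localpart_from_email(address: str) -> str:
        return address.rsplit("@", 1)[0]

    assert localpart_from_email("alice@example.com") == "alice"
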
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 1b970ce479..49519eb8f5 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -14,12 +14,7 @@
 import logging
 from typing import Dict, FrozenSet, List, Optional
 
-from synapse.api.constants import (
-    AccountDataTypes,
-    EventTypes,
-    HistoryVisibility,
-    Membership,
-)
+from synapse.api.constants import EventTypes, HistoryVisibility, Membership
 from synapse.events import EventBase
 from synapse.events.utils import prune_event
 from synapse.storage import Storage
@@ -81,20 +76,14 @@ async def filter_events_for_client(
 
     types = ((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id))
 
+    # we exclude outliers at this point, and then handle them separately later
     event_id_to_state = await storage.state.get_state_for_events(
-        frozenset(e.event_id for e in events),
+        frozenset(e.event_id for e in events if not e.internal_metadata.outlier),
         state_filter=StateFilter.from_types(types),
     )
 
-    ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user(
-        user_id, AccountDataTypes.IGNORED_USER_LIST
-    )
-
-    ignore_list: FrozenSet[str] = frozenset()
-    if ignore_dict_content:
-        ignored_users_dict = ignore_dict_content.get("ignored_users", {})
-        if isinstance(ignored_users_dict, dict):
-            ignore_list = frozenset(ignored_users_dict.keys())
+    # Get the users who are ignored by the requesting user.
+    ignore_list = await storage.main.ignored_users(user_id)
 
     erased_senders = await storage.main.are_users_erased(e.sender for e in events)
 
@@ -154,6 +143,17 @@ async def filter_events_for_client(
         if event.event_id in always_include_ids:
             return event
 
+        # we need to handle outliers separately, since we don't have the room state.
+        if event.internal_metadata.outlier:
+            # Normally these can't be seen by clients, but we make an exception
+            # for out-of-band membership events (eg, incoming invites, or rejections of
+            # said invite) for the user themselves.
+            if event.type == EventTypes.Member and event.state_key == user_id:
+                logger.debug("Returning out-of-band-membership event %s", event)
+                return event
+
+            return None
+
         state = event_id_to_state[event.event_id]
 
         # get the room_visibility at the time of the event.
@@ -198,6 +198,9 @@ async def filter_events_for_client(
 
             # Always allow the user to see their own leave events, otherwise
             # they won't see the room disappear if they reject the invite
+            #
+            # (Note this doesn't work for out-of-band invite rejections, which don't
+            # have prev_state populated. They are handled above in the outlier code.)
             if membership == "leave" and (
                 prev_membership == "join" or prev_membership == "invite"
             ):
diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py
index 9bd6275e92..edc584d0cf 100644
--- a/tests/appservice/test_appservice.py
+++ b/tests/appservice/test_appservice.py
@@ -36,7 +36,10 @@ class ApplicationServiceTestCase(unittest.TestCase):
             hostname="matrix.org",  # only used by get_groups_for_user
         )
         self.event = Mock(
-            type="m.something", room_id="!foo:bar", sender="@someone:somewhere"
+            event_id="$abc:xyz",
+            type="m.something",
+            room_id="!foo:bar",
+            sender="@someone:somewhere",
         )
 
         self.store = Mock()
@@ -50,7 +53,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.assertTrue(
             (
                 yield defer.ensureDeferred(
-                    self.service.is_interested(self.event, self.store)
+                    self.service.is_interested_in_event(
+                        self.event.event_id, self.event, self.store
+                    )
                 )
             )
         )
@@ -62,7 +67,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.assertFalse(
             (
                 yield defer.ensureDeferred(
-                    self.service.is_interested(self.event, self.store)
+                    self.service.is_interested_in_event(
+                        self.event.event_id, self.event, self.store
+                    )
                 )
             )
         )
@@ -76,7 +83,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.assertTrue(
             (
                 yield defer.ensureDeferred(
-                    self.service.is_interested(self.event, self.store)
+                    self.service.is_interested_in_event(
+                        self.event.event_id, self.event, self.store
+                    )
                 )
             )
         )
@@ -90,7 +99,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.assertTrue(
             (
                 yield defer.ensureDeferred(
-                    self.service.is_interested(self.event, self.store)
+                    self.service.is_interested_in_event(
+                        self.event.event_id, self.event, self.store
+                    )
                 )
             )
         )
@@ -104,7 +115,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.assertFalse(
             (
                 yield defer.ensureDeferred(
-                    self.service.is_interested(self.event, self.store)
+                    self.service.is_interested_in_event(
+                        self.event.event_id, self.event, self.store
+                    )
                 )
             )
         )
@@ -121,7 +134,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.assertTrue(
             (
                 yield defer.ensureDeferred(
-                    self.service.is_interested(self.event, self.store)
+                    self.service.is_interested_in_event(
+                        self.event.event_id, self.event, self.store
+                    )
                 )
             )
         )
@@ -174,7 +189,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.assertFalse(
             (
                 yield defer.ensureDeferred(
-                    self.service.is_interested(self.event, self.store)
+                    self.service.is_interested_in_event(
+                        self.event.event_id, self.event, self.store
+                    )
                 )
             )
         )
@@ -191,7 +208,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.assertTrue(
             (
                 yield defer.ensureDeferred(
-                    self.service.is_interested(self.event, self.store)
+                    self.service.is_interested_in_event(
+                        self.event.event_id, self.event, self.store
+                    )
                 )
             )
         )
@@ -207,7 +226,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.assertTrue(
             (
                 yield defer.ensureDeferred(
-                    self.service.is_interested(self.event, self.store)
+                    self.service.is_interested_in_event(
+                        self.event.event_id, self.event, self.store
+                    )
                 )
             )
         )
@@ -225,7 +246,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.assertTrue(
             (
                 yield defer.ensureDeferred(
-                    self.service.is_interested(event=self.event, store=self.store)
+                    self.service.is_interested_in_event(
+                        self.event.event_id, self.event, self.store
+                    )
                 )
             )
         )
diff --git a/tests/config/test_background_update.py b/tests/config/test_background_update.py
new file mode 100644
index 0000000000..0c32c1ca29
--- /dev/null
+++ b/tests/config/test_background_update.py
@@ -0,0 +1,58 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import yaml
+
+from synapse.storage.background_updates import BackgroundUpdater
+
+from tests.unittest import HomeserverTestCase, override_config
+
+
+class BackgroundUpdateConfigTestCase(HomeserverTestCase):
+    # Tests that the default values in the config are correctly loaded. Note that the default
+    # values are loaded when the corresponding config options are commented out, which is why there isn't
+    # a config specified here.
+    def test_default_configuration(self):
+        background_updater = BackgroundUpdater(
+            self.hs, self.hs.get_datastores().main.db_pool
+        )
+
+        self.assertEqual(background_updater.minimum_background_batch_size, 1)
+        self.assertEqual(background_updater.default_background_batch_size, 100)
+        self.assertEqual(background_updater.sleep_enabled, True)
+        self.assertEqual(background_updater.sleep_duration_ms, 1000)
+        self.assertEqual(background_updater.update_duration_ms, 100)
+
+    # Tests that non-default values for the config options are properly picked up and passed on.
+    @override_config(
+        yaml.safe_load(
+            """
+            background_updates:
+                background_update_duration_ms: 1000
+                sleep_enabled: false
+                sleep_duration_ms: 600
+                min_batch_size: 5
+                default_batch_size: 50
+            """
+        )
+    )
+    def test_custom_configuration(self):
+        background_updater = BackgroundUpdater(
+            self.hs, self.hs.get_datastores().main.db_pool
+        )
+
+        self.assertEqual(background_updater.minimum_background_batch_size, 5)
+        self.assertEqual(background_updater.default_background_batch_size, 50)
+        self.assertEqual(background_updater.sleep_enabled, False)
+        self.assertEqual(background_updater.sleep_duration_ms, 600)
+        self.assertEqual(background_updater.update_duration_ms, 1000)
diff --git a/tests/config/test_registration_config.py b/tests/config/test_registration_config.py
index 17a84d20d8..2acdb6ac61 100644
--- a/tests/config/test_registration_config.py
+++ b/tests/config/test_registration_config.py
@@ -11,14 +11,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+import synapse.app.homeserver
 from synapse.config import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 
-from tests.unittest import TestCase
+from tests.config.utils import ConfigFileTestCase
 from tests.utils import default_config
 
 
-class RegistrationConfigTestCase(TestCase):
+class RegistrationConfigTestCase(ConfigFileTestCase):
     def test_session_lifetime_must_not_be_exceeded_by_smaller_lifetimes(self):
         """
         session_lifetime should logically be larger than, or at least as large as,
@@ -76,3 +78,19 @@ class RegistrationConfigTestCase(TestCase):
         HomeServerConfig().parse_config_dict(
             {"session_lifetime": "31m", "refresh_token_lifetime": "31m", **config_dict}
         )
+
+    def test_refuse_to_start_if_open_registration_and_no_verification(self):
+        self.generate_config()
+        self.add_lines_to_config(
+            [
+                " ",
+                "enable_registration: true",
+                "registrations_require_3pid: []",
+                "enable_registration_captcha: false",
+                "registration_requires_token: false",
+            ]
+        )
+
+        # Test that allowing open registration without verification raises an error
+        with self.assertRaises(ConfigError):
+            synapse.app.homeserver.setup(["-c", self.config_file])
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 45e3395b33..00ad19e446 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -16,6 +16,7 @@ from synapse.api.constants import EventContentFields
 from synapse.api.room_versions import RoomVersions
 from synapse.events import make_event_from_dict
 from synapse.events.utils import (
+    SerializeEventConfig,
     copy_power_levels_contents,
     prune_event,
     serialize_event,
@@ -392,7 +393,9 @@ class PruneEventTestCase(unittest.TestCase):
 
 class SerializeEventTestCase(unittest.TestCase):
     def serialize(self, ev, fields):
-        return serialize_event(ev, 1479807801915, only_event_fields=fields)
+        return serialize_event(
+            ev, 1479807801915, config=SerializeEventConfig(only_event_fields=fields)
+        )
 
     def test_event_fields_works_with_keys(self):
         self.assertEqual(
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index 60e0c31f43..e90592855a 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -201,9 +201,12 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
         self.assertEqual(len(self.edus), 1)
         stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", None)
 
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
+
         # a second call should produce no new device EDUs
         self.hs.get_federation_sender().send_device_messages("host2")
-        self.pump()
         self.assertEqual(self.edus, [])
 
         # a second device
@@ -232,6 +235,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
         device1_signing_key = self.generate_and_upload_device_signing_key(u1, "D1")
         device2_signing_key = self.generate_and_upload_device_signing_key(u1, "D2")
 
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
+
         # expect two more edus
         self.assertEqual(len(self.edus), 2)
         stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", stream_id)
@@ -265,6 +272,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
             e2e_handler.upload_signing_keys_for_user(u1, cross_signing_keys)
         )
 
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
+
         # expect signing key update edu
         self.assertEqual(len(self.edus), 2)
         self.assertEqual(self.edus.pop(0)["edu_type"], "m.signing_key_update")
@@ -284,6 +295,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
         )
         self.assertEqual(ret["failures"], {})
 
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
+
         # expect two edus, in one or two transactions. We don't know what order the
         # devices will be updated.
         self.assertEqual(len(self.edus), 2)
@@ -307,6 +322,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
         self.login("user", "pass", device_id="D2")
         self.login("user", "pass", device_id="D3")
 
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
+
         # expect three edus
         self.assertEqual(len(self.edus), 3)
         stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", None)
@@ -318,6 +337,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
             self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"])
         )
 
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
+
         # expect three edus, in an unknown order
         self.assertEqual(len(self.edus), 3)
         for edu in self.edus:
@@ -350,12 +373,19 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
             self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"])
         )
 
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
+
         self.assertGreaterEqual(mock_send_txn.call_count, 4)
 
         # recover the server
         mock_send_txn.side_effect = self.record_transaction
         self.hs.get_federation_sender().send_device_messages("host2")
-        self.pump()
+
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
 
         # for each device, there should be a single update
         self.assertEqual(len(self.edus), 3)
@@ -390,6 +420,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
             self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"])
         )
 
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
+
         self.assertGreaterEqual(mock_send_txn.call_count, 4)
 
         # run the prune job
@@ -401,7 +435,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
         # recover the server
         mock_send_txn.side_effect = self.record_transaction
         self.hs.get_federation_sender().send_device_messages("host2")
-        self.pump()
+
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
 
         # there should be a single update for this user.
         self.assertEqual(len(self.edus), 1)
@@ -435,6 +472,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
         self.login("user", "pass", device_id="D2")
         self.login("user", "pass", device_id="D3")
 
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
+
         # delete them again
         self.get_success(
             self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"])
@@ -451,7 +492,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
         # recover the server
         mock_send_txn.side_effect = self.record_transaction
         self.hs.get_federation_sender().send_device_messages("host2")
-        self.pump()
+
+        # We queue up device list updates to be sent over federation, so we
+        # advance to clear the queue.
+        self.reactor.advance(1)
 
         # ... and we should get a single update for this user.
         self.assertEqual(len(self.edus), 1)
diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py
index abf2a0fe0d..c1579dac61 100644
--- a/tests/handlers/test_admin.py
+++ b/tests/handlers/test_admin.py
@@ -15,11 +15,15 @@
 from collections import Counter
 from unittest.mock import Mock
 
+from twisted.test.proto_helpers import MemoryReactor
+
 import synapse.rest.admin
 import synapse.storage
 from synapse.api.constants import EventTypes, JoinRules
 from synapse.api.room_versions import RoomVersions
 from synapse.rest.client import knock, login, room
+from synapse.server import HomeServer
+from synapse.util import Clock
 
 from tests import unittest
 
@@ -32,7 +36,7 @@ class ExfiltrateData(unittest.HomeserverTestCase):
         knock.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.admin_handler = hs.get_admin_handler()
 
         self.user1 = self.register_user("user1", "password")
@@ -41,7 +45,7 @@ class ExfiltrateData(unittest.HomeserverTestCase):
         self.user2 = self.register_user("user2", "password")
         self.token2 = self.login("user2", "password")
 
-    def test_single_public_joined_room(self):
+    def test_single_public_joined_room(self) -> None:
         """Test that we write *all* events for a public room"""
         room_id = self.helper.create_room_as(
             self.user1, tok=self.token1, is_public=True
@@ -74,7 +78,7 @@ class ExfiltrateData(unittest.HomeserverTestCase):
         self.assertEqual(counter[(EventTypes.Member, self.user1)], 1)
         self.assertEqual(counter[(EventTypes.Member, self.user2)], 1)
 
-    def test_single_private_joined_room(self):
+    def test_single_private_joined_room(self) -> None:
         """Tests that we correctly write state when we can't see all events in
         a room.
         """
@@ -112,7 +116,7 @@ class ExfiltrateData(unittest.HomeserverTestCase):
         self.assertEqual(counter[(EventTypes.Member, self.user1)], 1)
         self.assertEqual(counter[(EventTypes.Member, self.user2)], 1)
 
-    def test_single_left_room(self):
+    def test_single_left_room(self) -> None:
         """Tests that we don't see events in the room after we leave."""
         room_id = self.helper.create_room_as(self.user1, tok=self.token1)
         self.helper.send(room_id, body="Hello!", tok=self.token1)
@@ -144,7 +148,7 @@ class ExfiltrateData(unittest.HomeserverTestCase):
         self.assertEqual(counter[(EventTypes.Member, self.user1)], 1)
         self.assertEqual(counter[(EventTypes.Member, self.user2)], 2)
 
-    def test_single_left_rejoined_private_room(self):
+    def test_single_left_rejoined_private_room(self) -> None:
         """Tests that we see the correct events in private rooms when we
         repeatedly join and leave.
         """
@@ -185,7 +189,7 @@ class ExfiltrateData(unittest.HomeserverTestCase):
         self.assertEqual(counter[(EventTypes.Member, self.user1)], 1)
         self.assertEqual(counter[(EventTypes.Member, self.user2)], 3)
 
-    def test_invite(self):
+    def test_invite(self) -> None:
         """Tests that pending invites get handled correctly."""
         room_id = self.helper.create_room_as(self.user1, tok=self.token1)
         self.helper.send(room_id, body="Hello!", tok=self.token1)
@@ -204,7 +208,7 @@ class ExfiltrateData(unittest.HomeserverTestCase):
         self.assertEqual(args[1].content["membership"], "invite")
         self.assertTrue(args[2])  # Assert there is at least one bit of state
 
-    def test_knock(self):
+    def test_knock(self) -> None:
         """Tests that knocks get handled correctly."""
         # create a knockable v7 room
         room_id = self.helper.create_room_as(
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index 072e6bbcdd..cead9f90df 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -59,11 +59,11 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         self.event_source = hs.get_event_sources()
 
     def test_notify_interested_services(self):
-        interested_service = self._mkservice(is_interested=True)
+        interested_service = self._mkservice(is_interested_in_event=True)
         services = [
-            self._mkservice(is_interested=False),
+            self._mkservice(is_interested_in_event=False),
             interested_service,
-            self._mkservice(is_interested=False),
+            self._mkservice(is_interested_in_event=False),
         ]
 
         self.mock_as_api.query_user.return_value = make_awaitable(True)
@@ -85,7 +85,7 @@ class AppServiceHandlerTestCase(unittest.TestCase):
 
     def test_query_user_exists_unknown_user(self):
         user_id = "@someone:anywhere"
-        services = [self._mkservice(is_interested=True)]
+        services = [self._mkservice(is_interested_in_event=True)]
         services[0].is_interested_in_user.return_value = True
         self.mock_store.get_app_services.return_value = services
         self.mock_store.get_user_by_id.return_value = make_awaitable(None)
@@ -102,7 +102,7 @@ class AppServiceHandlerTestCase(unittest.TestCase):
 
     def test_query_user_exists_known_user(self):
         user_id = "@someone:anywhere"
-        services = [self._mkservice(is_interested=True)]
+        services = [self._mkservice(is_interested_in_event=True)]
         services[0].is_interested_in_user.return_value = True
         self.mock_store.get_app_services.return_value = services
         self.mock_store.get_user_by_id.return_value = make_awaitable({"name": user_id})
@@ -127,11 +127,11 @@ class AppServiceHandlerTestCase(unittest.TestCase):
 
         room_id = "!alpha:bet"
         servers = ["aperture"]
-        interested_service = self._mkservice_alias(is_interested_in_alias=True)
+        interested_service = self._mkservice_alias(is_room_alias_in_namespace=True)
         services = [
-            self._mkservice_alias(is_interested_in_alias=False),
+            self._mkservice_alias(is_room_alias_in_namespace=False),
             interested_service,
-            self._mkservice_alias(is_interested_in_alias=False),
+            self._mkservice_alias(is_room_alias_in_namespace=False),
         ]
 
         self.mock_as_api.query_alias.return_value = make_awaitable(True)
@@ -275,7 +275,7 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         to be pushed out to interested appservices, and that the stream ID is
         updated accordingly.
         """
-        interested_service = self._mkservice(is_interested=True)
+        interested_service = self._mkservice(is_interested_in_event=True)
         services = [interested_service]
         self.mock_store.get_app_services.return_value = services
         self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable(
@@ -304,7 +304,7 @@ class AppServiceHandlerTestCase(unittest.TestCase):
         Test sending out of order ephemeral events to the appservice handler
         are ignored.
         """
-        interested_service = self._mkservice(is_interested=True)
+        interested_service = self._mkservice(is_interested_in_event=True)
         services = [interested_service]
 
         self.mock_store.get_app_services.return_value = services
@@ -325,17 +325,45 @@ class AppServiceHandlerTestCase(unittest.TestCase):
             interested_service, ephemeral=[]
         )
 
-    def _mkservice(self, is_interested, protocols=None):
+    def _mkservice(
+        self, is_interested_in_event: bool, protocols: Optional[Iterable] = None
+    ) -> Mock:
+        """
+        Create a new mock representing an ApplicationService.
+
+        Args:
+            is_interested_in_event: Whether this application service will be considered
+                interested in all events.
+            protocols: The third-party protocols that this application service claims to
+                support.
+
+        Returns:
+            A mock representing the ApplicationService.
+        """
         service = Mock()
-        service.is_interested.return_value = make_awaitable(is_interested)
+        service.is_interested_in_event.return_value = make_awaitable(
+            is_interested_in_event
+        )
         service.token = "mock_service_token"
         service.url = "mock_service_url"
         service.protocols = protocols
         return service
 
-    def _mkservice_alias(self, is_interested_in_alias):
+    def _mkservice_alias(self, is_room_alias_in_namespace: bool) -> Mock:
+        """
+        Create a new mock representing an ApplicationService that is or is not interested
+        in any given room alias.
+
+        Args:
+            is_room_alias_in_namespace: If true, the application service will be interested
+                in all room aliases that are queried against it. If false, the application
+                service will not be interested in any room aliases.
+
+        Returns:
+            A mock representing the ApplicationService.
+        """
         service = Mock()
-        service.is_interested_in_alias.return_value = is_interested_in_alias
+        service.is_room_alias_in_namespace.return_value = is_room_alias_in_namespace
         service.token = "mock_service_token"
         service.url = "mock_service_url"
         return service
diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py
index 0c6e55e725..67a7829769 100644
--- a/tests/handlers/test_auth.py
+++ b/tests/handlers/test_auth.py
@@ -15,8 +15,12 @@ from unittest.mock import Mock
 
 import pymacaroons
 
+from twisted.test.proto_helpers import MemoryReactor
+
 from synapse.api.errors import AuthError, ResourceLimitError
 from synapse.rest import admin
+from synapse.server import HomeServer
+from synapse.util import Clock
 
 from tests import unittest
 from tests.test_utils import make_awaitable
@@ -27,7 +31,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
         admin.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.auth_handler = hs.get_auth_handler()
         self.macaroon_generator = hs.get_macaroon_generator()
 
@@ -42,23 +46,23 @@ class AuthTestCase(unittest.HomeserverTestCase):
 
         self.user1 = self.register_user("a_user", "pass")
 
-    def test_macaroon_caveats(self):
+    def test_macaroon_caveats(self) -> None:
         token = self.macaroon_generator.generate_guest_access_token("a_user")
         macaroon = pymacaroons.Macaroon.deserialize(token)
 
-        def verify_gen(caveat):
+        def verify_gen(caveat: str) -> bool:
             return caveat == "gen = 1"
 
-        def verify_user(caveat):
+        def verify_user(caveat: str) -> bool:
             return caveat == "user_id = a_user"
 
-        def verify_type(caveat):
+        def verify_type(caveat: str) -> bool:
             return caveat == "type = access"
 
-        def verify_nonce(caveat):
+        def verify_nonce(caveat: str) -> bool:
             return caveat.startswith("nonce =")
 
-        def verify_guest(caveat):
+        def verify_guest(caveat: str) -> bool:
             return caveat == "guest = true"
 
         v = pymacaroons.Verifier()
@@ -69,7 +73,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
         v.satisfy_general(verify_guest)
         v.verify(macaroon, self.hs.config.key.macaroon_secret_key)
 
-    def test_short_term_login_token_gives_user_id(self):
+    def test_short_term_login_token_gives_user_id(self) -> None:
         token = self.macaroon_generator.generate_short_term_login_token(
             self.user1, "", duration_in_ms=5000
         )
@@ -84,7 +88,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
             AuthError,
         )
 
-    def test_short_term_login_token_gives_auth_provider(self):
+    def test_short_term_login_token_gives_auth_provider(self) -> None:
         token = self.macaroon_generator.generate_short_term_login_token(
             self.user1, auth_provider_id="my_idp"
         )
@@ -92,7 +96,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
         self.assertEqual(self.user1, res.user_id)
         self.assertEqual("my_idp", res.auth_provider_id)
 
-    def test_short_term_login_token_cannot_replace_user_id(self):
+    def test_short_term_login_token_cannot_replace_user_id(self) -> None:
         token = self.macaroon_generator.generate_short_term_login_token(
             self.user1, "", duration_in_ms=5000
         )
@@ -112,7 +116,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
             AuthError,
         )
 
-    def test_mau_limits_disabled(self):
+    def test_mau_limits_disabled(self) -> None:
         self.auth_blocking._limit_usage_by_mau = False
         # Ensure does not throw exception
         self.get_success(
@@ -127,7 +131,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
             )
         )
 
-    def test_mau_limits_exceeded_large(self):
+    def test_mau_limits_exceeded_large(self) -> None:
         self.auth_blocking._limit_usage_by_mau = True
         self.hs.get_datastores().main.get_monthly_active_count = Mock(
             return_value=make_awaitable(self.large_number_of_users)
@@ -150,7 +154,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
             ResourceLimitError,
         )
 
-    def test_mau_limits_parity(self):
+    def test_mau_limits_parity(self) -> None:
         # Ensure we're not at the unix epoch.
         self.reactor.advance(1)
         self.auth_blocking._limit_usage_by_mau = True
@@ -189,7 +193,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
             )
         )
 
-    def test_mau_limits_not_exceeded(self):
+    def test_mau_limits_not_exceeded(self) -> None:
         self.auth_blocking._limit_usage_by_mau = True
 
         self.hs.get_datastores().main.get_monthly_active_count = Mock(
@@ -211,7 +215,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
             )
         )
 
-    def _get_macaroon(self):
+    def _get_macaroon(self) -> pymacaroons.Macaroon:
         token = self.macaroon_generator.generate_short_term_login_token(
             self.user1, "", duration_in_ms=5000
         )
diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py
index a267228846..a54aa29cf1 100644
--- a/tests/handlers/test_cas.py
+++ b/tests/handlers/test_cas.py
@@ -11,9 +11,14 @@
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
+from typing import Any, Dict
 from unittest.mock import Mock
 
+from twisted.test.proto_helpers import MemoryReactor
+
 from synapse.handlers.cas import CasResponse
+from synapse.server import HomeServer
+from synapse.util import Clock
 
 from tests.test_utils import simple_async_mock
 from tests.unittest import HomeserverTestCase, override_config
@@ -24,7 +29,7 @@ SERVER_URL = "https://issuer/"
 
 
 class CasHandlerTestCase(HomeserverTestCase):
-    def default_config(self):
+    def default_config(self) -> Dict[str, Any]:
         config = super().default_config()
         config["public_baseurl"] = BASE_URL
         cas_config = {
@@ -40,7 +45,7 @@ class CasHandlerTestCase(HomeserverTestCase):
 
         return config
 
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         hs = self.setup_test_homeserver()
 
         self.handler = hs.get_cas_handler()
@@ -51,7 +56,7 @@ class CasHandlerTestCase(HomeserverTestCase):
 
         return hs
 
-    def test_map_cas_user_to_user(self):
+    def test_map_cas_user_to_user(self) -> None:
         """Ensure that mapping the CAS user returned from a provider to an MXID works properly."""
 
         # stub out the auth handler
@@ -75,7 +80,7 @@ class CasHandlerTestCase(HomeserverTestCase):
             auth_provider_session_id=None,
         )
 
-    def test_map_cas_user_to_existing_user(self):
+    def test_map_cas_user_to_existing_user(self) -> None:
         """Existing users can log in with CAS account."""
         store = self.hs.get_datastores().main
         self.get_success(
@@ -119,7 +124,7 @@ class CasHandlerTestCase(HomeserverTestCase):
             auth_provider_session_id=None,
         )
 
-    def test_map_cas_user_to_invalid_localpart(self):
+    def test_map_cas_user_to_invalid_localpart(self) -> None:
         """CAS automaps invalid characters to base-64 encoding."""
 
         # stub out the auth handler
@@ -150,7 +155,7 @@ class CasHandlerTestCase(HomeserverTestCase):
             }
         }
     )
-    def test_required_attributes(self):
+    def test_required_attributes(self) -> None:
         """The required attributes must be met from the CAS response."""
 
         # stub out the auth handler
@@ -166,7 +171,7 @@ class CasHandlerTestCase(HomeserverTestCase):
         auth_handler.complete_sso_login.assert_not_called()
 
         # The response doesn't have any department.
-        cas_response = CasResponse("test_user", {"userGroup": "staff"})
+        cas_response = CasResponse("test_user", {"userGroup": ["staff"]})
         request.reset_mock()
         self.get_success(
             self.handler._handle_cas_response(request, cas_response, "redirect_uri", "")
diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py
index ddda36c5a9..3a10791226 100644
--- a/tests/handlers/test_deactivate_account.py
+++ b/tests/handlers/test_deactivate_account.py
@@ -39,7 +39,7 @@ class DeactivateAccountTestCase(HomeserverTestCase):
         self.user = self.register_user("user", "pass")
         self.token = self.login("user", "pass")
 
-    def _deactivate_my_account(self):
+    def _deactivate_my_account(self) -> None:
         """
         Deactivates the account `self.user` using `self.token` and asserts
         that it returns a 200 success code.
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 683677fd07..01ea7d2a42 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -14,9 +14,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import synapse.api.errors
-import synapse.handlers.device
-import synapse.storage
+from typing import Optional
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.errors import NotFoundError, SynapseError
+from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN
+from synapse.server import HomeServer
+from synapse.util import Clock
 
 from tests import unittest
 
@@ -25,28 +30,27 @@ user2 = "@theresa:bbb"
 
 
 class DeviceTestCase(unittest.HomeserverTestCase):
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         hs = self.setup_test_homeserver("server", federation_http_client=None)
         self.handler = hs.get_device_handler()
         self.store = hs.get_datastores().main
         return hs
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         # These tests assume that it starts 1000 seconds in.
         self.reactor.advance(1000)
 
-    def test_device_is_created_with_invalid_name(self):
+    def test_device_is_created_with_invalid_name(self) -> None:
         self.get_failure(
             self.handler.check_device_registered(
                 user_id="@boris:foo",
                 device_id="foo",
-                initial_device_display_name="a"
-                * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1),
+                initial_device_display_name="a" * (MAX_DEVICE_DISPLAY_NAME_LEN + 1),
             ),
-            synapse.api.errors.SynapseError,
+            SynapseError,
         )
 
-    def test_device_is_created_if_doesnt_exist(self):
+    def test_device_is_created_if_doesnt_exist(self) -> None:
         res = self.get_success(
             self.handler.check_device_registered(
                 user_id="@boris:foo",
@@ -59,7 +63,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco"))
         self.assertEqual(dev["display_name"], "display name")
 
-    def test_device_is_preserved_if_exists(self):
+    def test_device_is_preserved_if_exists(self) -> None:
         res1 = self.get_success(
             self.handler.check_device_registered(
                 user_id="@boris:foo",
@@ -81,7 +85,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco"))
         self.assertEqual(dev["display_name"], "display name")
 
-    def test_device_id_is_made_up_if_unspecified(self):
+    def test_device_id_is_made_up_if_unspecified(self) -> None:
         device_id = self.get_success(
             self.handler.check_device_registered(
                 user_id="@theresa:foo",
@@ -93,7 +97,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         dev = self.get_success(self.handler.store.get_device("@theresa:foo", device_id))
         self.assertEqual(dev["display_name"], "display")
 
-    def test_get_devices_by_user(self):
+    def test_get_devices_by_user(self) -> None:
         self._record_users()
 
         res = self.get_success(self.handler.get_devices_by_user(user1))
@@ -131,7 +135,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
             device_map["abc"],
         )
 
-    def test_get_device(self):
+    def test_get_device(self) -> None:
         self._record_users()
 
         res = self.get_success(self.handler.get_device(user1, "abc"))
@@ -146,21 +150,19 @@ class DeviceTestCase(unittest.HomeserverTestCase):
             res,
         )
 
-    def test_delete_device(self):
+    def test_delete_device(self) -> None:
         self._record_users()
 
         # delete the device
         self.get_success(self.handler.delete_device(user1, "abc"))
 
         # check the device was deleted
-        self.get_failure(
-            self.handler.get_device(user1, "abc"), synapse.api.errors.NotFoundError
-        )
+        self.get_failure(self.handler.get_device(user1, "abc"), NotFoundError)
 
         # we'd like to check the access token was invalidated, but that's a
         # bit of a PITA.
 
-    def test_delete_device_and_device_inbox(self):
+    def test_delete_device_and_device_inbox(self) -> None:
         self._record_users()
 
         # add an device_inbox
@@ -191,7 +193,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         )
         self.assertIsNone(res)
 
-    def test_update_device(self):
+    def test_update_device(self) -> None:
         self._record_users()
 
         update = {"display_name": "new display"}
@@ -200,32 +202,29 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         res = self.get_success(self.handler.get_device(user1, "abc"))
         self.assertEqual(res["display_name"], "new display")
 
-    def test_update_device_too_long_display_name(self):
+    def test_update_device_too_long_display_name(self) -> None:
         """Update a device with a display name that is invalid (too long)."""
         self._record_users()
 
         # Request to update a device display name with a new value that is longer than allowed.
-        update = {
-            "display_name": "a"
-            * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1)
-        }
+        update = {"display_name": "a" * (MAX_DEVICE_DISPLAY_NAME_LEN + 1)}
         self.get_failure(
             self.handler.update_device(user1, "abc", update),
-            synapse.api.errors.SynapseError,
+            SynapseError,
         )
 
         # Ensure the display name was not updated.
         res = self.get_success(self.handler.get_device(user1, "abc"))
         self.assertEqual(res["display_name"], "display 2")
 
-    def test_update_unknown_device(self):
+    def test_update_unknown_device(self) -> None:
         update = {"display_name": "new_display"}
         self.get_failure(
             self.handler.update_device("user_id", "unknown_device_id", update),
-            synapse.api.errors.NotFoundError,
+            NotFoundError,
         )
 
-    def _record_users(self):
+    def _record_users(self) -> None:
         # check this works for both devices which have a recorded client_ip,
         # and those which don't.
         self._record_user(user1, "xyz", "display 0")
@@ -238,8 +237,13 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         self.reactor.advance(10000)
 
     def _record_user(
-        self, user_id, device_id, display_name, access_token=None, ip=None
-    ):
+        self,
+        user_id: str,
+        device_id: str,
+        display_name: str,
+        access_token: Optional[str] = None,
+        ip: Optional[str] = None,
+    ) -> None:
         device_id = self.get_success(
             self.handler.check_device_registered(
                 user_id=user_id,
@@ -248,7 +252,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
             )
         )
 
-        if ip is not None:
+        if access_token is not None and ip is not None:
             self.get_success(
                 self.store.insert_client_ip(
                     user_id, access_token, ip, "user_agent", device_id
@@ -258,7 +262,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
 
 
 class DehydrationTestCase(unittest.HomeserverTestCase):
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         hs = self.setup_test_homeserver("server", federation_http_client=None)
         self.handler = hs.get_device_handler()
         self.registration = hs.get_registration_handler()
@@ -266,7 +270,7 @@ class DehydrationTestCase(unittest.HomeserverTestCase):
         self.store = hs.get_datastores().main
         return hs
 
-    def test_dehydrate_and_rehydrate_device(self):
+    def test_dehydrate_and_rehydrate_device(self) -> None:
         user_id = "@boris:dehydration"
 
         self.get_success(self.store.register_user(user_id, "foobar"))
@@ -303,7 +307,7 @@ class DehydrationTestCase(unittest.HomeserverTestCase):
                 access_token=access_token,
                 device_id="not the right device ID",
             ),
-            synapse.api.errors.NotFoundError,
+            NotFoundError,
         )
 
         # dehydrating the right devices should succeed and change our device ID
@@ -331,7 +335,7 @@ class DehydrationTestCase(unittest.HomeserverTestCase):
         # make sure that the device ID that we were initially assigned no longer exists
         self.get_failure(
             self.handler.get_device(user_id, device_id),
-            synapse.api.errors.NotFoundError,
+            NotFoundError,
         )
 
         # make sure that there's no device available for dehydrating now
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 6e403a87c5..11ad44223d 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -12,14 +12,18 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+from typing import Any, Awaitable, Callable, Dict
 from unittest.mock import Mock
 
+from twisted.test.proto_helpers import MemoryReactor
+
 import synapse.api.errors
 import synapse.rest.admin
 from synapse.api.constants import EventTypes
 from synapse.rest.client import directory, login, room
-from synapse.types import RoomAlias, create_requester
+from synapse.server import HomeServer
+from synapse.types import JsonDict, RoomAlias, create_requester
+from synapse.util import Clock
 
 from tests import unittest
 from tests.test_utils import make_awaitable
@@ -28,13 +32,15 @@ from tests.test_utils import make_awaitable
 class DirectoryTestCase(unittest.HomeserverTestCase):
     """Tests the directory service."""
 
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         self.mock_federation = Mock()
         self.mock_registry = Mock()
 
-        self.query_handlers = {}
+        self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {}
 
-        def register_query_handler(query_type, handler):
+        def register_query_handler(
+            query_type: str, handler: Callable[[dict], Awaitable[JsonDict]]
+        ) -> None:
             self.query_handlers[query_type] = handler
 
         self.mock_registry.register_query_handler = register_query_handler
@@ -54,7 +60,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
 
         return hs
 
-    def test_get_local_association(self):
+    def test_get_local_association(self) -> None:
         self.get_success(
             self.store.create_room_alias_association(
                 self.my_room, "!8765qwer:test", ["test"]
@@ -65,7 +71,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual({"room_id": "!8765qwer:test", "servers": ["test"]}, result)
 
-    def test_get_remote_association(self):
+    def test_get_remote_association(self) -> None:
         self.mock_federation.make_query.return_value = make_awaitable(
             {"room_id": "!8765qwer:test", "servers": ["test", "remote"]}
         )
@@ -83,7 +89,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
             ignore_backoff=True,
         )
 
-    def test_incoming_fed_query(self):
+    def test_incoming_fed_query(self) -> None:
         self.get_success(
             self.store.create_room_alias_association(
                 self.your_room, "!8765asdf:test", ["test"]
@@ -105,7 +111,7 @@ class TestCreateAlias(unittest.HomeserverTestCase):
         directory.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.handler = hs.get_directory_handler()
 
         # Create user
@@ -125,7 +131,7 @@ class TestCreateAlias(unittest.HomeserverTestCase):
         self.test_user_tok = self.login("user", "pass")
         self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok)
 
-    def test_create_alias_joined_room(self):
+    def test_create_alias_joined_room(self) -> None:
         """A user can create an alias for a room they're in."""
         self.get_success(
             self.handler.create_association(
@@ -135,7 +141,7 @@ class TestCreateAlias(unittest.HomeserverTestCase):
             )
         )
 
-    def test_create_alias_other_room(self):
+    def test_create_alias_other_room(self) -> None:
         """A user cannot create an alias for a room they're NOT in."""
         other_room_id = self.helper.create_room_as(
             self.admin_user, tok=self.admin_user_tok
@@ -150,7 +156,7 @@ class TestCreateAlias(unittest.HomeserverTestCase):
             synapse.api.errors.SynapseError,
         )
 
-    def test_create_alias_admin(self):
+    def test_create_alias_admin(self) -> None:
         """An admin can create an alias for a room they're NOT in."""
         other_room_id = self.helper.create_room_as(
             self.test_user, tok=self.test_user_tok
@@ -173,7 +179,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase):
         directory.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
         self.handler = hs.get_directory_handler()
         self.state_handler = hs.get_state_handler()
@@ -195,7 +201,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase):
         self.test_user_tok = self.login("user", "pass")
         self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok)
 
-    def _create_alias(self, user):
+    def _create_alias(self, user) -> None:
         # Create a new alias to this room.
         self.get_success(
             self.store.create_room_alias_association(
@@ -203,7 +209,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase):
             )
         )
 
-    def test_delete_alias_not_allowed(self):
+    def test_delete_alias_not_allowed(self) -> None:
         """A user that doesn't meet the expected guidelines cannot delete an alias."""
         self._create_alias(self.admin_user)
         self.get_failure(
@@ -213,7 +219,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase):
             synapse.api.errors.AuthError,
         )
 
-    def test_delete_alias_creator(self):
+    def test_delete_alias_creator(self) -> None:
         """An alias creator can delete their own alias."""
         # Create an alias from a different user.
         self._create_alias(self.test_user)
@@ -232,7 +238,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase):
             synapse.api.errors.SynapseError,
         )
 
-    def test_delete_alias_admin(self):
+    def test_delete_alias_admin(self) -> None:
         """A server admin can delete an alias created by another user."""
         # Create an alias from a different user.
         self._create_alias(self.test_user)
@@ -251,7 +257,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase):
             synapse.api.errors.SynapseError,
         )
 
-    def test_delete_alias_sufficient_power(self):
+    def test_delete_alias_sufficient_power(self) -> None:
         """A user with a sufficient power level should be able to delete an alias."""
         self._create_alias(self.admin_user)
 
@@ -288,7 +294,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase):
         directory.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
         self.handler = hs.get_directory_handler()
         self.state_handler = hs.get_state_handler()
@@ -317,7 +323,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase):
         )
         return room_alias
 
-    def _set_canonical_alias(self, content):
+    def _set_canonical_alias(self, content) -> None:
         """Configure the canonical alias state on the room."""
         self.helper.send_state(
             self.room_id,
@@ -334,7 +340,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase):
             )
         )
 
-    def test_remove_alias(self):
+    def test_remove_alias(self) -> None:
         """Removing an alias that is the canonical alias should remove it there too."""
         # Set this new alias as the canonical alias for this room
         self._set_canonical_alias(
@@ -356,7 +362,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase):
         self.assertNotIn("alias", data["content"])
         self.assertNotIn("alt_aliases", data["content"])
 
-    def test_remove_other_alias(self):
+    def test_remove_other_alias(self) -> None:
         """Removing an alias listed as in alt_aliases should remove it there too."""
         # Create a second alias.
         other_test_alias = "#test2:test"
@@ -393,7 +399,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase):
 
     servlets = [directory.register_servlets, room.register_servlets]
 
-    def default_config(self):
+    def default_config(self) -> Dict[str, Any]:
         config = super().default_config()
 
         # Add custom alias creation rules to the config.
@@ -403,7 +409,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase):
 
         return config
 
-    def test_denied(self):
+    def test_denied(self) -> None:
         room_id = self.helper.create_room_as(self.user_id)
 
         channel = self.make_request(
@@ -413,7 +419,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase):
         )
         self.assertEqual(403, channel.code, channel.result)
 
-    def test_allowed(self):
+    def test_allowed(self) -> None:
         room_id = self.helper.create_room_as(self.user_id)
 
         channel = self.make_request(
@@ -423,7 +429,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase):
         )
         self.assertEqual(200, channel.code, channel.result)
 
-    def test_denied_during_creation(self):
+    def test_denied_during_creation(self) -> None:
         """A room alias that is not allowed should be rejected during creation."""
         # Invalid room alias.
         self.helper.create_room_as(
@@ -432,7 +438,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase):
             extra_content={"room_alias_name": "foo"},
         )
 
-    def test_allowed_during_creation(self):
+    def test_allowed_during_creation(self) -> None:
         """A valid room alias should be allowed during creation."""
         room_id = self.helper.create_room_as(
             self.user_id,
@@ -459,7 +465,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase):
     data = {"room_alias_name": "unofficial_test"}
     allowed_localpart = "allowed"
 
-    def default_config(self):
+    def default_config(self) -> Dict[str, Any]:
         config = super().default_config()
 
         # Add custom room list publication rules to the config.
@@ -474,7 +480,9 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase):
 
         return config
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(
+        self, reactor: MemoryReactor, clock: Clock, hs: HomeServer
+    ) -> HomeServer:
         self.allowed_user_id = self.register_user(self.allowed_localpart, "pass")
         self.allowed_access_token = self.login(self.allowed_localpart, "pass")
 
@@ -483,7 +491,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase):
 
         return hs
 
-    def test_denied_without_publication_permission(self):
+    def test_denied_without_publication_permission(self) -> None:
         """
         Try to create a room, register an alias for it, and publish it,
         as a user without permission to publish rooms.
@@ -497,7 +505,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase):
             expect_code=403,
         )
 
-    def test_allowed_when_creating_private_room(self):
+    def test_allowed_when_creating_private_room(self) -> None:
         """
         Try to create a room, register an alias for it, and NOT publish it,
         as a user without permission to publish rooms.
@@ -511,7 +519,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase):
             expect_code=200,
         )
 
-    def test_allowed_with_publication_permission(self):
+    def test_allowed_with_publication_permission(self) -> None:
         """
         Try to create a room, register an alias for it, and publish it,
         as a user WITH permission to publish rooms.
@@ -525,7 +533,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase):
             expect_code=200,
         )
 
-    def test_denied_publication_with_invalid_alias(self):
+    def test_denied_publication_with_invalid_alias(self) -> None:
         """
         Try to create a room, register an alias for it, and publish it,
         as a user WITH permission to publish rooms.
@@ -538,7 +546,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase):
             expect_code=403,
         )
 
-    def test_can_create_as_private_room_after_rejection(self):
+    def test_can_create_as_private_room_after_rejection(self) -> None:
         """
         After failing to publish a room with an alias as a user without publish permission,
         retry as the same user, but without publishing the room.
@@ -549,7 +557,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase):
         self.test_denied_without_publication_permission()
         self.test_allowed_when_creating_private_room()
 
-    def test_can_create_with_permission_after_rejection(self):
+    def test_can_create_with_permission_after_rejection(self) -> None:
         """
         After failing to publish a room with an alias as a user without publish permission,
         retry as someone with permission, using the same alias.
@@ -566,7 +574,9 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase):
 
     servlets = [directory.register_servlets, room.register_servlets]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(
+        self, reactor: MemoryReactor, clock: Clock, hs: HomeServer
+    ) -> HomeServer:
         room_id = self.helper.create_room_as(self.user_id)
 
         channel = self.make_request(
@@ -579,7 +589,7 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase):
 
         return hs
 
-    def test_disabling_room_list(self):
+    def test_disabling_room_list(self) -> None:
         self.room_list_handler.enable_room_list_search = True
         self.directory_handler.enable_room_list_search = True
 
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index 9338ab92e9..ac21a28c43 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -20,33 +20,37 @@ from parameterized import parameterized
 from signedjson import key as key, sign as sign
 
 from twisted.internet import defer
+from twisted.test.proto_helpers import MemoryReactor
 
 from synapse.api.constants import RoomEncryptionAlgorithms
 from synapse.api.errors import Codes, SynapseError
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock
 
 from tests import unittest
 from tests.test_utils import make_awaitable
 
 
 class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         return self.setup_test_homeserver(federation_client=mock.Mock())
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.handler = hs.get_e2e_keys_handler()
         self.store = self.hs.get_datastores().main
 
-    def test_query_local_devices_no_devices(self):
+    def test_query_local_devices_no_devices(self) -> None:
         """If the user has no devices, we expect an empty list."""
         local_user = "@boris:" + self.hs.hostname
         res = self.get_success(self.handler.query_local_devices({local_user: None}))
         self.assertDictEqual(res, {local_user: {}})
 
-    def test_reupload_one_time_keys(self):
+    def test_reupload_one_time_keys(self) -> None:
         """we should be able to re-upload the same keys"""
         local_user = "@boris:" + self.hs.hostname
         device_id = "xyz"
-        keys = {
+        keys: JsonDict = {
             "alg1:k1": "key1",
             "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}},
             "alg2:k3": {"key": "key3"},
@@ -74,7 +78,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
             res, {"one_time_key_counts": {"alg1": 1, "alg2": 2, "signed_curve25519": 0}}
         )
 
-    def test_change_one_time_keys(self):
+    def test_change_one_time_keys(self) -> None:
         """attempts to change one-time-keys should be rejected"""
 
         local_user = "@boris:" + self.hs.hostname
@@ -134,7 +138,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
             SynapseError,
         )
 
-    def test_claim_one_time_key(self):
+    def test_claim_one_time_key(self) -> None:
         local_user = "@boris:" + self.hs.hostname
         device_id = "xyz"
         keys = {"alg1:k1": "key1"}
@@ -161,7 +165,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
             },
         )
 
-    def test_fallback_key(self):
+    def test_fallback_key(self) -> None:
         local_user = "@boris:" + self.hs.hostname
         device_id = "xyz"
         fallback_key = {"alg1:k1": "fallback_key1"}
@@ -294,7 +298,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
             {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}},
         )
 
-    def test_replace_master_key(self):
+    def test_replace_master_key(self) -> None:
         """uploading a new signing key should make the old signing key unavailable"""
         local_user = "@boris:" + self.hs.hostname
         keys1 = {
@@ -328,7 +332,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
         )
         self.assertDictEqual(devices["master_keys"], {local_user: keys2["master_key"]})
 
-    def test_reupload_signatures(self):
+    def test_reupload_signatures(self) -> None:
         """re-uploading a signature should not fail"""
         local_user = "@boris:" + self.hs.hostname
         keys1 = {
@@ -433,7 +437,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
         self.assertDictEqual(devices["device_keys"][local_user]["abc"], device_key_1)
         self.assertDictEqual(devices["device_keys"][local_user]["def"], device_key_2)
 
-    def test_self_signing_key_doesnt_show_up_as_device(self):
+    def test_self_signing_key_doesnt_show_up_as_device(self) -> None:
         """signing keys should be hidden when fetching a user's devices"""
         local_user = "@boris:" + self.hs.hostname
         keys1 = {
@@ -462,7 +466,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
         res = self.get_success(self.handler.query_local_devices({local_user: None}))
         self.assertDictEqual(res, {local_user: {}})
 
-    def test_upload_signatures(self):
+    def test_upload_signatures(self) -> None:
         """should check signatures that are uploaded"""
         # set up a user with cross-signing keys and a device.  This user will
         # try uploading signatures
@@ -686,7 +690,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
             other_master_key["signatures"][local_user]["ed25519:" + usersigning_pubkey],
         )
 
-    def test_query_devices_remote_no_sync(self):
+    def test_query_devices_remote_no_sync(self) -> None:
         """Tests that querying keys for a remote user that we don't share a room
         with returns the cross signing keys correctly.
         """
@@ -759,7 +763,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
             },
         )
 
-    def test_query_devices_remote_sync(self):
+    def test_query_devices_remote_sync(self) -> None:
         """Tests that querying keys for a remote user that we share a room with,
         but haven't yet fetched the keys for, returns the cross signing keys
         correctly.
@@ -845,7 +849,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
             (["device_1", "device_2"],),
         ]
     )
-    def test_query_all_devices_caches_result(self, device_ids: Iterable[str]):
+    def test_query_all_devices_caches_result(self, device_ids: Iterable[str]) -> None:
         """Test that requests for all of a remote user's devices are cached.
 
         We do this by asserting that only one call over federation was made, and that
@@ -853,7 +857,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
         """
         local_user_id = "@test:test"
         remote_user_id = "@test:other"
-        request_body = {"device_keys": {remote_user_id: []}}
+        request_body: JsonDict = {"device_keys": {remote_user_id: []}}
 
         response_devices = [
             {
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index e8b4e39d1a..89078fc637 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -12,9 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import List
+from typing import List, cast
 from unittest import TestCase
 
+from twisted.test.proto_helpers import MemoryReactor
+
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseError
 from synapse.api.room_versions import RoomVersions
@@ -23,7 +25,9 @@ from synapse.federation.federation_base import event_from_pdu_json
 from synapse.logging.context import LoggingContext, run_in_background
 from synapse.rest import admin
 from synapse.rest.client import login, room
+from synapse.server import HomeServer
 from synapse.types import create_requester
+from synapse.util import Clock
 from synapse.util.stringutils import random_string
 
 from tests import unittest
@@ -42,7 +46,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
         room.register_servlets,
     ]
 
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         hs = self.setup_test_homeserver(federation_http_client=None)
         self.handler = hs.get_federation_handler()
         self.store = hs.get_datastores().main
@@ -50,7 +54,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
         self._event_auth_handler = hs.get_event_auth_handler()
         return hs
 
-    def test_exchange_revoked_invite(self):
+    def test_exchange_revoked_invite(self) -> None:
         user_id = self.register_user("kermit", "test")
         tok = self.login("kermit", "test")
 
@@ -96,7 +100,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
         self.assertEqual(failure.errcode, Codes.FORBIDDEN, failure)
         self.assertEqual(failure.msg, "You are not invited to this room.")
 
-    def test_rejected_message_event_state(self):
+    def test_rejected_message_event_state(self) -> None:
         """
         Check that we store the state group correctly for rejected non-state events.
 
@@ -126,7 +130,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
                 "content": {},
                 "room_id": room_id,
                 "sender": "@yetanotheruser:" + OTHER_SERVER,
-                "depth": join_event["depth"] + 1,
+                "depth": cast(int, join_event["depth"]) + 1,
                 "prev_events": [join_event.event_id],
                 "auth_events": [],
                 "origin_server_ts": self.clock.time_msec(),
@@ -149,7 +153,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(sg, sg2)
 
-    def test_rejected_state_event_state(self):
+    def test_rejected_state_event_state(self) -> None:
         """
         Check that we store the state group correctly for rejected state events.
 
@@ -180,7 +184,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
                 "content": {},
                 "room_id": room_id,
                 "sender": "@yetanotheruser:" + OTHER_SERVER,
-                "depth": join_event["depth"] + 1,
+                "depth": cast(int, join_event["depth"]) + 1,
                 "prev_events": [join_event.event_id],
                 "auth_events": [],
                 "origin_server_ts": self.clock.time_msec(),
@@ -203,7 +207,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(sg, sg2)
 
-    def test_backfill_with_many_backward_extremities(self):
+    def test_backfill_with_many_backward_extremities(self) -> None:
         """
         Check that we can backfill with many backward extremities.
         The goal is to make sure that when we only use a portion
@@ -262,7 +266,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
             )
         self.get_success(d)
 
-    def test_backfill_floating_outlier_membership_auth(self):
+    def test_backfill_floating_outlier_membership_auth(self) -> None:
         """
         As the local homeserver, check that we can properly process a federated
         event from the OTHER_SERVER with auth_events that include a floating
@@ -377,7 +381,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
                 for ae in auth_events
             ]
 
-        self.handler.federation_client.get_event_auth = get_event_auth
+        self.handler.federation_client.get_event_auth = get_event_auth  # type: ignore[assignment]
 
         with LoggingContext("receive_pdu"):
             # Fake the OTHER_SERVER federating the message event over to our local homeserver
@@ -397,7 +401,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
     @unittest.override_config(
         {"rc_invites": {"per_user": {"per_second": 0.5, "burst_count": 3}}}
     )
-    def test_invite_by_user_ratelimit(self):
+    def test_invite_by_user_ratelimit(self) -> None:
         """Tests that invites from federation to a particular user are
         actually rate-limited.
         """
@@ -446,7 +450,9 @@ class FederationTestCase(unittest.HomeserverTestCase):
             exc=LimitExceededError,
         )
 
-    def _build_and_send_join_event(self, other_server, other_user, room_id):
+    def _build_and_send_join_event(
+        self, other_server: str, other_user: str, room_id: str
+    ) -> EventBase:
         join_event = self.get_success(
             self.handler.on_make_join_request(other_server, room_id, other_user)
         )
@@ -469,7 +475,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
 
 
 class EventFromPduTestCase(TestCase):
-    def test_valid_json(self):
+    def test_valid_json(self) -> None:
         """Valid JSON should be turned into an event."""
         ev = event_from_pdu_json(
             {
@@ -487,7 +493,7 @@ class EventFromPduTestCase(TestCase):
 
         self.assertIsInstance(ev, EventBase)
 
-    def test_invalid_numbers(self):
+    def test_invalid_numbers(self) -> None:
         """Invalid values for an integer should be rejected, all floats should be rejected."""
         for value in [
             -(2 ** 53),
@@ -512,7 +518,7 @@ class EventFromPduTestCase(TestCase):
                     RoomVersions.V6,
                 )
 
-    def test_invalid_nested(self):
+    def test_invalid_nested(self) -> None:
         """List and dictionaries are recursively searched."""
         with self.assertRaises(SynapseError):
             event_from_pdu_json(
diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py
index e8418b6638..014815db6e 100644
--- a/tests/handlers/test_oidc.py
+++ b/tests/handlers/test_oidc.py
@@ -13,14 +13,18 @@
 # limitations under the License.
 import json
 import os
+from typing import Any, Dict
 from unittest.mock import ANY, Mock, patch
 from urllib.parse import parse_qs, urlparse
 
 import pymacaroons
 
+from twisted.test.proto_helpers import MemoryReactor
+
 from synapse.handlers.sso import MappingException
 from synapse.server import HomeServer
-from synapse.types import UserID
+from synapse.types import JsonDict, UserID
+from synapse.util import Clock
 from synapse.util.macaroons import get_value_from_macaroon
 
 from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock
@@ -98,7 +102,7 @@ class TestMappingProviderFailures(TestMappingProvider):
         }
 
 
-async def get_json(url):
+async def get_json(url: str) -> JsonDict:
     # Mock get_json calls to handle jwks & oidc discovery endpoints
     if url == WELL_KNOWN:
         # Minimal discovery document, as defined in OpenID.Discovery
@@ -116,6 +120,8 @@ async def get_json(url):
     elif url == JWKS_URI:
         return {"keys": []}
 
+    return {}
+
 
 def _key_file_path() -> str:
     """path to a file containing the private half of a test key"""
@@ -147,12 +153,12 @@ class OidcHandlerTestCase(HomeserverTestCase):
     if not HAS_OIDC:
         skip = "requires OIDC"
 
-    def default_config(self):
+    def default_config(self) -> Dict[str, Any]:
         config = super().default_config()
         config["public_baseurl"] = BASE_URL
         return config
 
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         self.http_client = Mock(spec=["get_json"])
         self.http_client.get_json.side_effect = get_json
         self.http_client.user_agent = b"Synapse Test"
@@ -164,7 +170,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
         sso_handler = hs.get_sso_handler()
         # Mock the render error method.
         self.render_error = Mock(return_value=None)
-        sso_handler.render_error = self.render_error
+        sso_handler.render_error = self.render_error  # type: ignore[assignment]
 
         # Reduce the number of attempts when generating MXIDs.
         sso_handler._MAP_USERNAME_RETRIES = 3
@@ -193,14 +199,14 @@ class OidcHandlerTestCase(HomeserverTestCase):
         return args
 
     @override_config({"oidc_config": DEFAULT_CONFIG})
-    def test_config(self):
+    def test_config(self) -> None:
         """Basic config correctly sets up the callback URL and client auth correctly."""
         self.assertEqual(self.provider._callback_url, CALLBACK_URL)
         self.assertEqual(self.provider._client_auth.client_id, CLIENT_ID)
         self.assertEqual(self.provider._client_auth.client_secret, CLIENT_SECRET)
 
     @override_config({"oidc_config": {**DEFAULT_CONFIG, "discover": True}})
-    def test_discovery(self):
+    def test_discovery(self) -> None:
         """The handler should discover the endpoints from OIDC discovery document."""
         # This would throw if some metadata were invalid
         metadata = self.get_success(self.provider.load_metadata())
@@ -219,13 +225,13 @@ class OidcHandlerTestCase(HomeserverTestCase):
         self.http_client.get_json.assert_not_called()
 
     @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG})
-    def test_no_discovery(self):
+    def test_no_discovery(self) -> None:
         """When discovery is disabled, it should not try to load from discovery document."""
         self.get_success(self.provider.load_metadata())
         self.http_client.get_json.assert_not_called()
 
     @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG})
-    def test_load_jwks(self):
+    def test_load_jwks(self) -> None:
         """JWKS loading is done once (then cached) if used."""
         jwks = self.get_success(self.provider.load_jwks())
         self.http_client.get_json.assert_called_once_with(JWKS_URI)
@@ -253,7 +259,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
             self.get_failure(self.provider.load_jwks(force=True), RuntimeError)
 
     @override_config({"oidc_config": DEFAULT_CONFIG})
-    def test_validate_config(self):
+    def test_validate_config(self) -> None:
         """Provider metadatas are extensively validated."""
         h = self.provider
 
@@ -336,14 +342,14 @@ class OidcHandlerTestCase(HomeserverTestCase):
             force_load_metadata()
 
     @override_config({"oidc_config": {**DEFAULT_CONFIG, "skip_verification": True}})
-    def test_skip_verification(self):
+    def test_skip_verification(self) -> None:
         """Provider metadata validation can be disabled by config."""
         with self.metadata_edit({"issuer": "http://insecure"}):
             # This should not throw
             get_awaitable_result(self.provider.load_metadata())
 
     @override_config({"oidc_config": DEFAULT_CONFIG})
-    def test_redirect_request(self):
+    def test_redirect_request(self) -> None:
         """The redirect request has the right arguments & generates a valid session cookie."""
         req = Mock(spec=["cookies"])
         req.cookies = []
@@ -387,7 +393,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
         self.assertEqual(redirect, "http://client/redirect")
 
     @override_config({"oidc_config": DEFAULT_CONFIG})
-    def test_callback_error(self):
+    def test_callback_error(self) -> None:
         """Errors from the provider returned in the callback are displayed."""
         request = Mock(args={})
         request.args[b"error"] = [b"invalid_client"]
@@ -399,7 +405,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
         self.assertRenderedError("invalid_client", "some description")
 
     @override_config({"oidc_config": DEFAULT_CONFIG})
-    def test_callback(self):
+    def test_callback(self) -> None:
         """Code callback works and display errors if something went wrong.
 
         A lot of scenarios are tested here:
@@ -428,9 +434,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
             "username": username,
         }
         expected_user_id = "@%s:%s" % (username, self.hs.hostname)
-        self.provider._exchange_code = simple_async_mock(return_value=token)
-        self.provider._parse_id_token = simple_async_mock(return_value=userinfo)
-        self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo)
+        self.provider._exchange_code = simple_async_mock(return_value=token)  # type: ignore[assignment]
+        self.provider._parse_id_token = simple_async_mock(return_value=userinfo)  # type: ignore[assignment]
+        self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo)  # type: ignore[assignment]
         auth_handler = self.hs.get_auth_handler()
         auth_handler.complete_sso_login = simple_async_mock()
 
@@ -468,7 +474,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
             self.assertRenderedError("mapping_error")
 
         # Handle ID token errors
-        self.provider._parse_id_token = simple_async_mock(raises=Exception())
+        self.provider._parse_id_token = simple_async_mock(raises=Exception())  # type: ignore[assignment]
         self.get_success(self.handler.handle_oidc_callback(request))
         self.assertRenderedError("invalid_token")
 
@@ -483,7 +489,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
             "type": "bearer",
             "access_token": "access_token",
         }
-        self.provider._exchange_code = simple_async_mock(return_value=token)
+        self.provider._exchange_code = simple_async_mock(return_value=token)  # type: ignore[assignment]
         self.get_success(self.handler.handle_oidc_callback(request))
 
         auth_handler.complete_sso_login.assert_called_once_with(
@@ -510,8 +516,8 @@ class OidcHandlerTestCase(HomeserverTestCase):
         id_token = {
             "sid": "abcdefgh",
         }
-        self.provider._parse_id_token = simple_async_mock(return_value=id_token)
-        self.provider._exchange_code = simple_async_mock(return_value=token)
+        self.provider._parse_id_token = simple_async_mock(return_value=id_token)  # type: ignore[assignment]
+        self.provider._exchange_code = simple_async_mock(return_value=token)  # type: ignore[assignment]
         auth_handler.complete_sso_login.reset_mock()
         self.provider._fetch_userinfo.reset_mock()
         self.get_success(self.handler.handle_oidc_callback(request))
@@ -531,21 +537,21 @@ class OidcHandlerTestCase(HomeserverTestCase):
         self.render_error.assert_not_called()
 
         # Handle userinfo fetching error
-        self.provider._fetch_userinfo = simple_async_mock(raises=Exception())
+        self.provider._fetch_userinfo = simple_async_mock(raises=Exception())  # type: ignore[assignment]
         self.get_success(self.handler.handle_oidc_callback(request))
         self.assertRenderedError("fetch_error")
 
         # Handle code exchange failure
         from synapse.handlers.oidc import OidcError
 
-        self.provider._exchange_code = simple_async_mock(
+        self.provider._exchange_code = simple_async_mock(  # type: ignore[assignment]
             raises=OidcError("invalid_request")
         )
         self.get_success(self.handler.handle_oidc_callback(request))
         self.assertRenderedError("invalid_request")
 
     @override_config({"oidc_config": DEFAULT_CONFIG})
-    def test_callback_session(self):
+    def test_callback_session(self) -> None:
         """The callback verifies the session presence and validity"""
         request = Mock(spec=["args", "getCookie", "cookies"])
 
@@ -590,7 +596,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
     @override_config(
         {"oidc_config": {**DEFAULT_CONFIG, "client_auth_method": "client_secret_post"}}
     )
-    def test_exchange_code(self):
+    def test_exchange_code(self) -> None:
         """Code exchange behaves correctly and handles various error scenarios."""
         token = {"type": "bearer"}
         token_json = json.dumps(token).encode("utf-8")
@@ -686,7 +692,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
             }
         }
     )
-    def test_exchange_code_jwt_key(self):
+    def test_exchange_code_jwt_key(self) -> None:
         """Test that code exchange works with a JWK client secret."""
         from authlib.jose import jwt
 
@@ -741,7 +747,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
             }
         }
     )
-    def test_exchange_code_no_auth(self):
+    def test_exchange_code_no_auth(self) -> None:
         """Test that code exchange works with no client secret."""
         token = {"type": "bearer"}
         self.http_client.request = simple_async_mock(
@@ -776,7 +782,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
             }
         }
     )
-    def test_extra_attributes(self):
+    def test_extra_attributes(self) -> None:
         """
         Login while using a mapping provider that implements get_extra_attributes.
         """
@@ -790,8 +796,8 @@ class OidcHandlerTestCase(HomeserverTestCase):
             "username": "foo",
             "phone": "1234567",
         }
-        self.provider._exchange_code = simple_async_mock(return_value=token)
-        self.provider._parse_id_token = simple_async_mock(return_value=userinfo)
+        self.provider._exchange_code = simple_async_mock(return_value=token)  # type: ignore[assignment]
+        self.provider._parse_id_token = simple_async_mock(return_value=userinfo)  # type: ignore[assignment]
         auth_handler = self.hs.get_auth_handler()
         auth_handler.complete_sso_login = simple_async_mock()
 
@@ -817,12 +823,12 @@ class OidcHandlerTestCase(HomeserverTestCase):
         )
 
     @override_config({"oidc_config": DEFAULT_CONFIG})
-    def test_map_userinfo_to_user(self):
+    def test_map_userinfo_to_user(self) -> None:
         """Ensure that mapping the userinfo returned from a provider to an MXID works properly."""
         auth_handler = self.hs.get_auth_handler()
         auth_handler.complete_sso_login = simple_async_mock()
 
-        userinfo = {
+        userinfo: dict = {
             "sub": "test_user",
             "username": "test_user",
         }
@@ -870,7 +876,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
         )
 
     @override_config({"oidc_config": {**DEFAULT_CONFIG, "allow_existing_users": True}})
-    def test_map_userinfo_to_existing_user(self):
+    def test_map_userinfo_to_existing_user(self) -> None:
         """Existing users can log in with OpenID Connect when allow_existing_users is True."""
         store = self.hs.get_datastores().main
         user = UserID.from_string("@test_user:test")
@@ -974,7 +980,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
         )
 
     @override_config({"oidc_config": DEFAULT_CONFIG})
-    def test_map_userinfo_to_invalid_localpart(self):
+    def test_map_userinfo_to_invalid_localpart(self) -> None:
         """If the mapping provider generates an invalid localpart it should be rejected."""
         self.get_success(
             _make_callback_with_userinfo(self.hs, {"sub": "test2", "username": "föö"})
@@ -991,7 +997,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
             }
         }
     )
-    def test_map_userinfo_to_user_retries(self):
+    def test_map_userinfo_to_user_retries(self) -> None:
         """The mapping provider can retry generating an MXID if the MXID is already in use."""
         auth_handler = self.hs.get_auth_handler()
         auth_handler.complete_sso_login = simple_async_mock()
@@ -1039,7 +1045,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
         )
 
     @override_config({"oidc_config": DEFAULT_CONFIG})
-    def test_empty_localpart(self):
+    def test_empty_localpart(self) -> None:
         """Attempts to map onto an empty localpart should be rejected."""
         userinfo = {
             "sub": "tester",
@@ -1058,7 +1064,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
             }
         }
     )
-    def test_null_localpart(self):
+    def test_null_localpart(self) -> None:
         """Mapping onto a null localpart via an empty OIDC attribute should be rejected"""
         userinfo = {
             "sub": "tester",
@@ -1075,7 +1081,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
             }
         }
     )
-    def test_attribute_requirements(self):
+    def test_attribute_requirements(self) -> None:
         """The required attributes must be met from the OIDC userinfo response."""
         auth_handler = self.hs.get_auth_handler()
         auth_handler.complete_sso_login = simple_async_mock()
@@ -1115,7 +1121,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
             }
         }
     )
-    def test_attribute_requirements_contains(self):
+    def test_attribute_requirements_contains(self) -> None:
         """Test that auth succeeds if userinfo attribute CONTAINS required value"""
         auth_handler = self.hs.get_auth_handler()
         auth_handler.complete_sso_login = simple_async_mock()
@@ -1146,7 +1152,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
             }
         }
     )
-    def test_attribute_requirements_mismatch(self):
+    def test_attribute_requirements_mismatch(self) -> None:
         """
         Test that auth fails if attributes exist but don't match,
         or are non-string values.
@@ -1154,7 +1160,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
         auth_handler = self.hs.get_auth_handler()
         auth_handler.complete_sso_login = simple_async_mock()
         # userinfo with "test": "not_foobar" attribute should fail
-        userinfo = {
+        userinfo: dict = {
             "sub": "tester",
             "username": "tester",
             "test": "not_foobar",
@@ -1248,9 +1254,9 @@ async def _make_callback_with_userinfo(
 
     handler = hs.get_oidc_handler()
     provider = handler._providers["oidc"]
-    provider._exchange_code = simple_async_mock(return_value={"id_token": ""})
-    provider._parse_id_token = simple_async_mock(return_value=userinfo)
-    provider._fetch_userinfo = simple_async_mock(return_value=userinfo)
+    provider._exchange_code = simple_async_mock(return_value={"id_token": ""})  # type: ignore[assignment]
+    provider._parse_id_token = simple_async_mock(return_value=userinfo)  # type: ignore[assignment]
+    provider._fetch_userinfo = simple_async_mock(return_value=userinfo)  # type: ignore[assignment]
 
     state = "state"
     session = handler._token_generator.generate_oidc_session_token(
diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py
index 49d832de81..d401fda938 100644
--- a/tests/handlers/test_password_providers.py
+++ b/tests/handlers/test_password_providers.py
@@ -124,7 +124,6 @@ class PasswordCustomAuthProvider:
                 ("m.login.password", ("password",)): self.check_auth,
             }
         )
-        pass
 
     def check_auth(self, *args):
         return mock_password_provider.check_auth(*args)
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index 6ddec9ecf1..b2ed9cbe37 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -331,11 +331,11 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase):
 
         # Extract presence update user ID and state information into lists of tuples
         db_presence_states = [(ps[0], ps[1]) for _, ps in db_presence_states[0]]
-        presence_states = [(ps.user_id, ps.state) for ps in presence_states]
+        presence_states_compare = [(ps.user_id, ps.state) for ps in presence_states]
 
         # Compare what we put into the storage with what we got out.
         # They should be identical.
-        self.assertEqual(presence_states, db_presence_states)
+        self.assertEqual(presence_states_compare, db_presence_states)
 
 
 class PresenceTimeoutTestCase(unittest.TestCase):
@@ -357,6 +357,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
 
         self.assertIsNotNone(new_state)
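+        # The bare assert repeats the check so that mypy narrows new_state from
+        # Optional before the attribute accesses below; assertIsNotNone alone does not.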
+        assert new_state is not None
         self.assertEqual(new_state.state, PresenceState.UNAVAILABLE)
         self.assertEqual(new_state.status_msg, status_msg)
 
@@ -380,6 +381,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
 
         self.assertIsNotNone(new_state)
+        assert new_state is not None
         self.assertEqual(new_state.state, PresenceState.BUSY)
         self.assertEqual(new_state.status_msg, status_msg)
 
@@ -399,6 +401,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
 
         self.assertIsNotNone(new_state)
+        assert new_state is not None
         self.assertEqual(new_state.state, PresenceState.OFFLINE)
         self.assertEqual(new_state.status_msg, status_msg)
 
@@ -420,6 +423,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         )
 
         self.assertIsNotNone(new_state)
+        assert new_state is not None
         self.assertEqual(new_state.state, PresenceState.ONLINE)
         self.assertEqual(new_state.status_msg, status_msg)
 
@@ -477,6 +481,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         )
 
         self.assertIsNotNone(new_state)
+        assert new_state is not None
         self.assertEqual(new_state.state, PresenceState.OFFLINE)
         self.assertEqual(new_state.status_msg, status_msg)
 
@@ -653,13 +658,13 @@ class PresenceHandlerTestCase(unittest.HomeserverTestCase):
         self._set_presencestate_with_status_msg(user_id, PresenceState.ONLINE, None)
 
     def _set_presencestate_with_status_msg(
-        self, user_id: str, state: PresenceState, status_msg: Optional[str]
+        self, user_id: str, state: str, status_msg: Optional[str]
     ):
         """Set a PresenceState and status_msg and check the result.
 
         Args:
             user_id: User for whom the status is to be set.
-            PresenceState: The new PresenceState.
+            state: The new PresenceState.
             status_msg: Status message that is to be set.
         """
         self.get_success(
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 972cbac6e4..1ec105c373 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -11,14 +11,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Any, Dict
+from typing import Any, Awaitable, Callable, Dict
 from unittest.mock import Mock
 
+from twisted.test.proto_helpers import MemoryReactor
+
 import synapse.types
 from synapse.api.errors import AuthError, SynapseError
 from synapse.rest import admin
 from synapse.server import HomeServer
-from synapse.types import UserID
+from synapse.types import JsonDict, UserID
+from synapse.util import Clock
 
 from tests import unittest
 from tests.test_utils import make_awaitable
@@ -29,13 +32,15 @@ class ProfileTestCase(unittest.HomeserverTestCase):
 
     servlets = [admin.register_servlets]
 
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         self.mock_federation = Mock()
         self.mock_registry = Mock()
 
-        self.query_handlers = {}
+        self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {}
 
-        def register_query_handler(query_type, handler):
+        def register_query_handler(
+            query_type: str, handler: Callable[[dict], Awaitable[JsonDict]]
+        ) -> None:
             self.query_handlers[query_type] = handler
 
         self.mock_registry.register_query_handler = register_query_handler
@@ -47,7 +52,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
         )
         return hs
 
-    def prepare(self, reactor, clock, hs: HomeServer):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
 
         self.frank = UserID.from_string("@1234abcd:test")
@@ -58,7 +63,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
 
         self.handler = hs.get_profile_handler()
 
-    def test_get_my_name(self):
+    def test_get_my_name(self) -> None:
         self.get_success(
             self.store.set_profile_displayname(self.frank.localpart, "Frank")
         )
@@ -67,7 +72,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual("Frank", displayname)
 
-    def test_set_my_name(self):
+    def test_set_my_name(self) -> None:
         self.get_success(
             self.handler.set_displayname(
                 self.frank, synapse.types.create_requester(self.frank), "Frank Jr."
@@ -110,7 +115,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
             self.get_success(self.store.get_profile_displayname(self.frank.localpart))
         )
 
-    def test_set_my_name_if_disabled(self):
+    def test_set_my_name_if_disabled(self) -> None:
         self.hs.config.registration.enable_set_displayname = False
 
         # Setting displayname for the first time is allowed
@@ -135,7 +140,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
             SynapseError,
         )
 
-    def test_set_my_name_noauth(self):
+    def test_set_my_name_noauth(self) -> None:
         self.get_failure(
             self.handler.set_displayname(
                 self.frank, synapse.types.create_requester(self.bob), "Frank Jr."
@@ -143,7 +148,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
             AuthError,
         )
 
-    def test_get_other_name(self):
+    def test_get_other_name(self) -> None:
         self.mock_federation.make_query.return_value = make_awaitable(
             {"displayname": "Alice"}
         )
@@ -158,7 +163,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
             ignore_backoff=True,
         )
 
-    def test_incoming_fed_query(self):
+    def test_incoming_fed_query(self) -> None:
         self.get_success(self.store.create_profile("caroline"))
         self.get_success(self.store.set_profile_displayname("caroline", "Caroline"))
 
@@ -174,7 +179,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual({"displayname": "Caroline"}, response)
 
-    def test_get_my_avatar(self):
+    def test_get_my_avatar(self) -> None:
         self.get_success(
             self.store.set_profile_avatar_url(
                 self.frank.localpart, "http://my.server/me.png"
@@ -184,7 +189,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual("http://my.server/me.png", avatar_url)
 
-    def test_set_my_avatar(self):
+    def test_set_my_avatar(self) -> None:
         self.get_success(
             self.handler.set_avatar_url(
                 self.frank,
@@ -225,7 +230,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
             (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))),
         )
 
-    def test_set_my_avatar_if_disabled(self):
+    def test_set_my_avatar_if_disabled(self) -> None:
         self.hs.config.registration.enable_set_avatar_url = False
 
         # Setting displayname for the first time is allowed
@@ -250,7 +255,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
             SynapseError,
         )
 
-    def test_avatar_constraints_no_config(self):
+    def test_avatar_constraints_no_config(self) -> None:
         """Tests that the method to check an avatar against configured constraints skips
         all of its checks if no constraint is configured.
         """
@@ -263,7 +268,13 @@ class ProfileTestCase(unittest.HomeserverTestCase):
         self.assertTrue(res)
 
     @unittest.override_config({"max_avatar_size": 50})
-    def test_avatar_constraints_missing(self):
+    def test_avatar_constraints_allow_empty_avatar_url(self) -> None:
+        """An empty avatar is always permitted."""
+        res = self.get_success(self.handler.check_avatar_size_and_mime_type(""))
+        self.assertTrue(res)
+
+    @unittest.override_config({"max_avatar_size": 50})
+    def test_avatar_constraints_missing(self) -> None:
         """Tests that an avatar isn't allowed if the file at the given MXC URI couldn't
         be found.
         """
@@ -273,7 +284,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
         self.assertFalse(res)
 
     @unittest.override_config({"max_avatar_size": 50})
-    def test_avatar_constraints_file_size(self):
+    def test_avatar_constraints_file_size(self) -> None:
         """Tests that a file that's above the allowed file size is forbidden but one
         that's below it is allowed.
         """
@@ -295,7 +306,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
         self.assertFalse(res)
 
     @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]})
-    def test_avatar_constraint_mime_type(self):
+    def test_avatar_constraint_mime_type(self) -> None:
         """Tests that a file with an unauthorised MIME type is forbidden but one with
         an authorised content type is allowed.
         """
diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py
index cff07a8973..d37292ce13 100644
--- a/tests/handlers/test_room_summary.py
+++ b/tests/handlers/test_room_summary.py
@@ -172,6 +172,9 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
         result_room_ids = []
         result_children_ids = []
         for result_room in result["rooms"]:
+            # Ensure federation results are not leaking over the client-server API.
+            self.assertNotIn("allowed_room_ids", result_room)
+
             result_room_ids.append(result_room["room_id"])
             result_children_ids.append(
                 [
diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py
index 23941abed8..8d4404eda1 100644
--- a/tests/handlers/test_saml.py
+++ b/tests/handlers/test_saml.py
@@ -12,12 +12,16 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-from typing import Optional
+from typing import Any, Dict, Optional
 from unittest.mock import Mock
 
 import attr
 
+from twisted.test.proto_helpers import MemoryReactor
+
 from synapse.api.errors import RedirectException
+from synapse.server import HomeServer
+from synapse.util import Clock
 
 from tests.test_utils import simple_async_mock
 from tests.unittest import HomeserverTestCase, override_config
@@ -81,10 +85,10 @@ class TestRedirectMappingProvider(TestMappingProvider):
 
 
 class SamlHandlerTestCase(HomeserverTestCase):
-    def default_config(self):
+    def default_config(self) -> Dict[str, Any]:
         config = super().default_config()
         config["public_baseurl"] = BASE_URL
-        saml_config = {
+        saml_config: Dict[str, Any] = {
             "sp_config": {"metadata": {}},
             # Disable grandfathering.
             "grandfathered_mxid_source_attribute": None,
@@ -98,7 +102,7 @@ class SamlHandlerTestCase(HomeserverTestCase):
 
         return config
 
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         hs = self.setup_test_homeserver()
 
         self.handler = hs.get_saml_handler()
@@ -114,7 +118,7 @@ class SamlHandlerTestCase(HomeserverTestCase):
     elif not has_xmlsec1:
         skip = "Requires xmlsec1"
 
-    def test_map_saml_response_to_user(self):
+    def test_map_saml_response_to_user(self) -> None:
         """Ensure that mapping the SAML response returned from a provider to an MXID works properly."""
 
         # stub out the auth handler
@@ -140,7 +144,7 @@ class SamlHandlerTestCase(HomeserverTestCase):
         )
 
     @override_config({"saml2_config": {"grandfathered_mxid_source_attribute": "mxid"}})
-    def test_map_saml_response_to_existing_user(self):
+    def test_map_saml_response_to_existing_user(self) -> None:
         """Existing users can log in with SAML account."""
         store = self.hs.get_datastores().main
         self.get_success(
@@ -186,7 +190,7 @@ class SamlHandlerTestCase(HomeserverTestCase):
             auth_provider_session_id=None,
         )
 
-    def test_map_saml_response_to_invalid_localpart(self):
+    def test_map_saml_response_to_invalid_localpart(self) -> None:
         """If the mapping provider generates an invalid localpart it should be rejected."""
 
         # stub out the auth handler
@@ -207,7 +211,7 @@ class SamlHandlerTestCase(HomeserverTestCase):
         )
         auth_handler.complete_sso_login.assert_not_called()
 
-    def test_map_saml_response_to_user_retries(self):
+    def test_map_saml_response_to_user_retries(self) -> None:
         """The mapping provider can retry generating an MXID if the MXID is already in use."""
 
         # stub out the auth handler and error renderer
@@ -271,7 +275,7 @@ class SamlHandlerTestCase(HomeserverTestCase):
             }
         }
     )
-    def test_map_saml_response_redirect(self):
+    def test_map_saml_response_redirect(self) -> None:
         """Test a mapping provider that raises a RedirectException"""
 
         saml_response = FakeAuthnResponse({"uid": "test", "username": "test_user"})
@@ -292,7 +296,7 @@ class SamlHandlerTestCase(HomeserverTestCase):
             },
         }
     )
-    def test_attribute_requirements(self):
+    def test_attribute_requirements(self) -> None:
         """The required attributes must be met from the SAML response."""
 
         # stub out the auth handler
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index f91a80b9fa..ffd5c4cb93 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -18,11 +18,14 @@ from typing import Dict
 from unittest.mock import ANY, Mock, call
 
 from twisted.internet import defer
+from twisted.test.proto_helpers import MemoryReactor
 from twisted.web.resource import Resource
 
 from synapse.api.errors import AuthError
 from synapse.federation.transport.server import TransportLayerServer
-from synapse.types import UserID, create_requester
+from synapse.server import HomeServer
+from synapse.types import JsonDict, UserID, create_requester
+from synapse.util import Clock
 
 from tests import unittest
 from tests.test_utils import make_awaitable
@@ -42,7 +45,9 @@ ROOM_ID = "a-room"
 OTHER_ROOM_ID = "another-room"
 
 
-def _expect_edu_transaction(edu_type, content, origin="test"):
+def _expect_edu_transaction(
+    edu_type: str, content: JsonDict, origin: str = "test"
+) -> JsonDict:
     return {
         "origin": origin,
         "origin_server_ts": 1000000,
@@ -51,12 +56,12 @@ def _expect_edu_transaction(edu_type, content, origin="test"):
     }
 
 
-def _make_edu_transaction_json(edu_type, content):
+def _make_edu_transaction_json(edu_type: str, content: JsonDict) -> bytes:
     return json.dumps(_expect_edu_transaction(edu_type, content)).encode("utf8")
 
 
 class TypingNotificationsTestCase(unittest.HomeserverTestCase):
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         # we mock out the keyring so as to skip the authentication check on the
         # federation API call.
         mock_keyring = Mock(spec=["verify_json_for_server"])
@@ -83,7 +88,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         d["/_matrix/federation"] = TransportLayerServer(self.hs)
         return d
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         mock_notifier = hs.get_notifier()
         self.on_new_event = mock_notifier.on_new_event
 
@@ -111,24 +116,24 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
 
         self.room_members = []
 
-        async def check_user_in_room(room_id, user_id):
+        async def check_user_in_room(room_id: str, user_id: str) -> None:
             if user_id not in [u.to_string() for u in self.room_members]:
                 raise AuthError(401, "User is not in the room")
             return None
 
         hs.get_auth().check_user_in_room = check_user_in_room
 
-        async def check_host_in_room(room_id, server_name):
+        async def check_host_in_room(room_id: str, server_name: str) -> bool:
             return room_id == ROOM_ID
 
         hs.get_event_auth_handler().check_host_in_room = check_host_in_room
 
-        def get_joined_hosts_for_room(room_id):
+        def get_joined_hosts_for_room(room_id: str):
             return {member.domain for member in self.room_members}
 
         self.datastore.get_joined_hosts_for_room = get_joined_hosts_for_room
 
-        async def get_users_in_room(room_id):
+        async def get_users_in_room(room_id: str):
             return {str(u) for u in self.room_members}
 
         self.datastore.get_users_in_room = get_users_in_room
@@ -153,7 +158,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             lambda *args, **kwargs: make_awaitable(None)
         )
 
-    def test_started_typing_local(self):
+    def test_started_typing_local(self) -> None:
         self.room_members = [U_APPLE, U_BANANA]
 
         self.assertEqual(self.event_source.get_current_key(), 0)
@@ -187,7 +192,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         )
 
     @override_config({"send_federation": True})
-    def test_started_typing_remote_send(self):
+    def test_started_typing_remote_send(self) -> None:
         self.room_members = [U_APPLE, U_ONION]
 
         self.get_success(
@@ -217,7 +222,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             try_trailing_slash_on_400=True,
         )
 
-    def test_started_typing_remote_recv(self):
+    def test_started_typing_remote_recv(self) -> None:
         self.room_members = [U_APPLE, U_ONION]
 
         self.assertEqual(self.event_source.get_current_key(), 0)
@@ -256,7 +261,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             ],
         )
 
-    def test_started_typing_remote_recv_not_in_room(self):
+    def test_started_typing_remote_recv_not_in_room(self) -> None:
         self.room_members = [U_APPLE, U_ONION]
 
         self.assertEqual(self.event_source.get_current_key(), 0)
@@ -292,7 +297,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         self.assertEqual(events[1], 0)
 
     @override_config({"send_federation": True})
-    def test_stopped_typing(self):
+    def test_stopped_typing(self) -> None:
         self.room_members = [U_APPLE, U_BANANA, U_ONION]
 
         # Gut-wrenching
@@ -343,7 +348,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             [{"type": "m.typing", "room_id": ROOM_ID, "content": {"user_ids": []}}],
         )
 
-    def test_typing_timeout(self):
+    def test_typing_timeout(self) -> None:
         self.room_members = [U_APPLE, U_BANANA]
 
         self.assertEqual(self.event_source.get_current_key(), 0)
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index c3f20f9692..10dd94b549 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -86,6 +86,16 @@ class ModuleApiTestCase(HomeserverTestCase):
         displayname = self.get_success(self.store.get_profile_displayname("bob"))
         self.assertEqual(displayname, "Bobberino")
 
+    def test_can_register_admin_user(self):
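+        """Tests that a user registered with admin=True is reported as an admin by get_userinfo_by_id."""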
+        user_id = self.get_success(
+            self.register_user(
+                "bob_module_admin", "1234", displayname="Bobberino Admin", admin=True
+            )
+        )
+        found_user = self.get_success(self.module_api.get_userinfo_by_id(user_id))
+        self.assertEqual(found_user.user_id.to_string(), user_id)
+        self.assertIdentical(found_user.is_admin, True)
+
     def test_get_userinfo_by_id(self):
         user_id = self.register_user("alice", "1234")
         found_user = self.get_success(self.module_api.get_userinfo_by_id(user_id))
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index c284beb37c..ba158f5d93 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -11,14 +11,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Any, Dict, List, Optional, Tuple
 from unittest.mock import Mock
 
 from twisted.internet.defer import Deferred
+from twisted.test.proto_helpers import MemoryReactor
 
 import synapse.rest.admin
 from synapse.logging.context import make_deferred_yieldable
 from synapse.push import PusherConfigException
-from synapse.rest.client import login, receipts, room
+from synapse.rest.client import login, push_rule, receipts, room
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock
 
 from tests.unittest import HomeserverTestCase, override_config
 
@@ -29,22 +34,23 @@ class HTTPPusherTests(HomeserverTestCase):
         room.register_servlets,
         login.register_servlets,
         receipts.register_servlets,
+        push_rule.register_servlets,
     ]
     user_id = True
     hijack_auth = False
 
-    def default_config(self):
+    def default_config(self) -> Dict[str, Any]:
         config = super().default_config()
         config["start_pushers"] = True
         return config
 
-    def make_homeserver(self, reactor, clock):
-        self.push_attempts = []
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+        self.push_attempts: List[Tuple[Deferred, str, dict]] = []
 
         m = Mock()
 
         def post_json_get_json(url, body):
-            d = Deferred()
+            d: Deferred = Deferred()
             self.push_attempts.append((d, url, body))
             return make_deferred_yieldable(d)
 
@@ -54,7 +60,7 @@ class HTTPPusherTests(HomeserverTestCase):
 
         return hs
 
-    def test_invalid_configuration(self):
+    def test_invalid_configuration(self) -> None:
         """Invalid push configurations should be rejected."""
         # Register the user who gets notified
         user_id = self.register_user("user", "pass")
@@ -66,7 +72,7 @@ class HTTPPusherTests(HomeserverTestCase):
         )
         token_id = user_tuple.token_id
 
-        def test_data(data):
+        def test_data(data: Optional[JsonDict]) -> None:
             self.get_failure(
                 self.hs.get_pusherpool().add_pusher(
                     user_id=user_id,
@@ -93,7 +99,7 @@ class HTTPPusherTests(HomeserverTestCase):
         # A url with an incorrect path isn't accepted.
         test_data({"url": "http://example.com/foo"})
 
-    def test_sends_http(self):
+    def test_sends_http(self) -> None:
         """
         The HTTP pusher will send pushes for each message to an HTTP endpoint
         when configured to do so.
@@ -198,7 +204,7 @@ class HTTPPusherTests(HomeserverTestCase):
         self.assertEqual(len(pushers), 1)
         self.assertTrue(pushers[0].last_stream_ordering > last_stream_ordering)
 
-    def test_sends_high_priority_for_encrypted(self):
+    def test_sends_high_priority_for_encrypted(self) -> None:
         """
         The HTTP pusher will send pushes at high priority if they correspond
         to an encrypted message.
@@ -319,7 +325,7 @@ class HTTPPusherTests(HomeserverTestCase):
         )
         self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "high")
 
-    def test_sends_high_priority_for_one_to_one_only(self):
+    def test_sends_high_priority_for_one_to_one_only(self) -> None:
         """
         The HTTP pusher will send pushes at high priority if they correspond
         to a message in a one-to-one room.
@@ -402,7 +408,7 @@ class HTTPPusherTests(HomeserverTestCase):
         # check that this is low-priority
         self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low")
 
-    def test_sends_high_priority_for_mention(self):
+    def test_sends_high_priority_for_mention(self) -> None:
         """
         The HTTP pusher will send pushes at high priority if they correspond
         to a message containing the user's display name.
@@ -478,7 +484,7 @@ class HTTPPusherTests(HomeserverTestCase):
         # check that this is low-priority
         self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low")
 
-    def test_sends_high_priority_for_atroom(self):
+    def test_sends_high_priority_for_atroom(self) -> None:
         """
         The HTTP pusher will send pushes at high priority if they correspond
         to a message that contains @room.
@@ -561,7 +567,7 @@ class HTTPPusherTests(HomeserverTestCase):
         # check that this is low-priority
         self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low")
 
-    def test_push_unread_count_group_by_room(self):
+    def test_push_unread_count_group_by_room(self) -> None:
         """
         The HTTP pusher will group unread count by number of unread rooms.
         """
@@ -574,7 +580,7 @@ class HTTPPusherTests(HomeserverTestCase):
         self._check_push_attempt(6, 1)
 
     @override_config({"push": {"group_unread_count_by_room": False}})
-    def test_push_unread_count_message_count(self):
+    def test_push_unread_count_message_count(self) -> None:
         """
         The HTTP pusher will send the total unread message count.
         """
@@ -587,7 +593,7 @@ class HTTPPusherTests(HomeserverTestCase):
         # last read receipt
         self._check_push_attempt(6, 3)
 
-    def _test_push_unread_count(self):
+    def _test_push_unread_count(self) -> None:
         """
         Tests that the correct unread count appears in sent push notifications
 
@@ -679,7 +685,7 @@ class HTTPPusherTests(HomeserverTestCase):
 
         self.helper.send(room_id, body="HELLO???", tok=other_access_token)
 
-    def _advance_time_and_make_push_succeed(self, expected_push_attempts):
+    def _advance_time_and_make_push_succeed(self, expected_push_attempts: int) -> None:
         self.pump()
         self.push_attempts[expected_push_attempts - 1][0].callback({})
 
@@ -706,7 +712,9 @@ class HTTPPusherTests(HomeserverTestCase):
             expected_unread_count_last_push,
         )
 
-    def _send_read_request(self, access_token, message_event_id, room_id):
+    def _send_read_request(
+        self, access_token: str, message_event_id: str, room_id: str
+    ) -> None:
         # Now set the user's read receipt position to the first event
         #
         # This will actually trigger a new notification to be sent out so that
@@ -719,3 +727,67 @@ class HTTPPusherTests(HomeserverTestCase):
             access_token=access_token,
         )
         self.assertEqual(channel.code, 200, channel.json_body)
+
+    def _make_user_with_pusher(self, username: str) -> Tuple[str, str]:
+        user_id = self.register_user(username, "pass")
+        access_token = self.login(username, "pass")
+
+        # Register the pusher
+        user_tuple = self.get_success(
+            self.hs.get_datastores().main.get_user_by_access_token(access_token)
+        )
+        token_id = user_tuple.token_id
+
+        self.get_success(
+            self.hs.get_pusherpool().add_pusher(
+                user_id=user_id,
+                access_token=token_id,
+                kind="http",
+                app_id="m.http",
+                app_display_name="HTTP Push Notifications",
+                device_display_name="pushy push",
+                pushkey="a@example.com",
+                lang=None,
+                data={"url": "http://example.com/_matrix/push/v1/notify"},
+            )
+        )
+
+        return user_id, access_token
+
+    def test_dont_notify_rule_overrides_message(self) -> None:
+        """
+        The override push rule will suppress notifications
+        """
+
+        user_id, access_token = self._make_user_with_pusher("user")
+        other_user_id, other_access_token = self._make_user_with_pusher("otheruser")
+
+        # Create a room
+        room = self.helper.create_room_as(user_id, tok=access_token)
+
+        # Disable user notifications for this room -> user
+        body = {
+            "conditions": [{"kind": "event_match", "key": "room_id", "pattern": room}],
+            "actions": ["dont_notify"],
+        }
+        channel = self.make_request(
+            "PUT",
+            "/pushrules/global/override/best.friend",
+            body,
+            access_token=access_token,
+        )
+        self.assertEqual(channel.code, 200)
+
+        # Check we start with no pushes
+        self.assertEqual(len(self.push_attempts), 0)
+
+        # The other user joins
+        self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+        # The other user sends a message (ignored by dont_notify push rule set above)
+        self.helper.send(room, body="Hi!", tok=other_access_token)
+        self.assertEqual(len(self.push_attempts), 0)
+
+        # The user sends a message back (sends a notification)
+        self.helper.send(room, body="Hello", tok=access_token)
+        self.assertEqual(len(self.push_attempts), 1)
diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py
index 3849beb9d6..5dba187076 100644
--- a/tests/push/test_push_rule_evaluator.py
+++ b/tests/push/test_push_rule_evaluator.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any, Dict
+from typing import Dict, Optional, Union
 
 import frozendict
 
@@ -20,12 +20,13 @@ from synapse.api.room_versions import RoomVersions
 from synapse.events import FrozenEvent
 from synapse.push import push_rule_evaluator
 from synapse.push.push_rule_evaluator import PushRuleEvaluatorForEvent
+from synapse.types import JsonDict
 
 from tests import unittest
 
 
 class PushRuleEvaluatorTestCase(unittest.TestCase):
-    def _get_evaluator(self, content):
+    def _get_evaluator(self, content: JsonDict) -> PushRuleEvaluatorForEvent:
         event = FrozenEvent(
             {
                 "event_id": "$event_id",
@@ -39,12 +40,12 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
         )
         room_member_count = 0
         sender_power_level = 0
-        power_levels = {}
+        power_levels: Dict[str, Union[int, Dict[str, int]]] = {}
         return PushRuleEvaluatorForEvent(
             event, room_member_count, sender_power_level, power_levels
         )
 
-    def test_display_name(self):
+    def test_display_name(self) -> None:
         """Check for a matching display name in the body of the event."""
         evaluator = self._get_evaluator({"body": "foo bar baz"})
 
@@ -71,20 +72,20 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
         self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar"))
 
     def _assert_matches(
-        self, condition: Dict[str, Any], content: Dict[str, Any], msg=None
+        self, condition: JsonDict, content: JsonDict, msg: Optional[str] = None
     ) -> None:
         evaluator = self._get_evaluator(content)
         self.assertTrue(evaluator.matches(condition, "@user:test", "display_name"), msg)
 
     def _assert_not_matches(
-        self, condition: Dict[str, Any], content: Dict[str, Any], msg=None
+        self, condition: JsonDict, content: JsonDict, msg: Optional[str] = None
     ) -> None:
         evaluator = self._get_evaluator(content)
         self.assertFalse(
             evaluator.matches(condition, "@user:test", "display_name"), msg
         )
 
-    def test_event_match_body(self):
+    def test_event_match_body(self) -> None:
         """Check that event_match conditions on content.body work as expected"""
 
         # if the key is `content.body`, the pattern matches substrings.
@@ -165,7 +166,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
             r"? after \ should match any character",
         )
 
-    def test_event_match_non_body(self):
+    def test_event_match_non_body(self) -> None:
         """Check that event_match conditions on other keys work as expected"""
 
         # if the key is anything other than 'content.body', the pattern must match the
@@ -241,7 +242,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
             "pattern should not match before a newline",
         )
 
-    def test_no_body(self):
+    def test_no_body(self) -> None:
         """Not having a body shouldn't break the evaluator."""
         evaluator = self._get_evaluator({})
 
@@ -250,7 +251,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
         }
         self.assertFalse(evaluator.matches(condition, "@user:test", "foo"))
 
-    def test_invalid_body(self):
+    def test_invalid_body(self) -> None:
         """A non-string body should not break the evaluator."""
         condition = {
             "kind": "contains_display_name",
@@ -260,7 +261,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
             evaluator = self._get_evaluator({"body": body})
             self.assertFalse(evaluator.matches(condition, "@user:test", "foo"))
 
-    def test_tweaks_for_actions(self):
+    def test_tweaks_for_actions(self) -> None:
         """
         This tests the behaviour of tweaks_for_actions.
         """
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index a7a05a564f..9c5df266bd 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -251,7 +251,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
                 self.connect_any_redis_attempts,
             )
 
-            self.hs.get_tcp_replication().start_replication(self.hs)
+            self.hs.get_replication_command_handler().start_replication(self.hs)
 
         # When we see a connection attempt to the master replication listener we
         # automatically set up the connection. This is so that tests don't
@@ -375,7 +375,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
         )
 
         if worker_hs.config.redis.redis_enabled:
-            worker_hs.get_tcp_replication().start_replication(worker_hs)
+            worker_hs.get_replication_command_handler().start_replication(worker_hs)
 
         return worker_hs
 
diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py
index f9d5da723c..641a94133b 100644
--- a/tests/replication/tcp/streams/test_events.py
+++ b/tests/replication/tcp/streams/test_events.py
@@ -420,7 +420,7 @@ class EventsStreamTestCase(BaseStreamTestCase):
 
         # Manually send an old RDATA command, which should get dropped. This
         # re-uses the row from above, but with an earlier stream token.
-        self.hs.get_tcp_replication().send_command(
+        self.hs.get_replication_command_handler().send_command(
             RdataCommand("events", "master", 1, row)
         )
 
diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py
index 3ff5afc6e5..9a229dd23f 100644
--- a/tests/replication/tcp/streams/test_typing.py
+++ b/tests/replication/tcp/streams/test_typing.py
@@ -118,7 +118,7 @@ class TypingStreamTestCase(BaseStreamTestCase):
 
         # Reset the typing handler
         self.hs.get_replication_streams()["typing"].last_token = 0
-        self.hs.get_tcp_replication()._streams["typing"].last_token = 0
+        self.hs.get_replication_command_handler()._streams["typing"].last_token = 0
         typing._latest_room_serial = 0
         typing._typing_stream_change_cache = StreamChangeCache(
             "TypingStreamChangeCache", typing._latest_room_serial
diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py
index 1b6a4bf4b0..26b8bd512a 100644
--- a/tests/replication/test_federation_ack.py
+++ b/tests/replication/test_federation_ack.py
@@ -48,7 +48,7 @@ class FederationAckTestCase(HomeserverTestCase):
         transport, rather than assuming that the implementation has a
         ReplicationCommandHandler.
         """
-        rch = self.hs.get_tcp_replication()
+        rch = self.hs.get_replication_command_handler()
 
         # wire up the ReplicationCommandHandler to a mock connection, which needs
         # to implement IReplicationConnection. (Note that Mock doesn't understand
diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py
index fb36aa9940..6cf56b1e35 100644
--- a/tests/rest/admin/test_background_updates.py
+++ b/tests/rest/admin/test_background_updates.py
@@ -39,6 +39,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
         self.store = hs.get_datastores().main
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
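+        # Keep a BackgroundUpdater instance around so the tests below can reference
+        # its default batch size in the expected status responses.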
+        self.updater = BackgroundUpdater(hs, self.store.db_pool)
 
     @parameterized.expand(
         [
@@ -135,10 +136,10 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
         """Test the status API works with a background update."""
 
         # Create a new background update
-
         self._register_bg_update()
 
         self.store.db_pool.updates.start_doing_background_updates()
+
         self.reactor.pump([1.0, 1.0, 1.0])
 
         channel = self.make_request(
@@ -155,10 +156,10 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
                 "current_updates": {
                     "master": {
                         "name": "test_update",
-                        "average_items_per_ms": 0.001,
+                        "average_items_per_ms": 0.1,
                         "total_duration_ms": 1000.0,
                         "total_item_count": (
-                            BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE
+                            self.updater.default_background_batch_size
                         ),
                     }
                 },
@@ -210,10 +211,10 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
                 "current_updates": {
                     "master": {
                         "name": "test_update",
-                        "average_items_per_ms": 0.001,
+                        "average_items_per_ms": 0.1,
                         "total_duration_ms": 1000.0,
                         "total_item_count": (
-                            BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE
+                            self.updater.default_background_batch_size
                         ),
                     }
                 },
@@ -239,10 +240,10 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
                 "current_updates": {
                     "master": {
                         "name": "test_update",
-                        "average_items_per_ms": 0.001,
+                        "average_items_per_ms": 0.1,
                         "total_duration_ms": 1000.0,
                         "total_item_count": (
-                            BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE
+                            self.updater.default_background_batch_size
                         ),
                     }
                 },
@@ -278,11 +279,9 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
                 "current_updates": {
                     "master": {
                         "name": "test_update",
-                        "average_items_per_ms": 0.001,
+                        "average_items_per_ms": 0.05263157894736842,
                         "total_duration_ms": 2000.0,
-                        "total_item_count": (
-                            2 * BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE
-                        ),
+                        "total_item_count": (110),
                     }
                 },
                 "enabled": True,
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index a60ea0a563..bef911d5df 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -1050,6 +1050,25 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
 
         self._is_erased("@user:test", True)
 
+    @override_config({"max_avatar_size": 1234})
+    def test_deactivate_user_erase_true_avatar_nonnull_but_empty(self) -> None:
+        """Check we can erase a user whose avatar is the empty string.
+
+        Reproduces #12257.
+        """
+        # Patch `self.other_user` to have an empty string as their avatar.
+        self.get_success(self.store.set_profile_avatar_url("user", ""))
+
+        # Check we can still erase them.
+        channel = self.make_request(
+            "POST",
+            self.url,
+            access_token=self.admin_user_tok,
+            content={"erase": True},
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self._is_erased("@user:test", True)
+
     def test_deactivate_user_erase_false(self) -> None:
         """
         Test deactivating a user and set `erase` to `false`
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index def836054d..27946febff 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -31,7 +31,7 @@ from synapse.rest import admin
 from synapse.rest.client import account, login, register, room
 from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource
 from synapse.server import HomeServer
-from synapse.types import JsonDict
+from synapse.types import JsonDict, UserID
 from synapse.util import Clock
 
 from tests import unittest
@@ -1222,6 +1222,62 @@ class AccountStatusTestCase(unittest.HomeserverTestCase):
             expected_failures=[users[2]],
         )
 
+    @unittest.override_config(
+        {
+            "use_account_validity_in_account_status": True,
+        }
+    )
+    def test_no_account_validity(self) -> None:
+        """Tests that if we decide to include account validity in the response but no
+        account validity 'is_user_expired' callback is provided, we default to marking all
+        users as not expired.
+        """
+        user = self.register_user("someuser", "password")
+
+        self._test_status(
+            users=[user],
+            expected_statuses={
+                user: {
+                    "exists": True,
+                    "deactivated": False,
+                    "org.matrix.expired": False,
+                },
+            },
+            expected_failures=[],
+        )
+
+    @unittest.override_config(
+        {
+            "use_account_validity_in_account_status": True,
+        }
+    )
+    def test_account_validity_expired(self) -> None:
+        """Test that if we decide to include account validity in the response and the user
+        is expired, we return the correct info.
+        """
+        user = self.register_user("someuser", "password")
+
+        async def is_expired(user_id: str) -> bool:
+            # We can't blindly say everyone is expired, otherwise the request to get the
+            # account status will fail.
+            return UserID.from_string(user_id).localpart == "someuser"
+
+        self.hs.get_account_validity_handler()._is_user_expired_callbacks.append(
+            is_expired
+        )
+
+        self._test_status(
+            users=[user],
+            expected_statuses={
+                user: {
+                    "exists": True,
+                    "deactivated": False,
+                    "org.matrix.expired": True,
+                },
+            },
+            expected_failures=[],
+        )
+
     def _test_status(
         self,
         users: Optional[List[str]],
diff --git a/tests/rest/client/test_shared_rooms.py b/tests/rest/client/test_mutual_rooms.py
index 3818b7b14b..7b7d283bb6 100644
--- a/tests/rest/client/test_shared_rooms.py
+++ b/tests/rest/client/test_mutual_rooms.py
@@ -14,7 +14,7 @@
 from twisted.test.proto_helpers import MemoryReactor
 
 import synapse.rest.admin
-from synapse.rest.client import login, room, shared_rooms
+from synapse.rest.client import login, mutual_rooms, room
 from synapse.server import HomeServer
 from synapse.util import Clock
 
@@ -22,16 +22,16 @@ from tests import unittest
 from tests.server import FakeChannel
 
 
-class UserSharedRoomsTest(unittest.HomeserverTestCase):
+class UserMutualRoomsTest(unittest.HomeserverTestCase):
     """
-    Tests the UserSharedRoomsServlet.
+    Tests the UserMutualRoomsServlet.
     """
 
     servlets = [
         login.register_servlets,
         synapse.rest.admin.register_servlets_for_client_rest_resource,
         room.register_servlets,
-        shared_rooms.register_servlets,
+        mutual_rooms.register_servlets,
     ]
 
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
@@ -43,10 +43,10 @@ class UserSharedRoomsTest(unittest.HomeserverTestCase):
         self.store = hs.get_datastores().main
         self.handler = hs.get_user_directory_handler()
 
-    def _get_shared_rooms(self, token: str, other_user: str) -> FakeChannel:
+    def _get_mutual_rooms(self, token: str, other_user: str) -> FakeChannel:
         return self.make_request(
             "GET",
-            "/_matrix/client/unstable/uk.half-shot.msc2666/user/shared_rooms/%s"
+            "/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms/%s"
             % other_user,
             access_token=token,
         )
@@ -56,14 +56,14 @@ class UserSharedRoomsTest(unittest.HomeserverTestCase):
         A room should show up in the shared list of rooms between two users
         if it is public.
         """
-        self._check_shared_rooms_with(room_one_is_public=True, room_two_is_public=True)
+        self._check_mutual_rooms_with(room_one_is_public=True, room_two_is_public=True)
 
     def test_shared_room_list_private(self) -> None:
         """
         A room should show up in the shared list of rooms between two users
         if it is private.
         """
-        self._check_shared_rooms_with(
+        self._check_mutual_rooms_with(
             room_one_is_public=False, room_two_is_public=False
         )
 
@@ -72,9 +72,9 @@ class UserSharedRoomsTest(unittest.HomeserverTestCase):
         The shared room list between two users should contain both public and private
         rooms.
         """
-        self._check_shared_rooms_with(room_one_is_public=True, room_two_is_public=False)
+        self._check_mutual_rooms_with(room_one_is_public=True, room_two_is_public=False)
 
-    def _check_shared_rooms_with(
+    def _check_mutual_rooms_with(
         self, room_one_is_public: bool, room_two_is_public: bool
     ) -> None:
         """Checks that shared public or private rooms between two users appear in
@@ -94,7 +94,7 @@ class UserSharedRoomsTest(unittest.HomeserverTestCase):
 
         # Check shared rooms from user1's perspective.
         # We should see the one room in common
-        channel = self._get_shared_rooms(u1_token, u2)
+        channel = self._get_mutual_rooms(u1_token, u2)
         self.assertEqual(200, channel.code, channel.result)
         self.assertEqual(len(channel.json_body["joined"]), 1)
         self.assertEqual(channel.json_body["joined"][0], room_id_one)
@@ -107,7 +107,7 @@ class UserSharedRoomsTest(unittest.HomeserverTestCase):
         self.helper.join(room_id_two, user=u2, tok=u2_token)
 
         # Check shared rooms again. We should now see both rooms.
-        channel = self._get_shared_rooms(u1_token, u2)
+        channel = self._get_mutual_rooms(u1_token, u2)
         self.assertEqual(200, channel.code, channel.result)
         self.assertEqual(len(channel.json_body["joined"]), 2)
         for room_id_id in channel.json_body["joined"]:
@@ -128,7 +128,7 @@ class UserSharedRoomsTest(unittest.HomeserverTestCase):
         self.helper.join(room, user=u2, tok=u2_token)
 
         # Assert user directory is not empty
-        channel = self._get_shared_rooms(u1_token, u2)
+        channel = self._get_mutual_rooms(u1_token, u2)
         self.assertEqual(200, channel.code, channel.result)
         self.assertEqual(len(channel.json_body["joined"]), 1)
         self.assertEqual(channel.json_body["joined"][0], room)
@@ -136,11 +136,11 @@ class UserSharedRoomsTest(unittest.HomeserverTestCase):
         self.helper.leave(room, user=u1, tok=u1_token)
 
         # Check user1's view of shared rooms with user2
-        channel = self._get_shared_rooms(u1_token, u2)
+        channel = self._get_mutual_rooms(u1_token, u2)
         self.assertEqual(200, channel.code, channel.result)
         self.assertEqual(len(channel.json_body["joined"]), 0)
 
         # Check user2's view of shared rooms with user1
-        channel = self._get_shared_rooms(u2_token, u1)
+        channel = self._get_mutual_rooms(u2_token, u1)
         self.assertEqual(200, channel.code, channel.result)
         self.assertEqual(len(channel.json_body["joined"]), 0)
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index 709f851a38..fe97a0b3dd 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -15,17 +15,16 @@
 
 import itertools
 import urllib.parse
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Callable, Dict, List, Optional, Tuple
 from unittest.mock import patch
 
 from twisted.test.proto_helpers import MemoryReactor
 
-from synapse.api.constants import EventTypes, RelationTypes
+from synapse.api.constants import AccountDataTypes, EventTypes, RelationTypes
 from synapse.rest import admin
 from synapse.rest.client import login, register, relations, room, sync
 from synapse.server import HomeServer
-from synapse.storage.relations import RelationPaginationToken
-from synapse.types import JsonDict, StreamToken
+from synapse.types import JsonDict
 from synapse.util import Clock
 
 from tests import unittest
@@ -80,6 +79,7 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase):
         content: Optional[dict] = None,
         access_token: Optional[str] = None,
         parent_id: Optional[str] = None,
+        expected_response_code: int = 200,
     ) -> FakeChannel:
         """Helper function to send a relation pointing at `self.parent_id`
 
@@ -116,16 +116,60 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase):
             content,
             access_token=access_token,
         )
+        self.assertEqual(expected_response_code, channel.code, channel.json_body)
         return channel
 
+    def _get_related_events(self) -> List[str]:
+        """
+        Requests /relations on the parent ID and returns a list of event IDs.
+        """
+        # Request the relations of the event.
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEquals(200, channel.code, channel.json_body)
+        return [ev["event_id"] for ev in channel.json_body["chunk"]]
+
+    def _get_bundled_aggregations(self) -> JsonDict:
+        """
+        Requests /event on the parent ID and returns the m.relations field (from unsigned), if it exists.
+        """
+        # Fetch the bundled aggregations of the event.
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEquals(200, channel.code, channel.json_body)
+        return channel.json_body["unsigned"].get("m.relations", {})
+
+    def _get_aggregations(self) -> List[JsonDict]:
+        """Request /aggregations on the parent ID and includes the returned chunk."""
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        return channel.json_body["chunk"]
+
+    def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict:
+        """
+        Find the parent event in a chunk of events and assert that it has the proper bundled aggregations.
+        """
+        for event in events:
+            if event["event_id"] == self.parent_id:
+                return event
+
+        raise AssertionError(f"Event {self.parent_id} not found in chunk")
+
 
 class RelationsTestCase(BaseRelationsTestCase):
     def test_send_relation(self) -> None:
         """Tests that sending a relation works."""
-
         channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍")
-        self.assertEqual(200, channel.code, channel.json_body)
-
         event_id = channel.json_body["event_id"]
 
         channel = self.make_request(
@@ -152,13 +196,13 @@ class RelationsTestCase(BaseRelationsTestCase):
 
     def test_deny_invalid_event(self) -> None:
         """Test that we deny relations on non-existant events"""
-        channel = self._send_relation(
+        self._send_relation(
             RelationTypes.ANNOTATION,
             EventTypes.Message,
             parent_id="foo",
             content={"body": "foo", "msgtype": "m.text"},
+            expected_response_code=400,
         )
-        self.assertEqual(400, channel.code, channel.json_body)
 
         # Unless that event is referenced from another event!
         self.get_success(
@@ -172,13 +216,12 @@ class RelationsTestCase(BaseRelationsTestCase):
                 desc="test_deny_invalid_event",
             )
         )
-        channel = self._send_relation(
+        self._send_relation(
             RelationTypes.THREAD,
             EventTypes.Message,
             parent_id="foo",
             content={"body": "foo", "msgtype": "m.text"},
         )
-        self.assertEqual(200, channel.code, channel.json_body)
 
     def test_deny_invalid_room(self) -> None:
         """Test that we deny relations on non-existant events"""
@@ -188,18 +231,20 @@ class RelationsTestCase(BaseRelationsTestCase):
         parent_id = res["event_id"]
 
         # Attempt to send an annotation to that event.
-        channel = self._send_relation(
-            RelationTypes.ANNOTATION, "m.reaction", parent_id=parent_id, key="A"
+        self._send_relation(
+            RelationTypes.ANNOTATION,
+            "m.reaction",
+            parent_id=parent_id,
+            key="A",
+            expected_response_code=400,
         )
-        self.assertEqual(400, channel.code, channel.json_body)
 
     def test_deny_double_react(self) -> None:
         """Test that we deny relations on membership events"""
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a")
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
-        self.assertEqual(400, channel.code, channel.json_body)
+        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a")
+        self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "a", expected_response_code=400
+        )
 
     def test_deny_forked_thread(self) -> None:
         """It is invalid to start a thread off a thread."""
@@ -209,386 +254,24 @@ class RelationsTestCase(BaseRelationsTestCase):
             content={"msgtype": "m.text", "body": "foo"},
             parent_id=self.parent_id,
         )
-        self.assertEqual(200, channel.code, channel.json_body)
         parent_id = channel.json_body["event_id"]
 
-        channel = self._send_relation(
+        self._send_relation(
             RelationTypes.THREAD,
             "m.room.message",
             content={"msgtype": "m.text", "body": "foo"},
             parent_id=parent_id,
-        )
-        self.assertEqual(400, channel.code, channel.json_body)
-
-    def test_basic_paginate_relations(self) -> None:
-        """Tests that calling pagination API correctly the latest relations."""
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
-        self.assertEqual(200, channel.code, channel.json_body)
-        first_annotation_id = channel.json_body["event_id"]
-
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
-        self.assertEqual(200, channel.code, channel.json_body)
-        second_annotation_id = channel.json_body["event_id"]
-
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        # We expect to get back a single pagination result, which is the latest
-        # full relation event we sent above.
-        self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
-        self.assert_dict(
-            {
-                "event_id": second_annotation_id,
-                "sender": self.user_id,
-                "type": "m.reaction",
-            },
-            channel.json_body["chunk"][0],
-        )
-
-        # We also expect to get the original event (the id of which is self.parent_id)
-        self.assertEqual(
-            channel.json_body["original_event"]["event_id"], self.parent_id
-        )
-
-        # Make sure next_batch has something in it that looks like it could be a
-        # valid token.
-        self.assertIsInstance(
-            channel.json_body.get("next_batch"), str, channel.json_body
-        )
-
-        # Request the relations again, but with a different direction.
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/relations"
-            f"/{self.parent_id}?limit=1&org.matrix.msc3715.dir=f",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        # We expect to get back a single pagination result, which is the earliest
-        # full relation event we sent above.
-        self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
-        self.assert_dict(
-            {
-                "event_id": first_annotation_id,
-                "sender": self.user_id,
-                "type": "m.reaction",
-            },
-            channel.json_body["chunk"][0],
-        )
-
-    def _stream_token_to_relation_token(self, token: str) -> str:
-        """Convert a StreamToken into a legacy token (RelationPaginationToken)."""
-        room_key = self.get_success(StreamToken.from_string(self.store, token)).room_key
-        return self.get_success(
-            RelationPaginationToken(
-                topological=room_key.topological, stream=room_key.stream
-            ).to_string(self.store)
-        )
-
-    def test_repeated_paginate_relations(self) -> None:
-        """Test that if we paginate using a limit and tokens then we get the
-        expected events.
-        """
-
-        expected_event_ids = []
-        for idx in range(10):
-            channel = self._send_relation(
-                RelationTypes.ANNOTATION, "m.reaction", chr(ord("a") + idx)
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-            expected_event_ids.append(channel.json_body["event_id"])
-
-        prev_token = ""
-        found_event_ids: List[str] = []
-        for _ in range(20):
-            from_token = ""
-            if prev_token:
-                from_token = "&from=" + prev_token
-
-            channel = self.make_request(
-                "GET",
-                f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}",
-                access_token=self.user_token,
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-
-            found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
-            next_batch = channel.json_body.get("next_batch")
-
-            self.assertNotEqual(prev_token, next_batch)
-            prev_token = next_batch
-
-            if not prev_token:
-                break
-
-        # We paginated backwards, so reverse
-        found_event_ids.reverse()
-        self.assertEqual(found_event_ids, expected_event_ids)
-
-        # Reset and try again, but convert the tokens to the legacy format.
-        prev_token = ""
-        found_event_ids = []
-        for _ in range(20):
-            from_token = ""
-            if prev_token:
-                from_token = "&from=" + self._stream_token_to_relation_token(prev_token)
-
-            channel = self.make_request(
-                "GET",
-                f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}",
-                access_token=self.user_token,
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-
-            found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
-            next_batch = channel.json_body.get("next_batch")
-
-            self.assertNotEqual(prev_token, next_batch)
-            prev_token = next_batch
-
-            if not prev_token:
-                break
-
-        # We paginated backwards, so reverse
-        found_event_ids.reverse()
-        self.assertEqual(found_event_ids, expected_event_ids)
-
-    def test_pagination_from_sync_and_messages(self) -> None:
-        """Pagination tokens from /sync and /messages can be used to paginate /relations."""
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A")
-        self.assertEqual(200, channel.code, channel.json_body)
-        annotation_id = channel.json_body["event_id"]
-        # Send an event after the relation events.
-        self.helper.send(self.room, body="Latest event", tok=self.user_token)
-
-        # Request /sync, limiting it such that only the latest event is returned
-        # (and not the relation).
-        filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 1}}}')
-        channel = self.make_request(
-            "GET", f"/sync?filter={filter}", access_token=self.user_token
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
-        sync_prev_batch = room_timeline["prev_batch"]
-        self.assertIsNotNone(sync_prev_batch)
-        # Ensure the relation event is not in the batch returned from /sync.
-        self.assertNotIn(
-            annotation_id, [ev["event_id"] for ev in room_timeline["events"]]
-        )
-
-        # Request /messages, limiting it such that only the latest event is
-        # returned (and not the relation).
-        channel = self.make_request(
-            "GET",
-            f"/rooms/{self.room}/messages?dir=b&limit=1",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        messages_end = channel.json_body["end"]
-        self.assertIsNotNone(messages_end)
-        # Ensure the relation event is not in the chunk returned from /messages.
-        self.assertNotIn(
-            annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]]
+            expected_response_code=400,
         )
 
-        # Request /relations with the pagination tokens received from both the
-        # /sync and /messages responses above, in turn.
-        #
-        # This is a tiny bit silly since the client wouldn't know the parent ID
-        # from the requests above; consider the parent ID to be known from a
-        # previous /sync.
-        for from_token in (sync_prev_batch, messages_end):
-            channel = self.make_request(
-                "GET",
-                f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}",
-                access_token=self.user_token,
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-
-            # The relation should be in the returned chunk.
-            self.assertIn(
-                annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]]
-            )
-
-    def test_aggregation_pagination_groups(self) -> None:
-        """Test that we can paginate annotation groups correctly."""
-
-        # We need to create ten separate users to send each reaction.
-        access_tokens = [self.user_token, self.user2_token]
-        idx = 0
-        while len(access_tokens) < 10:
-            user_id, token = self._create_user("test" + str(idx))
-            idx += 1
-
-            self.helper.join(self.room, user=user_id, tok=token)
-            access_tokens.append(token)
-
-        idx = 0
-        sent_groups = {"👍": 10, "a": 7, "b": 5, "c": 3, "d": 2, "e": 1}
-        for key in itertools.chain.from_iterable(
-            itertools.repeat(key, num) for key, num in sent_groups.items()
-        ):
-            channel = self._send_relation(
-                RelationTypes.ANNOTATION,
-                "m.reaction",
-                key=key,
-                access_token=access_tokens[idx],
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-
-            idx += 1
-            idx %= len(access_tokens)
-
-        prev_token: Optional[str] = None
-        found_groups: Dict[str, int] = {}
-        for _ in range(20):
-            from_token = ""
-            if prev_token:
-                from_token = "&from=" + prev_token
-
-            channel = self.make_request(
-                "GET",
-                f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1{from_token}",
-                access_token=self.user_token,
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-
-            self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
-
-            for groups in channel.json_body["chunk"]:
-                # We only expect reactions
-                self.assertEqual(groups["type"], "m.reaction", channel.json_body)
-
-                # We should only see each key once
-                self.assertNotIn(groups["key"], found_groups, channel.json_body)
-
-                found_groups[groups["key"]] = groups["count"]
-
-            next_batch = channel.json_body.get("next_batch")
-
-            self.assertNotEqual(prev_token, next_batch)
-            prev_token = next_batch
-
-            if not prev_token:
-                break
-
-        self.assertEqual(sent_groups, found_groups)
-
-    def test_aggregation_pagination_within_group(self) -> None:
-        """Test that we can paginate within an annotation group."""
-
-        # We need to create ten separate users to send each reaction.
-        access_tokens = [self.user_token, self.user2_token]
-        idx = 0
-        while len(access_tokens) < 10:
-            user_id, token = self._create_user("test" + str(idx))
-            idx += 1
-
-            self.helper.join(self.room, user=user_id, tok=token)
-            access_tokens.append(token)
-
-        idx = 0
-        expected_event_ids = []
-        for _ in range(10):
-            channel = self._send_relation(
-                RelationTypes.ANNOTATION,
-                "m.reaction",
-                key="👍",
-                access_token=access_tokens[idx],
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-            expected_event_ids.append(channel.json_body["event_id"])
-
-            idx += 1
-
-        # Also send a different type of reaction so that we test we don't see it
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a")
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        prev_token = ""
-        found_event_ids: List[str] = []
-        encoded_key = urllib.parse.quote_plus("👍".encode())
-        for _ in range(20):
-            from_token = ""
-            if prev_token:
-                from_token = "&from=" + prev_token
-
-            channel = self.make_request(
-                "GET",
-                f"/_matrix/client/unstable/rooms/{self.room}"
-                f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}"
-                f"/m.reaction/{encoded_key}?limit=1{from_token}",
-                access_token=self.user_token,
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-
-            self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
-
-            found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
-
-            next_batch = channel.json_body.get("next_batch")
-
-            self.assertNotEqual(prev_token, next_batch)
-            prev_token = next_batch
-
-            if not prev_token:
-                break
-
-        # We paginated backwards, so reverse
-        found_event_ids.reverse()
-        self.assertEqual(found_event_ids, expected_event_ids)
-
-        # Reset and try again, but convert the tokens to the legacy format.
-        prev_token = ""
-        found_event_ids = []
-        for _ in range(20):
-            from_token = ""
-            if prev_token:
-                from_token = "&from=" + self._stream_token_to_relation_token(prev_token)
-
-            channel = self.make_request(
-                "GET",
-                f"/_matrix/client/unstable/rooms/{self.room}"
-                f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}"
-                f"/m.reaction/{encoded_key}?limit=1{from_token}",
-                access_token=self.user_token,
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-
-            self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
-
-            found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
-
-            next_batch = channel.json_body.get("next_batch")
-
-            self.assertNotEqual(prev_token, next_batch)
-            prev_token = next_batch
-
-            if not prev_token:
-                break
-
-        # We paginated backwards, so reverse
-        found_event_ids.reverse()
-        self.assertEqual(found_event_ids, expected_event_ids)
-
     def test_aggregation(self) -> None:
         """Test that annotations get correctly aggregated."""
 
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        channel = self._send_relation(
+        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+        self._send_relation(
             RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token
         )
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
-        self.assertEqual(200, channel.code, channel.json_body)
+        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
 
         channel = self.make_request(
             "GET",
@@ -618,220 +301,6 @@ class RelationsTestCase(BaseRelationsTestCase):
         )
         self.assertEqual(400, channel.code, channel.json_body)
 
-    @unittest.override_config(
-        {"experimental_features": {"msc3440_enabled": True, "msc3666_enabled": True}}
-    )
-    def test_bundled_aggregations(self) -> None:
-        """
-        Test that annotations, references, and threads get correctly bundled.
-
-        Note that this doesn't test against /relations since only thread relations
-        get bundled via that API. See test_aggregation_get_event_for_thread.
-
-        See test_edit for a similar test for edits.
-        """
-        # Setup by sending a variety of relations.
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        channel = self._send_relation(
-            RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test")
-        self.assertEqual(200, channel.code, channel.json_body)
-        reply_1 = channel.json_body["event_id"]
-
-        channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test")
-        self.assertEqual(200, channel.code, channel.json_body)
-        reply_2 = channel.json_body["event_id"]
-
-        channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
-        self.assertEqual(200, channel.code, channel.json_body)
-        thread_2 = channel.json_body["event_id"]
-
-        def assert_bundle(event_json: JsonDict) -> None:
-            """Assert the expected values of the bundled aggregations."""
-            relations_dict = event_json["unsigned"].get("m.relations")
-
-            # Ensure the fields are as expected.
-            self.assertCountEqual(
-                relations_dict.keys(),
-                (
-                    RelationTypes.ANNOTATION,
-                    RelationTypes.REFERENCE,
-                    RelationTypes.THREAD,
-                ),
-            )
-
-            # Check the values of each field.
-            self.assertEqual(
-                {
-                    "chunk": [
-                        {"type": "m.reaction", "key": "a", "count": 2},
-                        {"type": "m.reaction", "key": "b", "count": 1},
-                    ]
-                },
-                relations_dict[RelationTypes.ANNOTATION],
-            )
-
-            self.assertEqual(
-                {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]},
-                relations_dict[RelationTypes.REFERENCE],
-            )
-
-            self.assertEqual(
-                2,
-                relations_dict[RelationTypes.THREAD].get("count"),
-            )
-            self.assertTrue(
-                relations_dict[RelationTypes.THREAD].get("current_user_participated")
-            )
-            # The latest thread event has some fields that don't matter.
-            self.assert_dict(
-                {
-                    "content": {
-                        "m.relates_to": {
-                            "event_id": self.parent_id,
-                            "rel_type": RelationTypes.THREAD,
-                        }
-                    },
-                    "event_id": thread_2,
-                    "room_id": self.room,
-                    "sender": self.user_id,
-                    "type": "m.room.test",
-                    "user_id": self.user_id,
-                },
-                relations_dict[RelationTypes.THREAD].get("latest_event"),
-            )
-
-        # Request the event directly.
-        channel = self.make_request(
-            "GET",
-            f"/rooms/{self.room}/event/{self.parent_id}",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        assert_bundle(channel.json_body)
-
-        # Request the room messages.
-        channel = self.make_request(
-            "GET",
-            f"/rooms/{self.room}/messages?dir=b",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"]))
-
-        # Request the room context.
-        channel = self.make_request(
-            "GET",
-            f"/rooms/{self.room}/context/{self.parent_id}",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        assert_bundle(channel.json_body["event"])
-
-        # Request sync.
-        channel = self.make_request("GET", "/sync", access_token=self.user_token)
-        self.assertEqual(200, channel.code, channel.json_body)
-        room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
-        self.assertTrue(room_timeline["limited"])
-        assert_bundle(self._find_event_in_chunk(room_timeline["events"]))
-
-        # Request search.
-        channel = self.make_request(
-            "POST",
-            "/search",
-            # Search term matches the parent message.
-            content={"search_categories": {"room_events": {"search_term": "Hi"}}},
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        chunk = [
-            result["result"]
-            for result in channel.json_body["search_categories"]["room_events"][
-                "results"
-            ]
-        ]
-        assert_bundle(self._find_event_in_chunk(chunk))
-
-    def test_aggregation_get_event_for_annotation(self) -> None:
-        """Test that annotations do not get bundled aggregations included
-        when directly requested.
-        """
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
-        self.assertEqual(200, channel.code, channel.json_body)
-        annotation_id = channel.json_body["event_id"]
-
-        # Annotate the annotation.
-        channel = self._send_relation(
-            RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=annotation_id
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        channel = self.make_request(
-            "GET",
-            f"/rooms/{self.room}/event/{annotation_id}",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        self.assertIsNone(channel.json_body["unsigned"].get("m.relations"))
-
-    def test_aggregation_get_event_for_thread(self) -> None:
-        """Test that threads get bundled aggregations included when directly requested."""
-        channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
-        self.assertEqual(200, channel.code, channel.json_body)
-        thread_id = channel.json_body["event_id"]
-
-        # Annotate the annotation.
-        channel = self._send_relation(
-            RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        channel = self.make_request(
-            "GET",
-            f"/rooms/{self.room}/event/{thread_id}",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        self.assertEqual(
-            channel.json_body["unsigned"].get("m.relations"),
-            {
-                RelationTypes.ANNOTATION: {
-                    "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}]
-                },
-            },
-        )
-
-        # It should also be included when the entire thread is requested.
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        self.assertEqual(len(channel.json_body["chunk"]), 1)
-
-        thread_message = channel.json_body["chunk"][0]
-        self.assertEqual(
-            thread_message["unsigned"].get("m.relations"),
-            {
-                RelationTypes.ANNOTATION: {
-                    "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}]
-                },
-            },
-        )
-
-    @unittest.override_config({"experimental_features": {"msc3440_enabled": True}})
     def test_ignore_invalid_room(self) -> None:
         """Test that we ignore invalid relations over federation."""
         # Create another room and send a message in it.
@@ -953,8 +422,6 @@ class RelationsTestCase(BaseRelationsTestCase):
             "m.room.message",
             content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body},
         )
-        self.assertEqual(200, channel.code, channel.json_body)
-
         edit_event_id = channel.json_body["event_id"]
 
         def assert_bundle(event_json: JsonDict) -> None:
@@ -1030,7 +497,7 @@ class RelationsTestCase(BaseRelationsTestCase):
         shouldn't be allowed, are correctly handled.
         """
 
-        channel = self._send_relation(
+        self._send_relation(
             RelationTypes.REPLACE,
             "m.room.message",
             content={
@@ -1039,7 +506,6 @@ class RelationsTestCase(BaseRelationsTestCase):
                 "m.new_content": {"msgtype": "m.text", "body": "First edit"},
             },
         )
-        self.assertEqual(200, channel.code, channel.json_body)
 
         new_body = {"msgtype": "m.text", "body": "I've been edited!"}
         channel = self._send_relation(
@@ -1047,11 +513,9 @@ class RelationsTestCase(BaseRelationsTestCase):
             "m.room.message",
             content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body},
         )
-        self.assertEqual(200, channel.code, channel.json_body)
-
         edit_event_id = channel.json_body["event_id"]
 
-        channel = self._send_relation(
+        self._send_relation(
             RelationTypes.REPLACE,
             "m.room.message.WRONG_TYPE",
             content={
@@ -1060,7 +524,6 @@ class RelationsTestCase(BaseRelationsTestCase):
                 "m.new_content": {"msgtype": "m.text", "body": "Edit, but wrong type"},
             },
         )
-        self.assertEqual(200, channel.code, channel.json_body)
 
         channel = self.make_request(
             "GET",
@@ -1091,7 +554,6 @@ class RelationsTestCase(BaseRelationsTestCase):
             "m.room.message",
             content={"msgtype": "m.text", "body": "A reply!"},
         )
-        self.assertEqual(200, channel.code, channel.json_body)
         reply = channel.json_body["event_id"]
 
         new_body = {"msgtype": "m.text", "body": "I've been edited!"}
@@ -1101,8 +563,6 @@ class RelationsTestCase(BaseRelationsTestCase):
             content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body},
             parent_id=reply,
         )
-        self.assertEqual(200, channel.code, channel.json_body)
-
         edit_event_id = channel.json_body["event_id"]
 
         channel = self.make_request(
@@ -1138,7 +598,6 @@ class RelationsTestCase(BaseRelationsTestCase):
             {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict
         )
 
-    @unittest.override_config({"experimental_features": {"msc3440_enabled": True}})
     def test_edit_thread(self) -> None:
         """Test that editing a thread works."""
 
@@ -1148,17 +607,15 @@ class RelationsTestCase(BaseRelationsTestCase):
             "m.room.message",
             content={"msgtype": "m.text", "body": "A threaded reply!"},
         )
-        self.assertEqual(200, channel.code, channel.json_body)
         threaded_event_id = channel.json_body["event_id"]
 
         new_body = {"msgtype": "m.text", "body": "I've been edited!"}
-        channel = self._send_relation(
+        self._send_relation(
             RelationTypes.REPLACE,
             "m.room.message",
             content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body},
             parent_id=threaded_event_id,
         )
-        self.assertEqual(200, channel.code, channel.json_body)
 
         # Fetch the thread root, to get the bundled aggregation for the thread.
         channel = self.make_request(
@@ -1190,11 +647,10 @@ class RelationsTestCase(BaseRelationsTestCase):
                 "m.new_content": new_body,
             },
         )
-        self.assertEqual(200, channel.code, channel.json_body)
         edit_event_id = channel.json_body["event_id"]
 
         # Edit the edit event.
-        channel = self._send_relation(
+        self._send_relation(
             RelationTypes.REPLACE,
             "m.room.message",
             content={
@@ -1204,7 +660,6 @@ class RelationsTestCase(BaseRelationsTestCase):
             },
             parent_id=edit_event_id,
         )
-        self.assertEqual(200, channel.code, channel.json_body)
 
         # Request the original event.
         channel = self.make_request(
@@ -1231,7 +686,6 @@ class RelationsTestCase(BaseRelationsTestCase):
     def test_unknown_relations(self) -> None:
         """Unknown relations should be accepted."""
         channel = self._send_relation("m.relation.test", "m.room.test")
-        self.assertEqual(200, channel.code, channel.json_body)
         event_id = channel.json_body["event_id"]
 
         channel = self.make_request(
@@ -1272,28 +726,15 @@ class RelationsTestCase(BaseRelationsTestCase):
         self.assertEqual(200, channel.code, channel.json_body)
         self.assertEqual(channel.json_body["chunk"], [])
 
-    def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict:
-        """
-        Find the parent event in a chunk of events and assert that it has the proper bundled aggregations.
-        """
-        for event in events:
-            if event["event_id"] == self.parent_id:
-                return event
-
-        raise AssertionError(f"Event {self.parent_id} not found in chunk")
-
     def test_background_update(self) -> None:
         """Test the event_arbitrary_relations background update."""
         channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍")
-        self.assertEqual(200, channel.code, channel.json_body)
         annotation_event_id_good = channel.json_body["event_id"]
 
         channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="A")
-        self.assertEqual(200, channel.code, channel.json_body)
         annotation_event_id_bad = channel.json_body["event_id"]
 
         channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
-        self.assertEqual(200, channel.code, channel.json_body)
         thread_event_id = channel.json_body["event_id"]
 
         # Clean-up the table as if the inserts did not happen during event creation.
@@ -1345,8 +786,638 @@ class RelationsTestCase(BaseRelationsTestCase):
         )
 
 
+class RelationPaginationTestCase(BaseRelationsTestCase):
+    def test_basic_paginate_relations(self) -> None:
+        """Tests that calling pagination API correctly the latest relations."""
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+        first_annotation_id = channel.json_body["event_id"]
+
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
+        second_annotation_id = channel.json_body["event_id"]
+
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+
+        # We expect to get back a single pagination result, which is the latest
+        # full relation event we sent above.
+        self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
+        self.assert_dict(
+            {
+                "event_id": second_annotation_id,
+                "sender": self.user_id,
+                "type": "m.reaction",
+            },
+            channel.json_body["chunk"][0],
+        )
+
+        # We also expect to get the original event (the id of which is self.parent_id)
+        self.assertEqual(
+            channel.json_body["original_event"]["event_id"], self.parent_id
+        )
+
+        # Make sure next_batch has something in it that looks like it could be a
+        # valid token.
+        self.assertIsInstance(
+            channel.json_body.get("next_batch"), str, channel.json_body
+        )
+
+        # Request the relations again, but with a different direction.
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations"
+            f"/{self.parent_id}?limit=1&org.matrix.msc3715.dir=f",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+
+        # We expect to get back a single pagination result, which is the earliest
+        # full relation event we sent above.
+        self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
+        self.assert_dict(
+            {
+                "event_id": first_annotation_id,
+                "sender": self.user_id,
+                "type": "m.reaction",
+            },
+            channel.json_body["chunk"][0],
+        )
+
+    def test_repeated_paginate_relations(self) -> None:
+        """Test that if we paginate using a limit and tokens then we get the
+        expected events.
+        """
+
+        expected_event_ids = []
+        for idx in range(10):
+            channel = self._send_relation(
+                RelationTypes.ANNOTATION, "m.reaction", chr(ord("a") + idx)
+            )
+            expected_event_ids.append(channel.json_body["event_id"])
+
+        prev_token = ""
+        found_event_ids: List[str] = []
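+        # Paginate one event at a time until the server stops returning a next_batch token.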
+        for _ in range(20):
+            from_token = ""
+            if prev_token:
+                from_token = "&from=" + prev_token
+
+            channel = self.make_request(
+                "GET",
+                f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}",
+                access_token=self.user_token,
+            )
+            self.assertEqual(200, channel.code, channel.json_body)
+
+            found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
+            next_batch = channel.json_body.get("next_batch")
+
+            self.assertNotEqual(prev_token, next_batch)
+            prev_token = next_batch
+
+            if not prev_token:
+                break
+
+        # We paginated backwards, so reverse
+        found_event_ids.reverse()
+        self.assertEqual(found_event_ids, expected_event_ids)
+
+    def test_pagination_from_sync_and_messages(self) -> None:
+        """Pagination tokens from /sync and /messages can be used to paginate /relations."""
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A")
+        annotation_id = channel.json_body["event_id"]
+        # Send an event after the relation events.
+        self.helper.send(self.room, body="Latest event", tok=self.user_token)
+
+        # Request /sync, limiting it such that only the latest event is returned
+        # (and not the relation).
+        filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 1}}}')
+        channel = self.make_request(
+            "GET", f"/sync?filter={filter}", access_token=self.user_token
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
+        sync_prev_batch = room_timeline["prev_batch"]
+        self.assertIsNotNone(sync_prev_batch)
+        # Ensure the relation event is not in the batch returned from /sync.
+        self.assertNotIn(
+            annotation_id, [ev["event_id"] for ev in room_timeline["events"]]
+        )
+
+        # Request /messages, limiting it such that only the latest event is
+        # returned (and not the relation).
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/messages?dir=b&limit=1",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        messages_end = channel.json_body["end"]
+        self.assertIsNotNone(messages_end)
+        # Ensure the relation event is not in the chunk returned from /messages.
+        self.assertNotIn(
+            annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]]
+        )
+
+        # Request /relations with the pagination tokens received from both the
+        # /sync and /messages responses above, in turn.
+        #
+        # This is a tiny bit silly since the client wouldn't know the parent ID
+        # from the requests above; consider the parent ID to be known from a
+        # previous /sync.
+        for from_token in (sync_prev_batch, messages_end):
+            channel = self.make_request(
+                "GET",
+                f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}",
+                access_token=self.user_token,
+            )
+            self.assertEqual(200, channel.code, channel.json_body)
+
+            # The relation should be in the returned chunk.
+            self.assertIn(
+                annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]]
+            )
+
+    def test_aggregation_pagination_groups(self) -> None:
+        """Test that we can paginate annotation groups correctly."""
+
+        # We need to create ten separate users to send each reaction.
+        access_tokens = [self.user_token, self.user2_token]
+        idx = 0
+        while len(access_tokens) < 10:
+            user_id, token = self._create_user("test" + str(idx))
+            idx += 1
+
+            self.helper.join(self.room, user=user_id, tok=token)
+            access_tokens.append(token)
+
+        idx = 0
+        sent_groups = {"👍": 10, "a": 7, "b": 5, "c": 3, "d": 2, "e": 1}
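+        # Send each reaction key the given number of times, cycling through users so the annotations come from distinct senders.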
+        for key in itertools.chain.from_iterable(
+            itertools.repeat(key, num) for key, num in sent_groups.items()
+        ):
+            self._send_relation(
+                RelationTypes.ANNOTATION,
+                "m.reaction",
+                key=key,
+                access_token=access_tokens[idx],
+            )
+
+            idx += 1
+            idx %= len(access_tokens)
+
+        prev_token: Optional[str] = None
+        found_groups: Dict[str, int] = {}
+        for _ in range(20):
+            from_token = ""
+            if prev_token:
+                from_token = "&from=" + prev_token
+
+            channel = self.make_request(
+                "GET",
+                f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1{from_token}",
+                access_token=self.user_token,
+            )
+            self.assertEqual(200, channel.code, channel.json_body)
+
+            self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
+
+            for groups in channel.json_body["chunk"]:
+                # We only expect reactions
+                self.assertEqual(groups["type"], "m.reaction", channel.json_body)
+
+                # We should only see each key once
+                self.assertNotIn(groups["key"], found_groups, channel.json_body)
+
+                found_groups[groups["key"]] = groups["count"]
+
+            next_batch = channel.json_body.get("next_batch")
+
+            self.assertNotEqual(prev_token, next_batch)
+            prev_token = next_batch
+
+            if not prev_token:
+                break
+
+        self.assertEqual(sent_groups, found_groups)
+
+    def test_aggregation_pagination_within_group(self) -> None:
+        """Test that we can paginate within an annotation group."""
+
+        # We need to create ten separate users to send each reaction.
+        access_tokens = [self.user_token, self.user2_token]
+        idx = 0
+        while len(access_tokens) < 10:
+            user_id, token = self._create_user("test" + str(idx))
+            idx += 1
+
+            self.helper.join(self.room, user=user_id, tok=token)
+            access_tokens.append(token)
+
+        idx = 0
+        expected_event_ids = []
+        for _ in range(10):
+            channel = self._send_relation(
+                RelationTypes.ANNOTATION,
+                "m.reaction",
+                key="👍",
+                access_token=access_tokens[idx],
+            )
+            expected_event_ids.append(channel.json_body["event_id"])
+
+            idx += 1
+
+        # Also send a different type of reaction so that we test we don't see it
+        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a")
+
+        prev_token = ""
+        found_event_ids: List[str] = []
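+        # The reaction key must be URL-encoded when used in the request path.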
+        encoded_key = urllib.parse.quote_plus("👍".encode())
+        for _ in range(20):
+            from_token = ""
+            if prev_token:
+                from_token = "&from=" + prev_token
+
+            channel = self.make_request(
+                "GET",
+                f"/_matrix/client/unstable/rooms/{self.room}"
+                f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}"
+                f"/m.reaction/{encoded_key}?limit=1{from_token}",
+                access_token=self.user_token,
+            )
+            self.assertEqual(200, channel.code, channel.json_body)
+
+            self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
+
+            found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
+
+            next_batch = channel.json_body.get("next_batch")
+
+            self.assertNotEqual(prev_token, next_batch)
+            prev_token = next_batch
+
+            if not prev_token:
+                break
+
+        # We paginated backwards, so reverse
+        found_event_ids.reverse()
+        self.assertEqual(found_event_ids, expected_event_ids)
+
+
+class BundledAggregationsTestCase(BaseRelationsTestCase):
+    """
+    Tests that bundled aggregations are included when events are returned by various APIs.
+
+    Note that this doesn't test against /relations since only thread relations
+    get bundled via that API. See test_aggregation_get_event_for_thread.
+
+    See RelationsTestCase.test_edit for a similar test for edits.
+    """
+
+    def _test_bundled_aggregations(
+        self,
+        relation_type: str,
+        assertion_callable: Callable[[JsonDict], None],
+        expected_db_txn_for_event: int,
+    ) -> None:
+        """
+        Makes requests to various endpoints which should include bundled aggregations
+        and then calls an assertion function on the bundled aggregations.
+
+        Args:
+            relation_type: The field to search for in the `m.relations` field in unsigned.
+            assertion_callable: Called with the contents of unsigned["m.relations"][relation_type]
+                for relation-specific assertions.
+            expected_db_txn_for_event: The number of database transactions which
+                are expected for a call to /event/.
+        """
+
+        def assert_bundle(event_json: JsonDict) -> None:
+            """Assert the expected values of the bundled aggregations."""
+            relations_dict = event_json["unsigned"].get("m.relations")
+
+            # Ensure the fields are as expected.
+            self.assertCountEqual(relations_dict.keys(), (relation_type,))
+            assertion_callable(relations_dict[relation_type])
+
+        # Request the event directly.
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/event/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        assert_bundle(channel.json_body)
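+        # Check that the /event request used the expected number of database transactions.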
+        assert channel.resource_usage is not None
+        self.assertEqual(channel.resource_usage.db_txn_count, expected_db_txn_for_event)
+
+        # Request the room messages.
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/messages?dir=b",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"]))
+
+        # Request the room context.
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/context/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        assert_bundle(channel.json_body["event"])
+
+        # Request sync.
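+        # Limit the timeline so the response stays small while still including the parent event.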
+        filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 4}}}')
+        channel = self.make_request(
+            "GET", f"/sync?filter={filter}", access_token=self.user_token
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
+        self.assertTrue(room_timeline["limited"])
+        assert_bundle(self._find_event_in_chunk(room_timeline["events"]))
+
+        # Request search.
+        channel = self.make_request(
+            "POST",
+            "/search",
+            # Search term matches the parent message.
+            content={"search_categories": {"room_events": {"search_term": "Hi"}}},
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        chunk = [
+            result["result"]
+            for result in channel.json_body["search_categories"]["room_events"][
+                "results"
+            ]
+        ]
+        assert_bundle(self._find_event_in_chunk(chunk))
+
+    @unittest.override_config({"experimental_features": {"msc3666_enabled": True}})
+    def test_annotation(self) -> None:
+        """
+        Test that annotations get correctly bundled.
+        """
+        # Setup by sending a variety of relations.
+        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+        self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token
+        )
+        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
+
+        def assert_annotations(bundled_aggregations: JsonDict) -> None:
+            self.assertEqual(
+                {
+                    "chunk": [
+                        {"type": "m.reaction", "key": "a", "count": 2},
+                        {"type": "m.reaction", "key": "b", "count": 1},
+                    ]
+                },
+                bundled_aggregations,
+            )
+
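+        # 7 is the number of database transactions expected for the /event request.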
+        self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7)
+
+    @unittest.override_config({"experimental_features": {"msc3666_enabled": True}})
+    def test_reference(self) -> None:
+        """
+        Test that references get correctly bundled.
+        """
+        channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test")
+        reply_1 = channel.json_body["event_id"]
+
+        channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test")
+        reply_2 = channel.json_body["event_id"]
+
+        def assert_annotations(bundled_aggregations: JsonDict) -> None:
+            self.assertEqual(
+                {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]},
+                bundled_aggregations,
+            )
+
+        self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7)
+
+    @unittest.override_config({"experimental_features": {"msc3666_enabled": True}})
+    def test_thread(self) -> None:
+        """
+        Test that threads get correctly bundled.
+        """
+        self._send_relation(RelationTypes.THREAD, "m.room.test")
+        channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+        thread_2 = channel.json_body["event_id"]
+
+        def assert_annotations(bundled_aggregations: JsonDict) -> None:
+            self.assertEqual(2, bundled_aggregations.get("count"))
+            self.assertTrue(bundled_aggregations.get("current_user_participated"))
+            # The latest thread event has some fields that don't matter.
+            self.assert_dict(
+                {
+                    "content": {
+                        "m.relates_to": {
+                            "event_id": self.parent_id,
+                            "rel_type": RelationTypes.THREAD,
+                        }
+                    },
+                    "event_id": thread_2,
+                    "sender": self.user_id,
+                    "type": "m.room.test",
+                },
+                bundled_aggregations.get("latest_event"),
+            )
+
+        self._test_bundled_aggregations(RelationTypes.THREAD, assert_annotations, 9)
+
+    def test_aggregation_get_event_for_annotation(self) -> None:
+        """Test that annotations do not get bundled aggregations included
+        when directly requested.
+        """
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+        annotation_id = channel.json_body["event_id"]
+
+        # Annotate the annotation.
+        self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=annotation_id
+        )
+
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/event/{annotation_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        self.assertIsNone(channel.json_body["unsigned"].get("m.relations"))
+
+    def test_aggregation_get_event_for_thread(self) -> None:
+        """Test that threads get bundled aggregations included when directly requested."""
+        channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+        thread_id = channel.json_body["event_id"]
+
+        # Annotate the thread event.
+        self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id
+        )
+
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/event/{thread_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        self.assertEqual(
+            channel.json_body["unsigned"].get("m.relations"),
+            {
+                RelationTypes.ANNOTATION: {
+                    "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}]
+                },
+            },
+        )
+
+        # It should also be included when the entire thread is requested.
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        self.assertEqual(len(channel.json_body["chunk"]), 1)
+
+        thread_message = channel.json_body["chunk"][0]
+        self.assertEqual(
+            thread_message["unsigned"].get("m.relations"),
+            {
+                RelationTypes.ANNOTATION: {
+                    "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}]
+                },
+            },
+        )
+
+    def test_bundled_aggregations_with_filter(self) -> None:
+        """
+        If "unsigned" is an omitted field (due to filtering), adding the bundled
+        aggregations should not break.
+
+        Note that the spec allows for a server to return additional fields beyond
+        what is specified.
+        """
+        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+
+        # Note that the sync filter does not include "unsigned" as a field.
+        filter = urllib.parse.quote_plus(
+            b'{"event_fields": ["content", "event_id"], "room": {"timeline": {"limit": 3}}}'
+        )
+        channel = self.make_request(
+            "GET", f"/sync?filter={filter}", access_token=self.user_token
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+
+        # Ensure the timeline is limited, find the parent event.
+        room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
+        self.assertTrue(room_timeline["limited"])
+        parent_event = self._find_event_in_chunk(room_timeline["events"])
+
+        # Ensure there's bundled aggregations on it.
+        self.assertIn("unsigned", parent_event)
+        self.assertIn("m.relations", parent_event["unsigned"])
+
+
+class RelationIgnoredUserTestCase(BaseRelationsTestCase):
+    """Relations sent from an ignored user should be ignored."""
+
+    def _test_ignored_user(
+        self, allowed_event_ids: List[str], ignored_event_ids: List[str]
+    ) -> None:
+        """
+        Fetch the relations and ensure they're all there, then ignore user2, and
+        repeat.
+        """
+        # Get the relations.
+        event_ids = self._get_related_events()
+        self.assertCountEqual(event_ids, allowed_event_ids + ignored_event_ids)
+
+        # Ignore user2 and re-do the requests.
+        self.get_success(
+            self.store.add_account_data_for_user(
+                self.user_id,
+                AccountDataTypes.IGNORED_USER_LIST,
+                {"ignored_users": {self.user2_id: {}}},
+            )
+        )
+
+        # Get the relations.
+        event_ids = self._get_related_events()
+        self.assertCountEqual(event_ids, allowed_event_ids)
+
+    def test_annotation(self) -> None:
+        """Annotations should ignore"""
+        # Send 2 from us, 2 from the to be ignored user.
+        allowed_event_ids = []
+        ignored_event_ids = []
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a")
+        allowed_event_ids.append(channel.json_body["event_id"])
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="b")
+        allowed_event_ids.append(channel.json_body["event_id"])
+        channel = self._send_relation(
+            RelationTypes.ANNOTATION,
+            "m.reaction",
+            key="a",
+            access_token=self.user2_token,
+        )
+        ignored_event_ids.append(channel.json_body["event_id"])
+        channel = self._send_relation(
+            RelationTypes.ANNOTATION,
+            "m.reaction",
+            key="c",
+            access_token=self.user2_token,
+        )
+        ignored_event_ids.append(channel.json_body["event_id"])
+
+        self._test_ignored_user(allowed_event_ids, ignored_event_ids)
+
+    def test_reference(self) -> None:
+        """Annotations should ignore"""
+        channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test")
+        allowed_event_ids = [channel.json_body["event_id"]]
+
+        channel = self._send_relation(
+            RelationTypes.REFERENCE, "m.room.test", access_token=self.user2_token
+        )
+        ignored_event_ids = [channel.json_body["event_id"]]
+
+        self._test_ignored_user(allowed_event_ids, ignored_event_ids)
+
+    def test_thread(self) -> None:
+        """Annotations should ignore"""
+        channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+        allowed_event_ids = [channel.json_body["event_id"]]
+
+        channel = self._send_relation(
+            RelationTypes.THREAD, "m.room.test", access_token=self.user2_token
+        )
+        ignored_event_ids = [channel.json_body["event_id"]]
+
+        self._test_ignored_user(allowed_event_ids, ignored_event_ids)
+
+
 class RelationRedactionTestCase(BaseRelationsTestCase):
-    """Test the behaviour of relations when the parent or child event is redacted."""
+    """
+    Test the behaviour of relations when the parent or child event is redacted.
+
+    The behaviour of each relation type is subtly different, which makes the tests
+    somewhat repetitive; they follow a naming scheme of:
+
+        test_redact_(relation|parent)_{relation_type}
+
+    Here "relation" means that the event with the relation defined on it (the
+    child event) is to be redacted, while "parent" means that the target of the
+    relation (the parent event) is to be redacted.
+
+    The relation_type describes which type of relation is under test (i.e. it is
+    related to the value of rel_type in the event content).
+    """
 
     def _redact(self, event_id: str) -> None:
         channel = self.make_request(
@@ -1358,40 +1429,116 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
         self.assertEqual(200, channel.code, channel.json_body)
 
     def test_redact_relation_annotation(self) -> None:
-        """Test that annotations of an event are properly handled after the
+        """
+        Test that annotations of an event are properly handled after the
         annotation is redacted.
+
+        The redacted relation should not be included in bundled aggregations or
+        the response to relations.
         """
         channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
-        self.assertEqual(200, channel.code, channel.json_body)
         to_redact_event_id = channel.json_body["event_id"]
 
         channel = self._send_relation(
             RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token
         )
-        self.assertEqual(200, channel.code, channel.json_body)
+        unredacted_event_id = channel.json_body["event_id"]
+
+        # Both relations should exist.
+        event_ids = self._get_related_events()
+        relations = self._get_bundled_aggregations()
+        self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id])
+        self.assertEquals(
+            relations["m.annotation"],
+            {"chunk": [{"type": "m.reaction", "key": "a", "count": 2}]},
+        )
+
+        # Both relations appear in the aggregation.
+        chunk = self._get_aggregations()
+        self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 2}])
 
         # Redact one of the reactions.
         self._redact(to_redact_event_id)
 
-        # Ensure that the aggregations are correct.
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}",
-            access_token=self.user_token,
+        # The unredacted relation should still exist.
+        event_ids = self._get_related_events()
+        relations = self._get_bundled_aggregations()
+        self.assertEquals(event_ids, [unredacted_event_id])
+        self.assertEquals(
+            relations["m.annotation"],
+            {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]},
         )
-        self.assertEqual(200, channel.code, channel.json_body)
 
+        # The unredacted aggregation should still exist.
+        chunk = self._get_aggregations()
+        self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 1}])
+
+    def test_redact_relation_thread(self) -> None:
+        """
+        Test that thread replies are properly handled after a thread reply is redacted.
+
+        The redacted event should not be included in bundled aggregations or
+        the response to relations.
+        """
+        channel = self._send_relation(
+            RelationTypes.THREAD,
+            EventTypes.Message,
+            content={"body": "reply 1", "msgtype": "m.text"},
+        )
+        unredacted_event_id = channel.json_body["event_id"]
+
+        # Note that the *last* event in the thread is redacted, as that gets
+        # included in the bundled aggregation.
+        channel = self._send_relation(
+            RelationTypes.THREAD,
+            EventTypes.Message,
+            content={"body": "reply 2", "msgtype": "m.text"},
+        )
+        to_redact_event_id = channel.json_body["event_id"]
+
+        # Both relations exist.
+        event_ids = self._get_related_events()
+        relations = self._get_bundled_aggregations()
+        self.assertEquals(event_ids, [to_redact_event_id, unredacted_event_id])
+        self.assertDictContainsSubset(
+            {
+                "count": 2,
+                "current_user_participated": True,
+            },
+            relations[RelationTypes.THREAD],
+        )
+        # And the latest event returned is the event that will be redacted.
+        self.assertEqual(
+            relations[RelationTypes.THREAD]["latest_event"]["event_id"],
+            to_redact_event_id,
+        )
+
+        # Redact the latest thread reply.
+        self._redact(to_redact_event_id)
+
+        # The unredacted relation should still exist.
+        event_ids = self._get_related_events()
+        relations = self._get_bundled_aggregations()
+        self.assertEquals(event_ids, [unredacted_event_id])
+        self.assertDictContainsSubset(
+            {
+                "count": 1,
+                "current_user_participated": True,
+            },
+            relations[RelationTypes.THREAD],
+        )
+        # And the latest event is now the unredacted event.
         self.assertEqual(
-            channel.json_body,
-            {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]},
+            relations[RelationTypes.THREAD]["latest_event"]["event_id"],
+            unredacted_event_id,
         )
 
-    def test_redact_relation_edit(self) -> None:
+    def test_redact_parent_edit(self) -> None:
         """Test that edits of an event are redacted when the original event
         is redacted.
         """
         # Add a relation
-        channel = self._send_relation(
+        self._send_relation(
             RelationTypes.REPLACE,
             "m.room.message",
             parent_id=self.parent_id,
@@ -1401,54 +1548,83 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
                 "m.new_content": {"msgtype": "m.text", "body": "First edit"},
             },
         )
-        self.assertEqual(200, channel.code, channel.json_body)
 
         # Check the relation is returned
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/relations"
-            f"/{self.parent_id}/m.replace/m.room.message",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        self.assertIn("chunk", channel.json_body)
-        self.assertEqual(len(channel.json_body["chunk"]), 1)
+        event_ids = self._get_related_events()
+        relations = self._get_bundled_aggregations()
+        self.assertEqual(len(event_ids), 1)
+        self.assertIn(RelationTypes.REPLACE, relations)
 
         # Redact the original event
         self._redact(self.parent_id)
 
-        # Try to check for remaining m.replace relations
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/relations"
-            f"/{self.parent_id}/m.replace/m.room.message",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
+        # The relations are not returned.
+        event_ids = self._get_related_events()
+        relations = self._get_bundled_aggregations()
+        self.assertEqual(len(event_ids), 0)
+        self.assertEqual(relations, {})
 
-        # Check that no relations are returned
-        self.assertIn("chunk", channel.json_body)
-        self.assertEqual(channel.json_body["chunk"], [])
-
-    def test_redact_parent(self) -> None:
-        """Test that annotations of an event are redacted when the original event
+    def test_redact_parent_annotation(self) -> None:
+        """Test that annotations of an event are viewable when the original event
         is redacted.
         """
         # Add a relation
         channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍")
-        self.assertEqual(200, channel.code, channel.json_body)
+        related_event_id = channel.json_body["event_id"]
+
+        # The relations should exist.
+        event_ids = self._get_related_events()
+        relations = self._get_bundled_aggregations()
+        self.assertEqual(len(event_ids), 1)
+        self.assertIn(RelationTypes.ANNOTATION, relations)
+
+        # The aggregation should exist.
+        chunk = self._get_aggregations()
+        self.assertEqual(chunk, [{"type": "m.reaction", "key": "👍", "count": 1}])
 
         # Redact the original event.
         self._redact(self.parent_id)
 
-        # Check that aggregations returns zero
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}/m.annotation/m.reaction",
-            access_token=self.user_token,
+        # The relations are returned.
+        event_ids = self._get_related_events()
+        relations = self._get_bundled_aggregations()
+        self.assertEquals(event_ids, [related_event_id])
+        self.assertEquals(
+            relations["m.annotation"],
+            {"chunk": [{"type": "m.reaction", "key": "👍", "count": 1}]},
         )
-        self.assertEqual(200, channel.code, channel.json_body)
 
-        self.assertIn("chunk", channel.json_body)
-        self.assertEqual(channel.json_body["chunk"], [])
+        # The aggregation should still exist.
+        chunk = self._get_aggregations()
+        self.assertEqual(chunk, [{"count": 1, "key": "👍", "type": "m.reaction"}])
+
+    @unittest.override_config({"experimental_features": {"msc3440_enabled": True}})
+    def test_redact_parent_thread(self) -> None:
+        """
+        Test that thread replies are still available when the root event is redacted.
+        """
+        channel = self._send_relation(
+            RelationTypes.THREAD,
+            EventTypes.Message,
+            content={"body": "reply 1", "msgtype": "m.text"},
+        )
+        related_event_id = channel.json_body["event_id"]
+
+        # Redact the root event of the thread.
+        self._redact(self.parent_id)
+
+        # The unredacted relation should still exist.
+        event_ids = self._get_related_events()
+        relations = self._get_bundled_aggregations()
+        self.assertEquals(len(event_ids), 1)
+        self.assertDictContainsSubset(
+            {
+                "count": 1,
+                "current_user_participated": True,
+            },
+            relations[RelationTypes.THREAD],
+        )
+        self.assertEqual(
+            relations[RelationTypes.THREAD]["latest_event"]["event_id"],
+            related_event_id,
+        )
diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
index f3bf8d0934..7b8fe6d025 100644
--- a/tests/rest/client/test_retention.py
+++ b/tests/rest/client/test_retention.py
@@ -24,6 +24,7 @@ from synapse.util import Clock
 from synapse.visibility import filter_events_for_client
 
 from tests import unittest
+from tests.unittest import override_config
 
 one_hour_ms = 3600000
 one_day_ms = one_hour_ms * 24
@@ -38,7 +39,10 @@ class RetentionTestCase(unittest.HomeserverTestCase):
 
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         config = self.default_config()
-        config["retention"] = {
+
+        # merge this default retention config with anything that was specified in
+        # @override_config
+        retention_config = {
             "enabled": True,
             "default_policy": {
                 "min_lifetime": one_day_ms,
@@ -47,6 +51,8 @@ class RetentionTestCase(unittest.HomeserverTestCase):
             "allowed_lifetime_min": one_day_ms,
             "allowed_lifetime_max": one_day_ms * 3,
         }
+        retention_config.update(config.get("retention", {}))
+        config["retention"] = retention_config
 
         self.hs = self.setup_test_homeserver(config=config)
 
@@ -115,22 +121,20 @@ class RetentionTestCase(unittest.HomeserverTestCase):
 
         self._test_retention_event_purged(room_id, one_day_ms * 2)
 
+    @override_config({"retention": {"purge_jobs": [{"interval": "5d"}]}})
     def test_visibility(self) -> None:
         """Tests that synapse.visibility.filter_events_for_client correctly filters out
-        outdated events
+        outdated events, even if the purge job hasn't got to them yet.
+
+        We do this by setting a very long time between purge jobs.
         """
         store = self.hs.get_datastores().main
         storage = self.hs.get_storage()
         room_id = self.helper.create_room_as(self.user_id, tok=self.token)
-        events = []
 
         # Send a first event, which should be filtered out at the end of the test.
         resp = self.helper.send(room_id=room_id, body="1", tok=self.token)
-
-        # Get the event from the store so that we end up with a FrozenEvent that we can
-        # give to filter_events_for_client. We need to do this now because the event won't
-        # be in the database anymore after it has expired.
-        events.append(self.get_success(store.get_event(resp.get("event_id"))))
+        first_event_id = resp.get("event_id")
 
         # Advance the time by 2 days. We're using the default retention policy, therefore
         # after this the first event will still be valid.
@@ -138,16 +142,17 @@ class RetentionTestCase(unittest.HomeserverTestCase):
 
         # Send another event, which shouldn't get filtered out.
         resp = self.helper.send(room_id=room_id, body="2", tok=self.token)
-
         valid_event_id = resp.get("event_id")
 
-        events.append(self.get_success(store.get_event(valid_event_id)))
-
         # Advance the time by another 2 days. After this, the first event should be
         # outdated but not the second one.
         self.reactor.advance(one_day_ms * 2 / 1000)
 
-        # Run filter_events_for_client with our list of FrozenEvents.
+        # Fetch the events, and run filter_events_for_client on them
+        events = self.get_success(
+            store.get_events_as_list([first_event_id, valid_event_id])
+        )
+        self.assertEqual(2, len(events), "events retrieved from database")
         filtered_events = self.get_success(
             filter_events_for_client(storage, self.user_id, events)
         )
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index 37866ee330..3a9617d6da 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -2141,21 +2141,19 @@ class RelationsTestCase(unittest.HomeserverTestCase):
 
     def test_filter_relation_senders(self) -> None:
         # Messages which second user reacted to.
-        filter = {"io.element.relation_senders": [self.second_user_id]}
+        filter = {"related_by_senders": [self.second_user_id]}
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 1, chunk)
         self.assertEqual(chunk[0]["event_id"], self.event_id_1)
 
         # Messages which third user reacted to.
-        filter = {"io.element.relation_senders": [self.third_user_id]}
+        filter = {"related_by_senders": [self.third_user_id]}
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 1, chunk)
         self.assertEqual(chunk[0]["event_id"], self.event_id_2)
 
         # Messages which either user reacted to.
-        filter = {
-            "io.element.relation_senders": [self.second_user_id, self.third_user_id]
-        }
+        filter = {"related_by_senders": [self.second_user_id, self.third_user_id]}
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 2, chunk)
         self.assertCountEqual(
@@ -2164,20 +2162,20 @@ class RelationsTestCase(unittest.HomeserverTestCase):
 
     def test_filter_relation_type(self) -> None:
         # Messages which have annotations.
-        filter = {"io.element.relation_types": [RelationTypes.ANNOTATION]}
+        filter = {"related_by_rel_types": [RelationTypes.ANNOTATION]}
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 1, chunk)
         self.assertEqual(chunk[0]["event_id"], self.event_id_1)
 
         # Messages which have references.
-        filter = {"io.element.relation_types": [RelationTypes.REFERENCE]}
+        filter = {"related_by_rel_types": [RelationTypes.REFERENCE]}
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 1, chunk)
         self.assertEqual(chunk[0]["event_id"], self.event_id_2)
 
         # Messages which have either annotations or references.
         filter = {
-            "io.element.relation_types": [
+            "related_by_rel_types": [
                 RelationTypes.ANNOTATION,
                 RelationTypes.REFERENCE,
             ]
@@ -2191,8 +2189,8 @@ class RelationsTestCase(unittest.HomeserverTestCase):
     def test_filter_relation_senders_and_type(self) -> None:
         # Messages which second user reacted to.
         filter = {
-            "io.element.relation_senders": [self.second_user_id],
-            "io.element.relation_types": [RelationTypes.ANNOTATION],
+            "related_by_senders": [self.second_user_id],
+            "related_by_rel_types": [RelationTypes.ANNOTATION],
         }
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 1, chunk)
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py
index 58f1ea11b7..e7de67e3a3 100644
--- a/tests/rest/client/test_third_party_rules.py
+++ b/tests/rest/client/test_third_party_rules.py
@@ -775,3 +775,124 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
         self.assertEqual(args[0], user_id)
         self.assertFalse(args[1])
         self.assertTrue(args[2])
+
+    def test_check_can_deactivate_user(self) -> None:
+        """Tests that the on_user_deactivation_status_changed module callback is called
+        correctly when processing a user's deactivation.
+        """
+        # Register a mocked callback.
+        deactivation_mock = Mock(return_value=make_awaitable(False))
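+        # The callback returns False, so the deactivation should be blocked.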
+        third_party_rules = self.hs.get_third_party_event_rules()
+        third_party_rules._check_can_deactivate_user_callbacks.append(
+            deactivation_mock,
+        )
+
+        # Register a user that we'll deactivate.
+        user_id = self.register_user("altan", "password")
+        tok = self.login("altan", "password")
+
+        # Deactivate that user.
+        channel = self.make_request(
+            "POST",
+            "/_matrix/client/v3/account/deactivate",
+            {
+                "auth": {
+                    "type": LoginType.PASSWORD,
+                    "password": "password",
+                    "identifier": {
+                        "type": "m.id.user",
+                        "user": user_id,
+                    },
+                },
+                "erase": True,
+            },
+            access_token=tok,
+        )
+
+        # Check that the deactivation was blocked
+        self.assertEqual(channel.code, 403, channel.json_body)
+
+        # Check that the mock was called once.
+        deactivation_mock.assert_called_once()
+        args = deactivation_mock.call_args[0]
+
+        # Check that the mock was called with the right user ID
+        self.assertEqual(args[0], user_id)
+
+        # Check that the request was not made by an admin
+        self.assertEqual(args[1], False)
+
+    def test_check_can_deactivate_user_admin(self) -> None:
+        """Tests that the on_user_deactivation_status_changed module callback is called
+        correctly when processing a user's deactivation triggered by a server admin.
+        """
+        # Register a mocked callback.
+        deactivation_mock = Mock(return_value=make_awaitable(False))
+        third_party_rules = self.hs.get_third_party_event_rules()
+        third_party_rules._check_can_deactivate_user_callbacks.append(
+            deactivation_mock,
+        )
+
+        # Register an admin user.
+        self.register_user("admin", "password", admin=True)
+        admin_tok = self.login("admin", "password")
+
+        # Register a user that we'll deactivate.
+        user_id = self.register_user("altan", "password")
+
+        # Deactivate the user.
+        channel = self.make_request(
+            "PUT",
+            "/_synapse/admin/v2/users/%s" % user_id,
+            {"deactivated": True},
+            access_token=admin_tok,
+        )
+
+        # Check that the deactivation was blocked
+        self.assertEqual(channel.code, 403, channel.json_body)
+
+        # Check that the mock was called once.
+        deactivation_mock.assert_called_once()
+        args = deactivation_mock.call_args[0]
+
+        # Check that the mock was called with the right user ID
+        self.assertEqual(args[0], user_id)
+
+        # Check that the request was made by an admin
+        self.assertEqual(args[1], True)
+
+    def test_check_can_shutdown_room(self) -> None:
+        """Tests that the check_can_shutdown_room module callback is called
+        correctly when processing an admin's shutdown room request.
+        """
+        # Register a mocked callback.
+        shutdown_mock = Mock(return_value=make_awaitable(False))
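+        # The callback returns False, so the room shutdown should be blocked.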
+        third_party_rules = self.hs.get_third_party_event_rules()
+        third_party_rules._check_can_shutdown_room_callbacks.append(
+            shutdown_mock,
+        )
+
+        # Register an admin user.
+        admin_user_id = self.register_user("admin", "password", admin=True)
+        admin_tok = self.login("admin", "password")
+
+        # Shutdown the room.
+        channel = self.make_request(
+            "DELETE",
+            "/_synapse/admin/v2/rooms/%s" % self.room_id,
+            {},
+            access_token=admin_tok,
+        )
+
+        # Check that the shutdown was blocked
+        self.assertEqual(channel.code, 403, channel.json_body)
+
+        # Check that the mock was called once.
+        shutdown_mock.assert_called_once()
+        args = shutdown_mock.call_args[0]
+
+        # Check that the mock was called with the right user ID
+        self.assertEqual(args[0], admin_user_id)
+
+        # Check that the mock was called with the right room ID
+        self.assertEqual(args[1], self.room_id)
diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py
index 3b5747cb12..8d8251b2ac 100644
--- a/tests/rest/client/test_transactions.py
+++ b/tests/rest/client/test_transactions.py
@@ -1,3 +1,18 @@
+# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from http import HTTPStatus
 from unittest.mock import Mock, call
 
 from twisted.internet import defer, reactor
@@ -11,14 +26,14 @@ from tests.utils import MockClock
 
 
 class HttpTransactionCacheTestCase(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         self.clock = MockClock()
         self.hs = Mock()
         self.hs.get_clock = Mock(return_value=self.clock)
         self.hs.get_auth = Mock()
         self.cache = HttpTransactionCache(self.hs)
 
-        self.mock_http_response = (200, "GOOD JOB!")
+        self.mock_http_response = (HTTPStatus.OK, "GOOD JOB!")
         self.mock_key = "foo"
 
     @defer.inlineCallbacks
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index 4672a68596..978c252f84 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -13,19 +13,24 @@
 # limitations under the License.
 import urllib.parse
 from io import BytesIO, StringIO
+from typing import Any, Dict, Optional, Union
 from unittest.mock import Mock
 
 import signedjson.key
 from canonicaljson import encode_canonical_json
-from nacl.signing import SigningKey
 from signedjson.sign import sign_json
+from signedjson.types import SigningKey
 
-from twisted.web.resource import NoResource
+from twisted.test.proto_helpers import MemoryReactor
+from twisted.web.resource import NoResource, Resource
 
 from synapse.crypto.keyring import PerspectivesKeyFetcher
 from synapse.http.site import SynapseRequest
 from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.server import HomeServer
 from synapse.storage.keys import FetchKeyResult
+from synapse.types import JsonDict
+from synapse.util import Clock
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.stringutils import random_string
 
@@ -35,11 +40,11 @@ from tests.utils import default_config
 
 
 class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase):
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         self.http_client = Mock()
         return self.setup_test_homeserver(federation_http_client=self.http_client)
 
-    def create_test_resource(self):
+    def create_test_resource(self) -> Resource:
         return create_resource_tree(
             {"/_matrix/key/v2": KeyApiV2Resource(self.hs)}, root_resource=NoResource()
         )
@@ -51,7 +56,12 @@ class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase):
         Tell the mock http client to expect an outgoing GET request for the given key
         """
 
-        async def get_json(destination, path, ignore_backoff=False, **kwargs):
+        async def get_json(
+            destination: str,
+            path: str,
+            ignore_backoff: bool = False,
+            **kwargs: Any,
+        ) -> Union[JsonDict, list]:
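+            # The mocked request should target the expected server and bypass backoff.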
             self.assertTrue(ignore_backoff)
             self.assertEqual(destination, server_name)
             key_id = "%s:%s" % (signing_key.alg, signing_key.version)
@@ -84,7 +94,8 @@ class RemoteKeyResourceTestCase(BaseRemoteKeyResourceTestCase):
         Checks that the response is a 200 and returns the decoded json body.
         """
         channel = FakeChannel(self.site, self.reactor)
-        req = SynapseRequest(channel, self.site)
+        # channel is a `FakeChannel` but `HTTPChannel` is expected
+        req = SynapseRequest(channel, self.site)  # type: ignore[arg-type]
         req.content = BytesIO(b"")
         req.requestReceived(
             b"GET",
@@ -97,7 +108,7 @@ class RemoteKeyResourceTestCase(BaseRemoteKeyResourceTestCase):
         resp = channel.json_body
         return resp
 
-    def test_get_key(self):
+    def test_get_key(self) -> None:
         """Fetch a remote key"""
         SERVER_NAME = "remote.server"
         testkey = signedjson.key.generate_signing_key("ver1")
@@ -114,7 +125,7 @@ class RemoteKeyResourceTestCase(BaseRemoteKeyResourceTestCase):
         self.assertIn(SERVER_NAME, keys[0]["signatures"])
         self.assertIn(self.hs.hostname, keys[0]["signatures"])
 
-    def test_get_own_key(self):
+    def test_get_own_key(self) -> None:
         """Fetch our own key"""
         testkey = signedjson.key.generate_signing_key("ver1")
         self.expect_outgoing_key_request(self.hs.hostname, testkey)
@@ -141,7 +152,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
     endpoint, to check that the two implementations are compatible.
     """
 
-    def default_config(self):
+    def default_config(self) -> Dict[str, Any]:
         config = super().default_config()
 
         # replace the signing key with our own
@@ -152,7 +163,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
 
         return config
 
-    def prepare(self, reactor, clock, homeserver):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         # make a second homeserver, configured to use the first one as a key notary
         self.http_client2 = Mock()
         config = default_config(name="keyclient")
@@ -175,7 +186,9 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
 
         # wire up outbound POST /key/v2/query requests from hs2 so that they
         # will be forwarded to hs1
-        async def post_json(destination, path, data):
+        async def post_json(
+            destination: str, path: str, data: Optional[JsonDict] = None
+        ) -> Union[JsonDict, list]:
             self.assertEqual(destination, self.hs.hostname)
             self.assertEqual(
                 path,
@@ -183,7 +196,8 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
             )
 
             channel = FakeChannel(self.site, self.reactor)
-            req = SynapseRequest(channel, self.site)
+            # channel is a `FakeChannel` but `HTTPChannel` is expected
+            req = SynapseRequest(channel, self.site)  # type: ignore[arg-type]
             req.content = BytesIO(encode_canonical_json(data))
 
             req.requestReceived(
@@ -198,7 +212,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
 
         self.http_client2.post_json.side_effect = post_json
 
-    def test_get_key(self):
+    def test_get_key(self) -> None:
         """Fetch a key belonging to a random server"""
         # make up a key to be fetched.
         testkey = signedjson.key.generate_signing_key("abc")
@@ -218,7 +232,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
             signedjson.key.encode_verify_key_base64(testkey.verify_key),
         )
 
-    def test_get_notary_key(self):
+    def test_get_notary_key(self) -> None:
         """Fetch a key belonging to the notary server"""
         # make up a key to be fetched. We randomise the keyid to try to get it to
         # appear before the key server signing key sometimes (otherwise we bail out
@@ -240,7 +254,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
             signedjson.key.encode_verify_key_base64(testkey.verify_key),
         )
 
-    def test_get_notary_keyserver_key(self):
+    def test_get_notary_keyserver_key(self) -> None:
         """Fetch the notary's keyserver key"""
         # we expect hs1 to make a regular key request to itself
         self.expect_outgoing_key_request(self.hs.hostname, self.hs_signing_key)
diff --git a/tests/rest/media/v1/test_base.py b/tests/rest/media/v1/test_base.py
index f761e23f1b..c73179151a 100644
--- a/tests/rest/media/v1/test_base.py
+++ b/tests/rest/media/v1/test_base.py
@@ -28,11 +28,11 @@ class GetFileNameFromHeadersTests(unittest.TestCase):
         b"inline; filename*=utf-8''foo%C2%A3bar": "foo£bar",
     }
 
-    def tests(self):
+    def tests(self) -> None:
         for hdr, expected in self.TEST_CASES.items():
             res = get_filename_from_headers({b"Content-Disposition": [hdr]})
             self.assertEqual(
                 res,
                 expected,
-                "expected output for %s to be %s but was %s" % (hdr, expected, res),
+                f"expected output for {hdr!r} to be {expected} but was {res}",
             )
diff --git a/tests/rest/media/v1/test_filepath.py b/tests/rest/media/v1/test_filepath.py
index 913bc530aa..43e6f0f70a 100644
--- a/tests/rest/media/v1/test_filepath.py
+++ b/tests/rest/media/v1/test_filepath.py
@@ -21,12 +21,12 @@ from tests import unittest
 
 
 class MediaFilePathsTestCase(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
 
         self.filepaths = MediaFilePaths("/media_store")
 
-    def test_local_media_filepath(self):
+    def test_local_media_filepath(self) -> None:
         """Test local media paths"""
         self.assertEqual(
             self.filepaths.local_media_filepath_rel("GerZNDnDZVjsOtardLuwfIBg"),
@@ -37,7 +37,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "/media_store/local_content/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_local_media_thumbnail(self):
+    def test_local_media_thumbnail(self) -> None:
         """Test local media thumbnail paths"""
         self.assertEqual(
             self.filepaths.local_media_thumbnail_rel(
@@ -52,14 +52,14 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "/media_store/local_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
         )
 
-    def test_local_media_thumbnail_dir(self):
+    def test_local_media_thumbnail_dir(self) -> None:
         """Test local media thumbnail directory paths"""
         self.assertEqual(
             self.filepaths.local_media_thumbnail_dir("GerZNDnDZVjsOtardLuwfIBg"),
             "/media_store/local_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_remote_media_filepath(self):
+    def test_remote_media_filepath(self) -> None:
         """Test remote media paths"""
         self.assertEqual(
             self.filepaths.remote_media_filepath_rel(
@@ -74,7 +74,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "/media_store/remote_content/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_remote_media_thumbnail(self):
+    def test_remote_media_thumbnail(self) -> None:
         """Test remote media thumbnail paths"""
         self.assertEqual(
             self.filepaths.remote_media_thumbnail_rel(
@@ -99,7 +99,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "/media_store/remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
         )
 
-    def test_remote_media_thumbnail_legacy(self):
+    def test_remote_media_thumbnail_legacy(self) -> None:
         """Test old-style remote media thumbnail paths"""
         self.assertEqual(
             self.filepaths.remote_media_thumbnail_rel_legacy(
@@ -108,7 +108,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg",
         )
 
-    def test_remote_media_thumbnail_dir(self):
+    def test_remote_media_thumbnail_dir(self) -> None:
         """Test remote media thumbnail directory paths"""
         self.assertEqual(
             self.filepaths.remote_media_thumbnail_dir(
@@ -117,7 +117,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "/media_store/remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_url_cache_filepath(self):
+    def test_url_cache_filepath(self) -> None:
         """Test URL cache paths"""
         self.assertEqual(
             self.filepaths.url_cache_filepath_rel("2020-01-02_GerZNDnDZVjsOtar"),
@@ -128,7 +128,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "/media_store/url_cache/2020-01-02/GerZNDnDZVjsOtar",
         )
 
-    def test_url_cache_filepath_legacy(self):
+    def test_url_cache_filepath_legacy(self) -> None:
         """Test old-style URL cache paths"""
         self.assertEqual(
             self.filepaths.url_cache_filepath_rel("GerZNDnDZVjsOtardLuwfIBg"),
@@ -139,7 +139,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "/media_store/url_cache/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_url_cache_filepath_dirs_to_delete(self):
+    def test_url_cache_filepath_dirs_to_delete(self) -> None:
         """Test URL cache cleanup paths"""
         self.assertEqual(
             self.filepaths.url_cache_filepath_dirs_to_delete(
@@ -148,7 +148,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             ["/media_store/url_cache/2020-01-02"],
         )
 
-    def test_url_cache_filepath_dirs_to_delete_legacy(self):
+    def test_url_cache_filepath_dirs_to_delete_legacy(self) -> None:
         """Test old-style URL cache cleanup paths"""
         self.assertEqual(
             self.filepaths.url_cache_filepath_dirs_to_delete(
@@ -160,7 +160,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             ],
         )
 
-    def test_url_cache_thumbnail(self):
+    def test_url_cache_thumbnail(self) -> None:
         """Test URL cache thumbnail paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_rel(
@@ -175,7 +175,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "/media_store/url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar/800-600-image-jpeg-scale",
         )
 
-    def test_url_cache_thumbnail_legacy(self):
+    def test_url_cache_thumbnail_legacy(self) -> None:
         """Test old-style URL cache thumbnail paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_rel(
@@ -190,7 +190,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "/media_store/url_cache_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
         )
 
-    def test_url_cache_thumbnail_directory(self):
+    def test_url_cache_thumbnail_directory(self) -> None:
         """Test URL cache thumbnail directory paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_directory_rel(
@@ -203,7 +203,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "/media_store/url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar",
         )
 
-    def test_url_cache_thumbnail_directory_legacy(self):
+    def test_url_cache_thumbnail_directory_legacy(self) -> None:
         """Test old-style URL cache thumbnail directory paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_directory_rel(
@@ -216,7 +216,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             "/media_store/url_cache_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_url_cache_thumbnail_dirs_to_delete(self):
+    def test_url_cache_thumbnail_dirs_to_delete(self) -> None:
         """Test URL cache thumbnail cleanup paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_dirs_to_delete(
@@ -228,7 +228,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             ],
         )
 
-    def test_url_cache_thumbnail_dirs_to_delete_legacy(self):
+    def test_url_cache_thumbnail_dirs_to_delete_legacy(self) -> None:
         """Test old-style URL cache thumbnail cleanup paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_dirs_to_delete(
@@ -241,7 +241,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             ],
         )
 
-    def test_server_name_validation(self):
+    def test_server_name_validation(self) -> None:
         """Test validation of server names"""
         self._test_path_validation(
             [
@@ -274,7 +274,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             ],
         )
 
-    def test_file_id_validation(self):
+    def test_file_id_validation(self) -> None:
         """Test validation of local, remote and legacy URL cache file / media IDs"""
         # File / media IDs get split into three parts to form paths, consisting of the
         # first two characters, next two characters and rest of the ID.
@@ -357,7 +357,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             invalid_values=invalid_file_ids,
         )
 
-    def test_url_cache_media_id_validation(self):
+    def test_url_cache_media_id_validation(self) -> None:
         """Test validation of URL cache media IDs"""
         self._test_path_validation(
             [
@@ -387,7 +387,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             ],
         )
 
-    def test_content_type_validation(self):
+    def test_content_type_validation(self) -> None:
         """Test validation of thumbnail content types"""
         self._test_path_validation(
             [
@@ -410,7 +410,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
             ],
         )
 
-    def test_thumbnail_method_validation(self):
+    def test_thumbnail_method_validation(self) -> None:
         """Test validation of thumbnail methods"""
         self._test_path_validation(
             [
@@ -440,7 +440,7 @@ class MediaFilePathsTestCase(unittest.TestCase):
         parameter: str,
         valid_values: Iterable[str],
         invalid_values: Iterable[str],
-    ):
+    ) -> None:
         """Test that the specified methods validate the named parameter as expected
 
         Args:
diff --git a/tests/rest/media/v1/test_html_preview.py b/tests/rest/media/v1/test_html_preview.py
index a4b57e3d1f..62e308814d 100644
--- a/tests/rest/media/v1/test_html_preview.py
+++ b/tests/rest/media/v1/test_html_preview.py
@@ -16,7 +16,6 @@ from synapse.rest.media.v1.preview_html import (
     _get_html_media_encodings,
     decode_body,
     parse_html_to_open_graph,
-    rebase_url,
     summarize_paragraphs,
 )
 
@@ -32,7 +31,7 @@ class SummarizeTestCase(unittest.TestCase):
     if not lxml:
         skip = "url preview feature requires lxml"
 
-    def test_long_summarize(self):
+    def test_long_summarize(self) -> None:
         example_paras = [
             """Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:
             Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in
@@ -90,7 +89,7 @@ class SummarizeTestCase(unittest.TestCase):
             " Tromsøya had a population of 36,088. Substantial parts of the urban…",
         )
 
-    def test_short_summarize(self):
+    def test_short_summarize(self) -> None:
         example_paras = [
             "Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:"
             " Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in"
@@ -117,7 +116,7 @@ class SummarizeTestCase(unittest.TestCase):
             " most of the year.",
         )
 
-    def test_small_then_large_summarize(self):
+    def test_small_then_large_summarize(self) -> None:
         example_paras = [
             "Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:"
             " Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in"
@@ -150,7 +149,7 @@ class CalcOgTestCase(unittest.TestCase):
     if not lxml:
         skip = "url preview feature requires lxml"
 
-    def test_simple(self):
+    def test_simple(self) -> None:
         html = b"""
         <html>
         <head><title>Foo</title></head>
@@ -161,11 +160,11 @@ class CalcOgTestCase(unittest.TestCase):
         """
 
         tree = decode_body(html, "http://example.com/test.html")
-        og = parse_html_to_open_graph(tree, "http://example.com/test.html")
+        og = parse_html_to_open_graph(tree)
 
         self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
 
-    def test_comment(self):
+    def test_comment(self) -> None:
         html = b"""
         <html>
         <head><title>Foo</title></head>
@@ -177,11 +176,11 @@ class CalcOgTestCase(unittest.TestCase):
         """
 
         tree = decode_body(html, "http://example.com/test.html")
-        og = parse_html_to_open_graph(tree, "http://example.com/test.html")
+        og = parse_html_to_open_graph(tree)
 
         self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
 
-    def test_comment2(self):
+    def test_comment2(self) -> None:
         html = b"""
         <html>
         <head><title>Foo</title></head>
@@ -196,7 +195,7 @@ class CalcOgTestCase(unittest.TestCase):
         """
 
         tree = decode_body(html, "http://example.com/test.html")
-        og = parse_html_to_open_graph(tree, "http://example.com/test.html")
+        og = parse_html_to_open_graph(tree)
 
         self.assertEqual(
             og,
@@ -206,7 +205,7 @@ class CalcOgTestCase(unittest.TestCase):
             },
         )
 
-    def test_script(self):
+    def test_script(self) -> None:
         html = b"""
         <html>
         <head><title>Foo</title></head>
@@ -218,11 +217,11 @@ class CalcOgTestCase(unittest.TestCase):
         """
 
         tree = decode_body(html, "http://example.com/test.html")
-        og = parse_html_to_open_graph(tree, "http://example.com/test.html")
+        og = parse_html_to_open_graph(tree)
 
         self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
 
-    def test_missing_title(self):
+    def test_missing_title(self) -> None:
         html = b"""
         <html>
         <body>
@@ -232,11 +231,11 @@ class CalcOgTestCase(unittest.TestCase):
         """
 
         tree = decode_body(html, "http://example.com/test.html")
-        og = parse_html_to_open_graph(tree, "http://example.com/test.html")
+        og = parse_html_to_open_graph(tree)
 
         self.assertEqual(og, {"og:title": None, "og:description": "Some text."})
 
-    def test_h1_as_title(self):
+    def test_h1_as_title(self) -> None:
         html = b"""
         <html>
         <meta property="og:description" content="Some text."/>
@@ -247,11 +246,11 @@ class CalcOgTestCase(unittest.TestCase):
         """
 
         tree = decode_body(html, "http://example.com/test.html")
-        og = parse_html_to_open_graph(tree, "http://example.com/test.html")
+        og = parse_html_to_open_graph(tree)
 
         self.assertEqual(og, {"og:title": "Title", "og:description": "Some text."})
 
-    def test_missing_title_and_broken_h1(self):
+    def test_missing_title_and_broken_h1(self) -> None:
         html = b"""
         <html>
         <body>
@@ -262,23 +261,23 @@ class CalcOgTestCase(unittest.TestCase):
         """
 
         tree = decode_body(html, "http://example.com/test.html")
-        og = parse_html_to_open_graph(tree, "http://example.com/test.html")
+        og = parse_html_to_open_graph(tree)
 
         self.assertEqual(og, {"og:title": None, "og:description": "Some text."})
 
-    def test_empty(self):
+    def test_empty(self) -> None:
         """Test a body with no data in it."""
         html = b""
         tree = decode_body(html, "http://example.com/test.html")
         self.assertIsNone(tree)
 
-    def test_no_tree(self):
+    def test_no_tree(self) -> None:
         """A valid body with no tree in it."""
         html = b"\x00"
         tree = decode_body(html, "http://example.com/test.html")
         self.assertIsNone(tree)
 
-    def test_xml(self):
+    def test_xml(self) -> None:
         """Test decoding XML and ensure it works properly."""
         # Note that the strip() call is important to ensure the xml tag starts
         # at the initial byte.
@@ -290,10 +289,10 @@ class CalcOgTestCase(unittest.TestCase):
         <head><title>Foo</title></head><body>Some text.</body></html>
         """.strip()
         tree = decode_body(html, "http://example.com/test.html")
-        og = parse_html_to_open_graph(tree, "http://example.com/test.html")
+        og = parse_html_to_open_graph(tree)
         self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
 
-    def test_invalid_encoding(self):
+    def test_invalid_encoding(self) -> None:
         """An invalid character encoding should be ignored and treated as UTF-8, if possible."""
         html = b"""
         <html>
@@ -304,10 +303,10 @@ class CalcOgTestCase(unittest.TestCase):
         </html>
         """
         tree = decode_body(html, "http://example.com/test.html", "invalid-encoding")
-        og = parse_html_to_open_graph(tree, "http://example.com/test.html")
+        og = parse_html_to_open_graph(tree)
         self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
 
-    def test_invalid_encoding2(self):
+    def test_invalid_encoding2(self) -> None:
         """A body which doesn't match the sent character encoding."""
         # Note that this contains an invalid UTF-8 sequence in the title.
         html = b"""
@@ -319,10 +318,10 @@ class CalcOgTestCase(unittest.TestCase):
         </html>
         """
         tree = decode_body(html, "http://example.com/test.html")
-        og = parse_html_to_open_graph(tree, "http://example.com/test.html")
+        og = parse_html_to_open_graph(tree)
         self.assertEqual(og, {"og:title": "ÿÿ Foo", "og:description": "Some text."})
 
-    def test_windows_1252(self):
+    def test_windows_1252(self) -> None:
         """A body which uses cp1252, but doesn't declare that."""
         html = b"""
         <html>
@@ -333,12 +332,12 @@ class CalcOgTestCase(unittest.TestCase):
         </html>
         """
         tree = decode_body(html, "http://example.com/test.html")
-        og = parse_html_to_open_graph(tree, "http://example.com/test.html")
+        og = parse_html_to_open_graph(tree)
         self.assertEqual(og, {"og:title": "ó", "og:description": "Some text."})
 
 
 class MediaEncodingTestCase(unittest.TestCase):
-    def test_meta_charset(self):
+    def test_meta_charset(self) -> None:
         """A character encoding is found via the meta tag."""
         encodings = _get_html_media_encodings(
             b"""
@@ -363,7 +362,7 @@ class MediaEncodingTestCase(unittest.TestCase):
         )
         self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
 
-    def test_meta_charset_underscores(self):
+    def test_meta_charset_underscores(self) -> None:
         """A character encoding contains underscore."""
         encodings = _get_html_media_encodings(
             b"""
@@ -376,7 +375,7 @@ class MediaEncodingTestCase(unittest.TestCase):
         )
         self.assertEqual(list(encodings), ["shift_jis", "utf-8", "cp1252"])
 
-    def test_xml_encoding(self):
+    def test_xml_encoding(self) -> None:
         """A character encoding is found via the meta tag."""
         encodings = _get_html_media_encodings(
             b"""
@@ -388,7 +387,7 @@ class MediaEncodingTestCase(unittest.TestCase):
         )
         self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
 
-    def test_meta_xml_encoding(self):
+    def test_meta_xml_encoding(self) -> None:
         """Meta tags take precedence over XML encoding."""
         encodings = _get_html_media_encodings(
             b"""
@@ -402,7 +401,7 @@ class MediaEncodingTestCase(unittest.TestCase):
         )
         self.assertEqual(list(encodings), ["utf-16", "ascii", "utf-8", "cp1252"])
 
-    def test_content_type(self):
+    def test_content_type(self) -> None:
         """A character encoding is found via the Content-Type header."""
         # Test a few variations of the header.
         headers = (
@@ -417,12 +416,12 @@ class MediaEncodingTestCase(unittest.TestCase):
             encodings = _get_html_media_encodings(b"", header)
             self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
 
-    def test_fallback(self):
+    def test_fallback(self) -> None:
         """A character encoding cannot be found in the body or header."""
         encodings = _get_html_media_encodings(b"", "text/html")
         self.assertEqual(list(encodings), ["utf-8", "cp1252"])
 
-    def test_duplicates(self):
+    def test_duplicates(self) -> None:
         """Ensure each encoding is only attempted once."""
         encodings = _get_html_media_encodings(
             b"""
@@ -436,7 +435,7 @@ class MediaEncodingTestCase(unittest.TestCase):
         )
         self.assertEqual(list(encodings), ["utf-8", "cp1252"])
 
-    def test_unknown_invalid(self):
+    def test_unknown_invalid(self) -> None:
         """A character encoding should be ignored if it is unknown or invalid."""
         encodings = _get_html_media_encodings(
             b"""
@@ -448,34 +447,3 @@ class MediaEncodingTestCase(unittest.TestCase):
             'text/html; charset="invalid"',
         )
         self.assertEqual(list(encodings), ["utf-8", "cp1252"])
-
-
-class RebaseUrlTestCase(unittest.TestCase):
-    def test_relative(self):
-        """Relative URLs should be resolved based on the context of the base URL."""
-        self.assertEqual(
-            rebase_url("subpage", "https://example.com/foo/"),
-            "https://example.com/foo/subpage",
-        )
-        self.assertEqual(
-            rebase_url("sibling", "https://example.com/foo"),
-            "https://example.com/sibling",
-        )
-        self.assertEqual(
-            rebase_url("/bar", "https://example.com/foo/"),
-            "https://example.com/bar",
-        )
-
-    def test_absolute(self):
-        """Absolute URLs should not be modified."""
-        self.assertEqual(
-            rebase_url("https://alice.com/a/", "https://example.com/foo/"),
-            "https://alice.com/a/",
-        )
-
-    def test_data(self):
-        """Data URLs should not be modified."""
-        self.assertEqual(
-            rebase_url("data:,Hello%2C%20World%21", "https://example.com/foo/"),
-            "data:,Hello%2C%20World%21",
-        )
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index cba9be17c4..7204b2dfe0 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/rest/media/v1/test_media_storage.py
@@ -16,7 +16,7 @@ import shutil
 import tempfile
 from binascii import unhexlify
 from io import BytesIO
-from typing import Optional
+from typing import Any, BinaryIO, Dict, List, Optional, Union
 from unittest.mock import Mock
 from urllib import parse
 
@@ -26,18 +26,24 @@ from PIL import Image as Image
 
 from twisted.internet import defer
 from twisted.internet.defer import Deferred
+from twisted.test.proto_helpers import MemoryReactor
 
+from synapse.events import EventBase
 from synapse.events.spamcheck import load_legacy_spam_checkers
 from synapse.logging.context import make_deferred_yieldable
+from synapse.module_api import ModuleApi
 from synapse.rest import admin
 from synapse.rest.client import login
 from synapse.rest.media.v1._base import FileInfo
 from synapse.rest.media.v1.filepath import MediaFilePaths
-from synapse.rest.media.v1.media_storage import MediaStorage
+from synapse.rest.media.v1.media_storage import MediaStorage, ReadableFileWrapper
 from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend
+from synapse.server import HomeServer
+from synapse.types import RoomAlias
+from synapse.util import Clock
 
 from tests import unittest
-from tests.server import FakeSite, make_request
+from tests.server import FakeChannel, FakeSite, make_request
 from tests.test_utils import SMALL_PNG
 from tests.utils import default_config
 
@@ -46,7 +52,7 @@ class MediaStorageTests(unittest.HomeserverTestCase):
 
     needs_threadpool = True
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-")
         self.addCleanup(shutil.rmtree, self.test_dir)
 
@@ -62,7 +68,7 @@ class MediaStorageTests(unittest.HomeserverTestCase):
             hs, self.primary_base_path, self.filepaths, storage_providers
         )
 
-    def test_ensure_media_is_in_local_cache(self):
+    def test_ensure_media_is_in_local_cache(self) -> None:
         media_id = "some_media_id"
         test_body = "Test\n"
 
@@ -105,7 +111,7 @@ class MediaStorageTests(unittest.HomeserverTestCase):
         self.assertEqual(test_body, body)
 
 
-@attr.s(slots=True, frozen=True)
+@attr.s(auto_attribs=True, slots=True, frozen=True)
 class _TestImage:
     """An image for testing thumbnailing with the expected results
 
@@ -121,18 +127,18 @@ class _TestImage:
             a 404 is expected.
     """
 
-    data = attr.ib(type=bytes)
-    content_type = attr.ib(type=bytes)
-    extension = attr.ib(type=bytes)
-    expected_cropped = attr.ib(type=Optional[bytes], default=None)
-    expected_scaled = attr.ib(type=Optional[bytes], default=None)
-    expected_found = attr.ib(default=True, type=bool)
+    data: bytes
+    content_type: bytes
+    extension: bytes
+    expected_cropped: Optional[bytes] = None
+    expected_scaled: Optional[bytes] = None
+    expected_found: bool = True
 
 
 @parameterized_class(
     ("test_image",),
     [
-        # smoll png
+        # small png
         (
             _TestImage(
                 SMALL_PNG,
@@ -193,11 +199,17 @@ class MediaRepoTests(unittest.HomeserverTestCase):
     hijack_auth = True
     user_id = "@test:user"
 
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
 
         self.fetches = []
 
-        def get_file(destination, path, output_stream, args=None, max_size=None):
+        def get_file(
+            destination: str,
+            path: str,
+            output_stream: BinaryIO,
+            args: Optional[Dict[str, Union[str, List[str]]]] = None,
+            max_size: Optional[int] = None,
+        ) -> Deferred:
             """
             Returns tuple[int,dict,str,int] of file length, response headers,
             absolute URI, and response code.
@@ -238,7 +250,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
 
         return hs
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
 
         media_resource = hs.get_media_repository_resource()
         self.download_resource = media_resource.children[b"download"]
@@ -248,8 +260,9 @@ class MediaRepoTests(unittest.HomeserverTestCase):
 
         self.media_id = "example.com/12345"
 
-    def _req(self, content_disposition, include_content_type=True):
-
+    def _req(
+        self, content_disposition: Optional[bytes], include_content_type: bool = True
+    ) -> FakeChannel:
         channel = make_request(
             self.reactor,
             FakeSite(self.download_resource, self.reactor),
@@ -288,7 +301,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
 
         return channel
 
-    def test_handle_missing_content_type(self):
+    def test_handle_missing_content_type(self) -> None:
         channel = self._req(
             b"inline; filename=out" + self.test_image.extension,
             include_content_type=False,
@@ -299,7 +312,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
             headers.getRawHeaders(b"Content-Type"), [b"application/octet-stream"]
         )
 
-    def test_disposition_filename_ascii(self):
+    def test_disposition_filename_ascii(self) -> None:
         """
         If the filename is filename=<ascii> then Synapse will decode it as an
         ASCII string, and use filename= in the response.
@@ -315,7 +328,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
             [b"inline; filename=out" + self.test_image.extension],
         )
 
-    def test_disposition_filenamestar_utf8escaped(self):
+    def test_disposition_filenamestar_utf8escaped(self) -> None:
         """
         If the filename is filename=*utf8''<utf8 escaped> then Synapse will
         correctly decode it as the UTF-8 string, and use filename* in the
@@ -335,7 +348,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
             [b"inline; filename*=utf-8''" + filename + self.test_image.extension],
         )
 
-    def test_disposition_none(self):
+    def test_disposition_none(self) -> None:
         """
         If there is no filename, one isn't passed on in the Content-Disposition
         of the request.
@@ -348,26 +361,26 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         )
         self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None)
 
-    def test_thumbnail_crop(self):
+    def test_thumbnail_crop(self) -> None:
         """Test that a cropped remote thumbnail is available."""
         self._test_thumbnail(
             "crop", self.test_image.expected_cropped, self.test_image.expected_found
         )
 
-    def test_thumbnail_scale(self):
+    def test_thumbnail_scale(self) -> None:
         """Test that a scaled remote thumbnail is available."""
         self._test_thumbnail(
             "scale", self.test_image.expected_scaled, self.test_image.expected_found
         )
 
-    def test_invalid_type(self):
+    def test_invalid_type(self) -> None:
         """An invalid thumbnail type is never available."""
         self._test_thumbnail("invalid", None, False)
 
     @unittest.override_config(
         {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]}
     )
-    def test_no_thumbnail_crop(self):
+    def test_no_thumbnail_crop(self) -> None:
         """
         Override the config to generate only scaled thumbnails, but request a cropped one.
         """
@@ -376,13 +389,13 @@ class MediaRepoTests(unittest.HomeserverTestCase):
     @unittest.override_config(
         {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]}
     )
-    def test_no_thumbnail_scale(self):
+    def test_no_thumbnail_scale(self) -> None:
         """
         Override the config to generate only cropped thumbnails, but request a scaled one.
         """
         self._test_thumbnail("scale", None, False)
 
-    def test_thumbnail_repeated_thumbnail(self):
+    def test_thumbnail_repeated_thumbnail(self) -> None:
         """Test that fetching the same thumbnail works, and deleting the on disk
         thumbnail regenerates it.
         """
@@ -443,7 +456,9 @@ class MediaRepoTests(unittest.HomeserverTestCase):
                 channel.result["body"],
             )
 
-    def _test_thumbnail(self, method, expected_body, expected_found):
+    def _test_thumbnail(
+        self, method: str, expected_body: Optional[bytes], expected_found: bool
+    ) -> None:
         params = "?width=32&height=32&method=" + method
         channel = make_request(
             self.reactor,
@@ -485,7 +500,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
             )
 
     @parameterized.expand([("crop", 16), ("crop", 64), ("scale", 16), ("scale", 64)])
-    def test_same_quality(self, method, desired_size):
+    def test_same_quality(self, method: str, desired_size: int) -> None:
         """Test that choosing between thumbnails with the same quality rating succeeds.
 
         We are not particular about which thumbnail is chosen."""
@@ -521,7 +536,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
             )
         )
 
-    def test_x_robots_tag_header(self):
+    def test_x_robots_tag_header(self) -> None:
         """
         Tests that the `X-Robots-Tag` header is present, which informs web crawlers
         to not index, archive, or follow links in media.
@@ -540,29 +555,38 @@ class TestSpamChecker:
     `evil`.
     """
 
-    def __init__(self, config, api):
+    def __init__(self, config: Dict[str, Any], api: ModuleApi) -> None:
         self.config = config
         self.api = api
 
-    def parse_config(config):
+    def parse_config(config: Dict[str, Any]) -> Dict[str, Any]:
         return config
 
-    async def check_event_for_spam(self, foo):
+    async def check_event_for_spam(self, event: EventBase) -> Union[bool, str]:
         return False  # allow all events
 
-    async def user_may_invite(self, inviter_userid, invitee_userid, room_id):
+    async def user_may_invite(
+        self,
+        inviter_userid: str,
+        invitee_userid: str,
+        room_id: str,
+    ) -> bool:
         return True  # allow all invites
 
-    async def user_may_create_room(self, userid):
+    async def user_may_create_room(self, userid: str) -> bool:
         return True  # allow all room creations
 
-    async def user_may_create_room_alias(self, userid, room_alias):
+    async def user_may_create_room_alias(
+        self, userid: str, room_alias: RoomAlias
+    ) -> bool:
         return True  # allow all room aliases
 
-    async def user_may_publish_room(self, userid, room_id):
+    async def user_may_publish_room(self, userid: str, room_id: str) -> bool:
         return True  # allow publishing of all rooms
 
-    async def check_media_file_for_spam(self, file_wrapper, file_info) -> bool:
+    async def check_media_file_for_spam(
+        self, file_wrapper: ReadableFileWrapper, file_info: FileInfo
+    ) -> bool:
         buf = BytesIO()
         await file_wrapper.write_chunks_to(buf.write)
 
@@ -575,7 +599,7 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase):
         admin.register_servlets,
     ]
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.user = self.register_user("user", "pass")
         self.tok = self.login("user", "pass")
 
@@ -586,7 +610,7 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase):
 
         load_legacy_spam_checkers(hs)
 
-    def default_config(self):
+    def default_config(self) -> Dict[str, Any]:
         config = default_config("test")
 
         config.update(
@@ -602,13 +626,13 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase):
 
         return config
 
-    def test_upload_innocent(self):
+    def test_upload_innocent(self) -> None:
         """Attempt to upload some innocent data that should be allowed."""
         self.helper.upload_media(
             self.upload_resource, SMALL_PNG, tok=self.tok, expect_code=200
         )
 
-    def test_upload_ban(self):
+    def test_upload_ban(self) -> None:
         """Attempt to upload some data that includes bytes "evil", which should
         get rejected by the spam checker.
         """
diff --git a/tests/rest/media/v1/test_oembed.py b/tests/rest/media/v1/test_oembed.py
index 048d0ca44a..f38d7225f8 100644
--- a/tests/rest/media/v1/test_oembed.py
+++ b/tests/rest/media/v1/test_oembed.py
@@ -16,7 +16,7 @@ import json
 
 from twisted.test.proto_helpers import MemoryReactor
 
-from synapse.rest.media.v1.oembed import OEmbedProvider
+from synapse.rest.media.v1.oembed import OEmbedProvider, OEmbedResult
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util import Clock
@@ -25,15 +25,15 @@ from tests.unittest import HomeserverTestCase
 
 
 class OEmbedTests(HomeserverTestCase):
-    def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer):
-        self.oembed = OEmbedProvider(homeserver)
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.oembed = OEmbedProvider(hs)
 
-    def parse_response(self, response: JsonDict):
+    def parse_response(self, response: JsonDict) -> OEmbedResult:
         return self.oembed.parse_oembed_response(
             "https://test", json.dumps(response).encode("utf-8")
         )
 
-    def test_version(self):
+    def test_version(self) -> None:
         """Accept versions that are similar to 1.0 as a string or int (or missing)."""
         for version in ("1.0", 1.0, 1):
             result = self.parse_response({"version": version, "type": "link"})
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index da2c533260..5148c39874 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -16,16 +16,21 @@ import base64
 import json
 import os
 import re
+from typing import Any, Dict, Optional, Sequence, Tuple, Type
 from urllib.parse import urlencode
 
 from twisted.internet._resolver import HostResolution
 from twisted.internet.address import IPv4Address, IPv6Address
 from twisted.internet.error import DNSLookupError
-from twisted.test.proto_helpers import AccumulatingProtocol
+from twisted.internet.interfaces import IAddress, IResolutionReceiver
+from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor
 
 from synapse.config.oembed import OEmbedEndpointConfig
+from synapse.rest.media.v1.media_repository import MediaRepositoryResource
 from synapse.rest.media.v1.preview_url_resource import IMAGE_CACHE_EXPIRY_MS
+from synapse.server import HomeServer
 from synapse.types import JsonDict
+from synapse.util import Clock
 from synapse.util.stringutils import parse_and_validate_mxc_uri
 
 from tests import unittest
@@ -52,7 +57,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         b"</head></html>"
     )
 
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
 
         config = self.default_config()
         config["url_preview_enabled"] = True
@@ -113,22 +118,22 @@ class URLPreviewTests(unittest.HomeserverTestCase):
 
         return hs
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
 
         self.media_repo = hs.get_media_repository_resource()
         self.preview_url = self.media_repo.children[b"preview_url"]
 
-        self.lookups = {}
+        self.lookups: Dict[str, Any] = {}
 
         class Resolver:
             def resolveHostName(
                 _self,
-                resolutionReceiver,
-                hostName,
-                portNumber=0,
-                addressTypes=None,
-                transportSemantics="TCP",
-            ):
+                resolutionReceiver: IResolutionReceiver,
+                hostName: str,
+                portNumber: int = 0,
+                addressTypes: Optional[Sequence[Type[IAddress]]] = None,
+                transportSemantics: str = "TCP",
+            ) -> IResolutionReceiver:
 
                 resolution = HostResolution(hostName)
                 resolutionReceiver.resolutionBegan(resolution)
@@ -140,9 +145,9 @@ class URLPreviewTests(unittest.HomeserverTestCase):
                 resolutionReceiver.resolutionComplete()
                 return resolutionReceiver
 
-        self.reactor.nameResolver = Resolver()
+        self.reactor.nameResolver = Resolver()  # type: ignore[assignment]
 
-    def create_test_resource(self):
+    def create_test_resource(self) -> MediaRepositoryResource:
         return self.hs.get_media_repository_resource()
 
     def _assert_small_png(self, json_body: JsonDict) -> None:
@@ -153,7 +158,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(json_body["og:image:type"], "image/png")
         self.assertEqual(json_body["matrix:image:size"], 67)
 
-    def test_cache_returns_correct_type(self):
+    def test_cache_returns_correct_type(self) -> None:
         self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
 
         channel = self.make_request(
@@ -207,7 +212,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
         )
 
-    def test_non_ascii_preview_httpequiv(self):
+    def test_non_ascii_preview_httpequiv(self) -> None:
         self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
 
         end_content = (
@@ -243,7 +248,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 200)
         self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430")
 
-    def test_video_rejected(self):
+    def test_video_rejected(self) -> None:
         self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
 
         end_content = b"anything"
@@ -279,7 +284,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             },
         )
 
-    def test_audio_rejected(self):
+    def test_audio_rejected(self) -> None:
         self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
 
         end_content = b"anything"
@@ -315,7 +320,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             },
         )
 
-    def test_non_ascii_preview_content_type(self):
+    def test_non_ascii_preview_content_type(self) -> None:
         self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
 
         end_content = (
@@ -350,7 +355,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 200)
         self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430")
 
-    def test_overlong_title(self):
+    def test_overlong_title(self) -> None:
         self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
 
         end_content = (
@@ -387,7 +392,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         # We should only see the `og:description` field, as `title` is too long and should be stripped out
         self.assertCountEqual(["og:description"], res.keys())
 
-    def test_ipaddr(self):
+    def test_ipaddr(self) -> None:
         """
         IP addresses can be previewed directly.
         """
@@ -417,7 +422,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
         )
 
-    def test_blacklisted_ip_specific(self):
+    def test_blacklisted_ip_specific(self) -> None:
         """
         Blacklisted IP addresses, found via DNS, are not spidered.
         """
@@ -438,7 +443,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             },
         )
 
-    def test_blacklisted_ip_range(self):
+    def test_blacklisted_ip_range(self) -> None:
         """
         Blacklisted IP ranges, IPs found over DNS, are not spidered.
         """
@@ -457,7 +462,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             },
         )
 
-    def test_blacklisted_ip_specific_direct(self):
+    def test_blacklisted_ip_specific_direct(self) -> None:
         """
         Blacklisted IP addresses, accessed directly, are not spidered.
         """
@@ -476,7 +481,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         )
         self.assertEqual(channel.code, 403)
 
-    def test_blacklisted_ip_range_direct(self):
+    def test_blacklisted_ip_range_direct(self) -> None:
         """
         Blacklisted IP ranges, accessed directly, are not spidered.
         """
@@ -493,7 +498,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             },
         )
 
-    def test_blacklisted_ip_range_whitelisted_ip(self):
+    def test_blacklisted_ip_range_whitelisted_ip(self) -> None:
         """
         Blacklisted but then subsequently whitelisted IP addresses can be
         spidered.
@@ -526,7 +531,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
         )
 
-    def test_blacklisted_ip_with_external_ip(self):
+    def test_blacklisted_ip_with_external_ip(self) -> None:
         """
         If a hostname resolves a blacklisted IP, even if there's a
         non-blacklisted one, it will be rejected.
@@ -549,7 +554,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             },
         )
 
-    def test_blacklisted_ipv6_specific(self):
+    def test_blacklisted_ipv6_specific(self) -> None:
         """
         Blacklisted IP addresses, found via DNS, are not spidered.
         """
@@ -572,7 +577,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             },
         )
 
-    def test_blacklisted_ipv6_range(self):
+    def test_blacklisted_ipv6_range(self) -> None:
         """
         Blacklisted IP ranges, IPs found over DNS, are not spidered.
         """
@@ -591,7 +596,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             },
         )
 
-    def test_OPTIONS(self):
+    def test_OPTIONS(self) -> None:
         """
         OPTIONS returns the OPTIONS.
         """
@@ -601,7 +606,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 200)
         self.assertEqual(channel.json_body, {})
 
-    def test_accept_language_config_option(self):
+    def test_accept_language_config_option(self) -> None:
         """
         Accept-Language header is sent to the remote server
         """
@@ -652,7 +657,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             server.data,
         )
 
-    def test_data_url(self):
+    def test_data_url(self) -> None:
         """
         Requesting to preview a data URL is not supported.
         """
@@ -675,7 +680,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
 
         self.assertEqual(channel.code, 500)
 
-    def test_inline_data_url(self):
+    def test_inline_data_url(self) -> None:
         """
         An inline image (as a data URL) should be parsed properly.
         """
@@ -712,7 +717,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 200)
         self._assert_small_png(channel.json_body)
 
-    def test_oembed_photo(self):
+    def test_oembed_photo(self) -> None:
         """Test an oEmbed endpoint which returns a 'photo' type which redirects the preview to a new URL."""
         self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
         self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")]
@@ -771,7 +776,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(body["og:url"], "http://twitter.com/matrixdotorg/status/12345")
         self._assert_small_png(body)
 
-    def test_oembed_rich(self):
+    def test_oembed_rich(self) -> None:
         """Test an oEmbed endpoint which returns HTML content via the 'rich' type."""
         self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
 
@@ -817,7 +822,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             },
         )
 
-    def test_oembed_format(self):
+    def test_oembed_format(self) -> None:
         """Test an oEmbed endpoint which requires the format in the URL."""
         self.lookups["www.hulu.com"] = [(IPv4Address, "10.1.2.3")]
 
@@ -866,7 +871,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             },
         )
 
-    def test_oembed_autodiscovery(self):
+    def test_oembed_autodiscovery(self) -> None:
         """
         Autodiscovery works by finding the link in the HTML response and then requesting an oEmbed URL.
         1. Request a preview of a URL which is not known to the oEmbed code.
@@ -962,7 +967,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         )
         self._assert_small_png(body)
 
-    def _download_image(self):
+    def _download_image(self) -> Tuple[str, str]:
         """Downloads an image into the URL cache.
         Returns:
             A (host, media_id) tuple representing the MXC URI of the image.
@@ -995,7 +1000,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertIsNone(_port)
         return host, media_id
 
-    def test_storage_providers_exclude_files(self):
+    def test_storage_providers_exclude_files(self) -> None:
         """Test that files are not stored in or fetched from storage providers."""
         host, media_id = self._download_image()
 
@@ -1037,7 +1042,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             "URL cache file was unexpectedly retrieved from a storage provider",
         )
 
-    def test_storage_providers_exclude_thumbnails(self):
+    def test_storage_providers_exclude_thumbnails(self) -> None:
         """Test that thumbnails are not stored in or fetched from storage providers."""
         host, media_id = self._download_image()
 
@@ -1090,7 +1095,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             "URL cache thumbnail was unexpectedly retrieved from a storage provider",
         )
 
-    def test_cache_expiry(self):
+    def test_cache_expiry(self) -> None:
         """Test that URL cache files and thumbnails are cleaned up properly on expiry."""
         self.preview_url.clock = MockClock()
 
diff --git a/tests/rest/test_health.py b/tests/rest/test_health.py
index 01d48c3860..da325955f8 100644
--- a/tests/rest/test_health.py
+++ b/tests/rest/test_health.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+from http import HTTPStatus
 
 from synapse.rest.health import HealthResource
 
@@ -19,12 +19,12 @@ from tests import unittest
 
 
 class HealthCheckTests(unittest.HomeserverTestCase):
-    def create_test_resource(self):
+    def create_test_resource(self) -> HealthResource:
         # replace the JsonResource with a HealthResource.
         return HealthResource()
 
-    def test_health(self):
+    def test_health(self) -> None:
         channel = self.make_request("GET", "/health", shorthand=False)
 
-        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.code, HTTPStatus.OK)
         self.assertEqual(channel.result["body"], b"OK")
diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py
index 118aa93a32..11f78f52b8 100644
--- a/tests/rest/test_well_known.py
+++ b/tests/rest/test_well_known.py
@@ -11,6 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from http import HTTPStatus
+
 from twisted.web.resource import Resource
 
 from synapse.rest.well_known import well_known_resource
@@ -19,7 +21,7 @@ from tests import unittest
 
 
 class WellKnownTests(unittest.HomeserverTestCase):
-    def create_test_resource(self):
+    def create_test_resource(self) -> Resource:
         # replace the JsonResource with a Resource wrapping the WellKnownResource
         res = Resource()
         res.putChild(b".well-known", well_known_resource(self.hs))
@@ -31,12 +33,12 @@ class WellKnownTests(unittest.HomeserverTestCase):
             "default_identity_server": "https://testis",
         }
     )
-    def test_client_well_known(self):
+    def test_client_well_known(self) -> None:
         channel = self.make_request(
             "GET", "/.well-known/matrix/client", shorthand=False
         )
 
-        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.code, HTTPStatus.OK)
         self.assertEqual(
             channel.json_body,
             {
@@ -50,27 +52,27 @@ class WellKnownTests(unittest.HomeserverTestCase):
             "public_baseurl": None,
         }
     )
-    def test_client_well_known_no_public_baseurl(self):
+    def test_client_well_known_no_public_baseurl(self) -> None:
         channel = self.make_request(
             "GET", "/.well-known/matrix/client", shorthand=False
         )
 
-        self.assertEqual(channel.code, 404)
+        self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)
 
     @unittest.override_config({"serve_server_wellknown": True})
-    def test_server_well_known(self):
+    def test_server_well_known(self) -> None:
         channel = self.make_request(
             "GET", "/.well-known/matrix/server", shorthand=False
         )
 
-        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.code, HTTPStatus.OK)
         self.assertEqual(
             channel.json_body,
             {"m.server": "test:443"},
         )
 
-    def test_server_well_known_disabled(self):
+    def test_server_well_known_disabled(self) -> None:
         channel = self.make_request(
             "GET", "/.well-known/matrix/server", shorthand=False
         )
-        self.assertEqual(channel.code, 404)
+        self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)
diff --git a/tests/server.py b/tests/server.py
index 82990c2eb9..6ce2a17bf4 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -54,13 +54,18 @@ from twisted.internet.interfaces import (
     ITransport,
 )
 from twisted.python.failure import Failure
-from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
+from twisted.test.proto_helpers import (
+    AccumulatingProtocol,
+    MemoryReactor,
+    MemoryReactorClock,
+)
 from twisted.web.http_headers import Headers
 from twisted.web.resource import IResource
 from twisted.web.server import Request, Site
 
 from synapse.config.database import DatabaseConnectionConfig
 from synapse.http.site import SynapseRequest
+from synapse.logging.context import ContextResourceUsage
 from synapse.server import HomeServer
 from synapse.storage import DataStore
 from synapse.storage.engines import PostgresEngine, create_engine
@@ -88,18 +93,19 @@ class TimedOutException(Exception):
     """
 
 
-@attr.s
+@attr.s(auto_attribs=True)
 class FakeChannel:
     """
     A fake Twisted Web Channel (the part that interfaces with the
     wire).
     """
 
-    site = attr.ib(type=Union[Site, "FakeSite"])
-    _reactor = attr.ib()
-    result = attr.ib(type=dict, default=attr.Factory(dict))
-    _ip = attr.ib(type=str, default="127.0.0.1")
+    site: Union[Site, "FakeSite"]
+    _reactor: MemoryReactor
+    result: dict = attr.Factory(dict)
+    _ip: str = "127.0.0.1"
     _producer: Optional[Union[IPullProducer, IPushProducer]] = None
+    resource_usage: Optional[ContextResourceUsage] = None
 
     @property
     def json_body(self):
@@ -168,6 +174,8 @@ class FakeChannel:
 
     def requestDone(self, _self):
         self.result["done"] = True
+        if isinstance(_self, SynapseRequest):
+            self.resource_usage = _self.logcontext.get_resource_usage()
 
     def getPeer(self):
         # We give an address so that getClientIP returns a non null entry,
diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py
index 272cd35402..72bf5b3d31 100644
--- a/tests/storage/test_account_data.py
+++ b/tests/storage/test_account_data.py
@@ -47,9 +47,18 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
             expected_ignorer_user_ids,
         )
 
+    def assert_ignored(
+        self, ignorer_user_id: str, expected_ignored_user_ids: Set[str]
+    ) -> None:
+        self.assertEqual(
+            self.get_success(self.store.ignored_users(ignorer_user_id)),
+            expected_ignored_user_ids,
+        )
+
     def test_ignoring_users(self):
         """Basic adding/removing of users from the ignore list."""
         self._update_ignore_list("@other:test", "@another:remote")
+        self.assert_ignored(self.user, {"@other:test", "@another:remote"})
 
         # Check a user which no one ignores.
         self.assert_ignorers("@user:test", set())
@@ -62,6 +71,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
 
         # Add one user, remove one user, and leave one user.
         self._update_ignore_list("@foo:test", "@another:remote")
+        self.assert_ignored(self.user, {"@foo:test", "@another:remote"})
 
         # Check the removed user.
         self.assert_ignorers("@other:test", set())
@@ -76,20 +86,24 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
         """Ensure that caching works properly between different users."""
         # The first user ignores a user.
         self._update_ignore_list("@other:test")
+        self.assert_ignored(self.user, {"@other:test"})
         self.assert_ignorers("@other:test", {self.user})
 
         # The second user ignores them.
         self._update_ignore_list("@other:test", ignorer_user_id="@second:test")
+        self.assert_ignored("@second:test", {"@other:test"})
         self.assert_ignorers("@other:test", {self.user, "@second:test"})
 
         # The first user un-ignores them.
         self._update_ignore_list()
+        self.assert_ignored(self.user, set())
         self.assert_ignorers("@other:test", {"@second:test"})
 
     def test_invalid_data(self):
         """Invalid data ends up clearing out the ignored users list."""
         # Add some data and ensure it is there.
         self._update_ignore_list("@other:test")
+        self.assert_ignored(self.user, {"@other:test"})
         self.assert_ignorers("@other:test", {self.user})
 
         # No ignored_users key.
@@ -102,10 +116,12 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
         )
 
         # No one ignores the user now.
+        self.assert_ignored(self.user, set())
         self.assert_ignorers("@other:test", set())
 
         # Add some data and ensure it is there.
         self._update_ignore_list("@other:test")
+        self.assert_ignored(self.user, {"@other:test"})
         self.assert_ignorers("@other:test", {self.user})
 
         # Invalid data.
@@ -118,4 +134,5 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
         )
 
         # No one ignores the user now.
+        self.assert_ignored(self.user, set())
         self.assert_ignorers("@other:test", set())
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
index 39dcc094bd..fd619b64d4 100644
--- a/tests/storage/test_background_update.py
+++ b/tests/storage/test_background_update.py
@@ -14,16 +14,23 @@
 
 from unittest.mock import Mock
 
+import yaml
+
 from twisted.internet.defer import Deferred, ensureDeferred
+from twisted.test.proto_helpers import MemoryReactor
 
+from synapse.server import HomeServer
 from synapse.storage.background_updates import BackgroundUpdater
+from synapse.types import JsonDict
+from synapse.util import Clock
 
 from tests import unittest
 from tests.test_utils import make_awaitable, simple_async_mock
+from tests.unittest import override_config
 
 
 class BackgroundUpdateTestCase(unittest.HomeserverTestCase):
-    def prepare(self, reactor, clock, homeserver):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates
         # the base test class should have run the real bg updates for us
         self.assertTrue(
@@ -34,50 +41,50 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase):
         self.updates.register_background_update_handler(
             "test_update", self.update_handler
         )
+        self.store = self.hs.get_datastores().main
+
+    async def update(self, progress: JsonDict, count: int) -> int:
+        duration_ms = 10
+        await self.clock.sleep((count * duration_ms) / 1000)
+        progress = {"my_key": progress["my_key"] + 1}
+        await self.store.db_pool.runInteraction(
+            "update_progress",
+            self.updates._background_update_progress_txn,
+            "test_update",
+            progress,
+        )
+        return count
 
-    def test_do_background_update(self):
+    def test_do_background_update(self) -> None:
         # the time we claim it takes to update one item when running the update
         duration_ms = 10
 
         # the target runtime for each bg update
         target_background_update_duration_ms = 100
 
-        store = self.hs.get_datastores().main
         self.get_success(
-            store.db_pool.simple_insert(
+            self.store.db_pool.simple_insert(
                 "background_updates",
                 values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
             )
         )
 
-        # first step: make a bit of progress
-        async def update(progress, count):
-            await self.clock.sleep((count * duration_ms) / 1000)
-            progress = {"my_key": progress["my_key"] + 1}
-            await store.db_pool.runInteraction(
-                "update_progress",
-                self.updates._background_update_progress_txn,
-                "test_update",
-                progress,
-            )
-            return count
-
-        self.update_handler.side_effect = update
+        self.update_handler.side_effect = self.update
         self.update_handler.reset_mock()
         res = self.get_success(
             self.updates.do_next_background_update(False),
-            by=0.01,
+            by=0.02,
         )
         self.assertFalse(res)
 
         # on the first call, we should get run with the default background update size
         self.update_handler.assert_called_once_with(
-            {"my_key": 1}, self.updates.MINIMUM_BACKGROUND_BATCH_SIZE
+            {"my_key": 1}, self.updates.default_background_batch_size
         )
 
         # second step: complete the update
         # we should now get run with a much bigger number of items to update
-        async def update(progress, count):
+        async def update(progress: JsonDict, count: int) -> int:
             self.assertEqual(progress, {"my_key": 2})
             self.assertAlmostEqual(
                 count,
@@ -99,16 +106,234 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase):
         self.assertTrue(result)
         self.assertFalse(self.update_handler.called)
 
+    @override_config(
+        yaml.safe_load(
+            """
+            background_updates:
+                default_batch_size: 20
+            """
+        )
+    )
+    def test_background_update_default_batch_set_by_config(self) -> None:
+        """
+        Test that the background update is run with the default_batch_size set by the config
+        """
+
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+            )
+        )
+
+        self.update_handler.side_effect = self.update
+        self.update_handler.reset_mock()
+        res = self.get_success(
+            self.updates.do_next_background_update(False),
+            by=0.01,
+        )
+        self.assertFalse(res)
+
+        # on the first call, we should get run with the default background update size specified in the config
+        self.update_handler.assert_called_once_with({"my_key": 1}, 20)
+
+    def test_background_update_default_sleep_behavior(self) -> None:
+        """
+        Test default background update behavior, which is to sleep
+        """
+
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+            )
+        )
+
+        self.update_handler.side_effect = self.update
+        self.update_handler.reset_mock()
+        self.updates.start_doing_background_updates()
+
+        # advance the reactor less than the default sleep duration (1000ms)
+        self.reactor.pump([0.5])
+        # check that an update has not been run
+        self.update_handler.assert_not_called()
+
+        # advance reactor past default sleep duration
+        self.reactor.pump([1])
+        # check that update has been run
+        self.update_handler.assert_called()
+
+    @override_config(
+        yaml.safe_load(
+            """
+            background_updates:
+                sleep_duration_ms: 500
+            """
+        )
+    )
+    def test_background_update_sleep_set_in_config(self) -> None:
+        """
+        Test that changing the sleep time in the config changes how long it sleeps
+        """
+
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+            )
+        )
+
+        self.update_handler.side_effect = self.update
+        self.update_handler.reset_mock()
+        self.updates.start_doing_background_updates()
+
+        # advance the reactor less than the configured sleep duration (500ms)
+        self.reactor.pump([0.45])
+        # check that an update has not been run
+        self.update_handler.assert_not_called()
+
+        # advance reactor past config sleep duration but less than default duration
+        self.reactor.pump([0.75])
+        # check that update has been run
+        self.update_handler.assert_called()
+
+    @override_config(
+        yaml.safe_load(
+            """
+            background_updates:
+                sleep_enabled: false
+            """
+        )
+    )
+    def test_disabling_background_update_sleep(self) -> None:
+        """
+        Test that disabling sleep in the config results in bg update not sleeping
+        """
+
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+            )
+        )
+
+        self.update_handler.side_effect = self.update
+        self.update_handler.reset_mock()
+        self.updates.start_doing_background_updates()
+
+        # advance the reactor only a very short time
+        self.reactor.pump([0.025])
+        # check that an update has run
+        self.update_handler.assert_called()
+
+    @override_config(
+        yaml.safe_load(
+            """
+            background_updates:
+                background_update_duration_ms: 500
+            """
+        )
+    )
+    def test_background_update_duration_set_in_config(self) -> None:
+        """
+        Test that the desired duration set in the config is used in determining batch size
+        """
+        # Duration of one background update item
+        duration_ms = 10
+
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+            )
+        )
+
+        self.update_handler.side_effect = self.update
+        self.update_handler.reset_mock()
+        res = self.get_success(
+            self.updates.do_next_background_update(False),
+            by=0.02,
+        )
+        self.assertFalse(res)
+
+        # the first update was run with the default batch size; this one should be
+        # run with 500ms as the desired duration
+        async def update(progress: JsonDict, count: int) -> int:
+            self.assertEqual(progress, {"my_key": 2})
+            self.assertAlmostEqual(
+                count,
+                500 / duration_ms,
+                places=0,
+            )
+            await self.updates._end_background_update("test_update")
+            return count
+
+        self.update_handler.side_effect = update
+        self.get_success(self.updates.do_next_background_update(False))
+
+    @override_config(
+        yaml.safe_load(
+            """
+            background_updates:
+                min_batch_size: 5
+            """
+        )
+    )
+    def test_background_update_min_batch_set_in_config(self) -> None:
+        """
+        Test that the minimum batch size set in the config is used
+        """
+        # a very long-running individual update
+        duration_ms = 50
+
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+            )
+        )
+
+        # Run the update with the long-running update item
+        async def update_long(progress: JsonDict, count: int) -> int:
+            await self.clock.sleep((count * duration_ms) / 1000)
+            progress = {"my_key": progress["my_key"] + 1}
+            await self.store.db_pool.runInteraction(
+                "update_progress",
+                self.updates._background_update_progress_txn,
+                "test_update",
+                progress,
+            )
+            return count
+
+        self.update_handler.side_effect = update_long
+        self.update_handler.reset_mock()
+        res = self.get_success(
+            self.updates.do_next_background_update(False),
+            by=1,
+        )
+        self.assertFalse(res)
+
+        # the first update was run with the default batch size; this one should be run
+        # with the minimum batch size, as the first items took a very long time
+        async def update_short(progress: JsonDict, count: int) -> int:
+            self.assertEqual(progress, {"my_key": 2})
+            self.assertEqual(count, 5)
+            await self.updates._end_background_update("test_update")
+            return count
+
+        self.update_handler.side_effect = update_short
+        self.get_success(self.updates.do_next_background_update(False))
+
 
 class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase):
-    def prepare(self, reactor, clock, homeserver):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates
         # the base test class should have run the real bg updates for us
         self.assertTrue(
             self.get_success(self.updates.has_completed_background_updates())
         )
 
-        self.update_deferred = Deferred()
+        self.update_deferred: Deferred[int] = Deferred()
         self.update_handler = Mock(return_value=self.update_deferred)
         self.updates.register_background_update_handler(
             "test_update", self.update_handler
@@ -137,7 +362,7 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase):
             ),
         )
 
-    def test_controller(self):
+    def test_controller(self) -> None:
         store = self.hs.get_datastores().main
         self.get_success(
             store.db_pool.simple_insert(
@@ -147,7 +372,7 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase):
         )
 
         # Set the return value for the context manager.
-        enter_defer = Deferred()
+        enter_defer: Deferred[int] = Deferred()
         self._update_ctx_manager.__aenter__ = Mock(return_value=enter_defer)
 
         # Start the background update.
diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py
index 6fbac0ab14..a40fc20ef9 100644
--- a/tests/storage/test_database.py
+++ b/tests/storage/test_database.py
@@ -12,25 +12,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.database import make_tuple_comparison_clause
-from synapse.storage.engines import BaseDatabaseEngine
+from typing import Callable, Tuple
+from unittest.mock import Mock, call
 
-from tests import unittest
+from twisted.internet import defer
+from twisted.internet.defer import CancelledError, Deferred
+from twisted.test.proto_helpers import MemoryReactor
 
+from synapse.server import HomeServer
+from synapse.storage.database import (
+    DatabasePool,
+    LoggingTransaction,
+    make_tuple_comparison_clause,
+)
+from synapse.util import Clock
 
-def _stub_db_engine(**kwargs) -> BaseDatabaseEngine:
-    # returns a DatabaseEngine, circumventing the abc mechanism
-    # any kwargs are set as attributes on the class before instantiating it
-    t = type(
-        "TestBaseDatabaseEngine",
-        (BaseDatabaseEngine,),
-        dict(BaseDatabaseEngine.__dict__),
-    )
-    # defeat the abc mechanism
-    t.__abstractmethods__ = set()
-    for k, v in kwargs.items():
-        setattr(t, k, v)
-    return t(None, None)
+from tests import unittest
 
 
 class TupleComparisonClauseTestCase(unittest.TestCase):
@@ -38,3 +35,150 @@ class TupleComparisonClauseTestCase(unittest.TestCase):
         clause, args = make_tuple_comparison_clause([("a", 1), ("b", 2)])
         self.assertEqual(clause, "(a,b) > (?,?)")
         self.assertEqual(args, [1, 2])
+
+
+class CallbacksTestCase(unittest.HomeserverTestCase):
+    """Tests for transaction callbacks."""
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.db_pool: DatabasePool = self.store.db_pool
+
+    def _run_interaction(
+        self, func: Callable[[LoggingTransaction], object]
+    ) -> Tuple[Mock, Mock]:
+        """Run the given function in a database transaction, with callbacks registered.
+
+        Args:
+            func: The function to be run in a transaction. The transaction will be
+                retried if `func` raises an `OperationalError`.
+
+        Returns:
+            Two mocks, which were registered as an `after_callback` and an
+            `exception_callback` respectively, on every transaction attempt.
+        """
+        after_callback = Mock()
+        exception_callback = Mock()
+
+        def _test_txn(txn: LoggingTransaction) -> None:
+            txn.call_after(after_callback, 123, 456, extra=789)
+            txn.call_on_exception(exception_callback, 987, 654, extra=321)
+            func(txn)
+
+        try:
+            self.get_success_or_raise(
+                self.db_pool.runInteraction("test_transaction", _test_txn)
+            )
+        except Exception:
+            pass
+
+        return after_callback, exception_callback
+
+    def test_after_callback(self) -> None:
+        """Test that the after callback is called when a transaction succeeds."""
+        after_callback, exception_callback = self._run_interaction(lambda txn: None)
+
+        after_callback.assert_called_once_with(123, 456, extra=789)
+        exception_callback.assert_not_called()
+
+    def test_exception_callback(self) -> None:
+        """Test that the exception callback is called when a transaction fails."""
+        _test_txn = Mock(side_effect=ZeroDivisionError)
+        after_callback, exception_callback = self._run_interaction(_test_txn)
+
+        after_callback.assert_not_called()
+        exception_callback.assert_called_once_with(987, 654, extra=321)
+
+    def test_failed_retry(self) -> None:
+        """Test that the exception callback is called for every failed attempt."""
+        # Always raise an `OperationalError`.
+        _test_txn = Mock(side_effect=self.db_pool.engine.module.OperationalError)
+        after_callback, exception_callback = self._run_interaction(_test_txn)
+
+        after_callback.assert_not_called()
+        exception_callback.assert_has_calls(
+            [
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+            ]
+        )
+        self.assertEqual(exception_callback.call_count, 6)  # no additional calls
+
+    def test_successful_retry(self) -> None:
+        """Test callbacks for a failed transaction followed by a successful attempt."""
+        # Raise an `OperationalError` on the first attempt only.
+        _test_txn = Mock(
+            side_effect=[self.db_pool.engine.module.OperationalError, None]
+        )
+        after_callback, exception_callback = self._run_interaction(_test_txn)
+
+        # Calling both `after_callback`s when the first attempt failed is rather
+        # surprising (#12184). Let's document the behaviour in a test.
+        after_callback.assert_has_calls(
+            [
+                call(123, 456, extra=789),
+                call(123, 456, extra=789),
+            ]
+        )
+        self.assertEqual(after_callback.call_count, 2)  # no additional calls
+        exception_callback.assert_not_called()
+
+
+class CancellationTestCase(unittest.HomeserverTestCase):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.db_pool: DatabasePool = self.store.db_pool
+
+    def test_after_callback(self) -> None:
+        """Test that the after callback is called when a transaction succeeds."""
+        d: "Deferred[None]"
+        after_callback = Mock()
+        exception_callback = Mock()
+
+        def _test_txn(txn: LoggingTransaction) -> None:
+            txn.call_after(after_callback, 123, 456, extra=789)
+            txn.call_on_exception(exception_callback, 987, 654, extra=321)
+            d.cancel()
+
+        d = defer.ensureDeferred(
+            self.db_pool.runInteraction("test_transaction", _test_txn)
+        )
+        self.get_failure(d, CancelledError)
+
+        after_callback.assert_called_once_with(123, 456, extra=789)
+        exception_callback.assert_not_called()
+
+    def test_exception_callback(self) -> None:
+        """Test that the exception callback is called when a transaction fails."""
+        d: "Deferred[None]"
+        after_callback = Mock()
+        exception_callback = Mock()
+
+        def _test_txn(txn: LoggingTransaction) -> None:
+            txn.call_after(after_callback, 123, 456, extra=789)
+            txn.call_on_exception(exception_callback, 987, 654, extra=321)
+            d.cancel()
+            # Simulate a retryable failure on every attempt.
+            raise self.db_pool.engine.module.OperationalError()
+
+        d = defer.ensureDeferred(
+            self.db_pool.runInteraction("test_transaction", _test_txn)
+        )
+        self.get_failure(d, CancelledError)
+
+        after_callback.assert_not_called()
+        exception_callback.assert_has_calls(
+            [
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+            ]
+        )
+        self.assertEqual(exception_callback.call_count, 6)  # no additional calls
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index 6ac4b93f98..395396340b 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -13,9 +13,13 @@
 # limitations under the License.
 from typing import List, Optional
 
-from synapse.storage.database import DatabasePool
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
+from synapse.storage.database import DatabasePool, LoggingTransaction
 from synapse.storage.engines import IncorrectDatabaseSetup
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
+from synapse.util import Clock
 
 from tests.unittest import HomeserverTestCase
 from tests.utils import USE_POSTGRES_FOR_TESTS
@@ -25,13 +29,13 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
     if not USE_POSTGRES_FOR_TESTS:
         skip = "Requires Postgres"
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
         self.db_pool: DatabasePool = self.store.db_pool
 
         self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
 
-    def _setup_db(self, txn):
+    def _setup_db(self, txn: LoggingTransaction) -> None:
         txn.execute("CREATE SEQUENCE foobar_seq")
         txn.execute(
             """
@@ -59,12 +63,12 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
 
         return self.get_success_or_raise(self.db_pool.runWithConnection(_create))
 
-    def _insert_rows(self, instance_name: str, number: int):
+    def _insert_rows(self, instance_name: str, number: int) -> None:
         """Insert N rows as the given instance, inserting with stream IDs pulled
         from the postgres sequence.
         """
 
-        def _insert(txn):
+        def _insert(txn: LoggingTransaction) -> None:
             for _ in range(number):
                 txn.execute(
                     "INSERT INTO foobar VALUES (nextval('foobar_seq'), ?)",
@@ -80,12 +84,12 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
 
         self.get_success(self.db_pool.runInteraction("_insert_rows", _insert))
 
-    def _insert_row_with_id(self, instance_name: str, stream_id: int):
+    def _insert_row_with_id(self, instance_name: str, stream_id: int) -> None:
         """Insert one row as the given instance with given stream_id, updating
         the postgres sequence position to match.
         """
 
-        def _insert(txn):
+        def _insert(txn: LoggingTransaction) -> None:
             txn.execute(
                 "INSERT INTO foobar VALUES (?, ?)",
                 (
@@ -104,7 +108,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
 
         self.get_success(self.db_pool.runInteraction("_insert_row_with_id", _insert))
 
-    def test_empty(self):
+    def test_empty(self) -> None:
         """Test an ID generator against an empty database gives sensible
         current positions.
         """
@@ -114,7 +118,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         # The table is empty so we expect an empty map for positions
         self.assertEqual(id_gen.get_positions(), {})
 
-    def test_single_instance(self):
+    def test_single_instance(self) -> None:
         """Test that reads and writes from a single process are handled
         correctly.
         """
@@ -130,7 +134,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         # Try allocating a new ID gen and check that we only see position
         # advanced after we leave the context manager.
 
-        async def _get_next_async():
+        async def _get_next_async() -> None:
             async with id_gen.get_next() as stream_id:
                 self.assertEqual(stream_id, 8)
 
@@ -142,7 +146,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(id_gen.get_positions(), {"master": 8})
         self.assertEqual(id_gen.get_current_token_for_writer("master"), 8)
 
-    def test_out_of_order_finish(self):
+    def test_out_of_order_finish(self) -> None:
         """Test that IDs persisted out of order are correctly handled"""
 
         # Prefill table with 7 rows written by 'master'
@@ -191,7 +195,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(id_gen.get_positions(), {"master": 11})
         self.assertEqual(id_gen.get_current_token_for_writer("master"), 11)
 
-    def test_multi_instance(self):
+    def test_multi_instance(self) -> None:
         """Test that reads and writes from multiple processes are handled
         correctly.
         """
@@ -215,7 +219,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         # Try allocating a new ID gen and check that we only see position
         # advanced after we leave the context manager.
 
-        async def _get_next_async():
+        async def _get_next_async() -> None:
             async with first_id_gen.get_next() as stream_id:
                 self.assertEqual(stream_id, 8)
 
@@ -233,7 +237,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         # ... but calling `get_next` on the second instance should give a unique
         # stream ID
 
-        async def _get_next_async():
+        async def _get_next_async2() -> None:
             async with second_id_gen.get_next() as stream_id:
                 self.assertEqual(stream_id, 9)
 
@@ -241,7 +245,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
                     second_id_gen.get_positions(), {"first": 3, "second": 7}
                 )
 
-        self.get_success(_get_next_async())
+        self.get_success(_get_next_async2())
 
         self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 9})
 
@@ -249,7 +253,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         second_id_gen.advance("first", 8)
         self.assertEqual(second_id_gen.get_positions(), {"first": 8, "second": 9})
 
-    def test_get_next_txn(self):
+    def test_get_next_txn(self) -> None:
         """Test that the `get_next_txn` function works correctly."""
 
         # Prefill table with 7 rows written by 'master'
@@ -263,7 +267,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         # Try allocating a new ID gen and check that we only see position
         # advanced after we leave the context manager.
 
-        def _get_next_txn(txn):
+        def _get_next_txn(txn: LoggingTransaction) -> None:
             stream_id = id_gen.get_next_txn(txn)
             self.assertEqual(stream_id, 8)
 
@@ -275,7 +279,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(id_gen.get_positions(), {"master": 8})
         self.assertEqual(id_gen.get_current_token_for_writer("master"), 8)
 
-    def test_get_persisted_upto_position(self):
+    def test_get_persisted_upto_position(self) -> None:
         """Test that `get_persisted_upto_position` correctly tracks updates to
         positions.
         """
@@ -317,7 +321,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         id_gen.advance("second", 15)
         self.assertEqual(id_gen.get_persisted_upto_position(), 11)
 
-    def test_get_persisted_upto_position_get_next(self):
+    def test_get_persisted_upto_position_get_next(self) -> None:
         """Test that `get_persisted_upto_position` correctly tracks updates to
         positions when `get_next` is called.
         """
@@ -331,7 +335,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
 
         self.assertEqual(id_gen.get_persisted_upto_position(), 5)
 
-        async def _get_next_async():
+        async def _get_next_async() -> None:
             async with id_gen.get_next() as stream_id:
                 self.assertEqual(stream_id, 6)
                 self.assertEqual(id_gen.get_persisted_upto_position(), 5)
@@ -344,7 +348,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         # `persisted_upto_position` in this case, then it will be correct in the
         # other cases that are tested above (since they'll hit the same code).
 
-    def test_restart_during_out_of_order_persistence(self):
+    def test_restart_during_out_of_order_persistence(self) -> None:
         """Test that restarting a process while another process is writing out
         of order updates are handled correctly.
         """
@@ -388,7 +392,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         id_gen_worker.advance("master", 9)
         self.assertEqual(id_gen_worker.get_positions(), {"master": 9})
 
-    def test_writer_config_change(self):
+    def test_writer_config_change(self) -> None:
         """Test that changing the writer config correctly works."""
 
         self._insert_row_with_id("first", 3)
@@ -421,7 +425,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
 
         # Check that we get a sane next stream ID with this new config.
 
-        async def _get_next_async():
+        async def _get_next_async() -> None:
             async with id_gen_3.get_next() as stream_id:
                 self.assertEqual(stream_id, 6)
 
@@ -435,7 +439,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(id_gen_5.get_current_token_for_writer("first"), 6)
         self.assertEqual(id_gen_5.get_current_token_for_writer("third"), 6)
 
-    def test_sequence_consistency(self):
+    def test_sequence_consistency(self) -> None:
         """Test that we error out if the table and sequence diverges."""
 
         # Prefill with some rows
@@ -458,13 +462,13 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
     if not USE_POSTGRES_FOR_TESTS:
         skip = "Requires Postgres"
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
         self.db_pool: DatabasePool = self.store.db_pool
 
         self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
 
-    def _setup_db(self, txn):
+    def _setup_db(self, txn: LoggingTransaction) -> None:
         txn.execute("CREATE SEQUENCE foobar_seq")
         txn.execute(
             """
@@ -493,10 +497,10 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
 
         return self.get_success(self.db_pool.runWithConnection(_create))
 
-    def _insert_row(self, instance_name: str, stream_id: int):
+    def _insert_row(self, instance_name: str, stream_id: int) -> None:
         """Insert one row as the given instance with given stream_id."""
 
-        def _insert(txn):
+        def _insert(txn: LoggingTransaction) -> None:
             txn.execute(
                 "INSERT INTO foobar VALUES (?, ?)",
                 (
@@ -514,13 +518,13 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
 
         self.get_success(self.db_pool.runInteraction("_insert_row", _insert))
 
-    def test_single_instance(self):
+    def test_single_instance(self) -> None:
         """Test that reads and writes from a single process are handled
         correctly.
         """
         id_gen = self._create_id_generator()
 
-        async def _get_next_async():
+        async def _get_next_async() -> None:
             async with id_gen.get_next() as stream_id:
                 self._insert_row("master", stream_id)
 
@@ -530,7 +534,7 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(id_gen.get_current_token_for_writer("master"), -1)
         self.assertEqual(id_gen.get_persisted_upto_position(), -1)
 
-        async def _get_next_async2():
+        async def _get_next_async2() -> None:
             async with id_gen.get_next_mult(3) as stream_ids:
                 for stream_id in stream_ids:
                     self._insert_row("master", stream_id)
@@ -548,14 +552,14 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(second_id_gen.get_current_token_for_writer("master"), -4)
         self.assertEqual(second_id_gen.get_persisted_upto_position(), -4)
 
-    def test_multiple_instance(self):
+    def test_multiple_instance(self) -> None:
         """Tests that having multiple instances that get advanced over
         federation works correctly.
         """
         id_gen_1 = self._create_id_generator("first", writers=["first", "second"])
         id_gen_2 = self._create_id_generator("second", writers=["first", "second"])
 
-        async def _get_next_async():
+        async def _get_next_async() -> None:
             async with id_gen_1.get_next() as stream_id:
                 self._insert_row("first", stream_id)
                 id_gen_2.advance("first", stream_id)
@@ -567,7 +571,7 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(id_gen_1.get_persisted_upto_position(), -1)
         self.assertEqual(id_gen_2.get_persisted_upto_position(), -1)
 
-        async def _get_next_async2():
+        async def _get_next_async2() -> None:
             async with id_gen_2.get_next() as stream_id:
                 self._insert_row("second", stream_id)
                 id_gen_1.advance("second", stream_id)
@@ -584,13 +588,13 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
     if not USE_POSTGRES_FOR_TESTS:
         skip = "Requires Postgres"
 
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
         self.db_pool: DatabasePool = self.store.db_pool
 
         self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
 
-    def _setup_db(self, txn):
+    def _setup_db(self, txn: LoggingTransaction) -> None:
         txn.execute("CREATE SEQUENCE foobar_seq")
         txn.execute(
             """
@@ -642,7 +646,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         from the postgres sequence.
         """
 
-        def _insert(txn):
+        def _insert(txn: LoggingTransaction) -> None:
             for _ in range(number):
                 txn.execute(
                     "INSERT INTO %s VALUES (nextval('foobar_seq'), ?)" % (table,),
@@ -659,7 +663,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
 
         self.get_success(self.db_pool.runInteraction("_insert_rows", _insert))
 
-    def test_load_existing_stream(self):
+    def test_load_existing_stream(self) -> None:
         """Test creating ID gens with multiple tables that have rows from after
         the position in `stream_positions` table.
         """
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index 6a1cf33054..eaa0d7d749 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -129,21 +129,19 @@ class PaginationTestCase(HomeserverTestCase):
 
     def test_filter_relation_senders(self):
         # Messages which second user reacted to.
-        filter = {"io.element.relation_senders": [self.second_user_id]}
+        filter = {"related_by_senders": [self.second_user_id]}
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 1, chunk)
         self.assertEqual(chunk[0].event_id, self.event_id_1)
 
         # Messages which third user reacted to.
-        filter = {"io.element.relation_senders": [self.third_user_id]}
+        filter = {"related_by_senders": [self.third_user_id]}
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 1, chunk)
         self.assertEqual(chunk[0].event_id, self.event_id_2)
 
         # Messages which either user reacted to.
-        filter = {
-            "io.element.relation_senders": [self.second_user_id, self.third_user_id]
-        }
+        filter = {"related_by_senders": [self.second_user_id, self.third_user_id]}
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 2, chunk)
         self.assertCountEqual(
@@ -152,20 +150,20 @@ class PaginationTestCase(HomeserverTestCase):
 
     def test_filter_relation_type(self):
         # Messages which have annotations.
-        filter = {"io.element.relation_types": [RelationTypes.ANNOTATION]}
+        filter = {"related_by_rel_types": [RelationTypes.ANNOTATION]}
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 1, chunk)
         self.assertEqual(chunk[0].event_id, self.event_id_1)
 
         # Messages which have references.
-        filter = {"io.element.relation_types": [RelationTypes.REFERENCE]}
+        filter = {"related_by_rel_types": [RelationTypes.REFERENCE]}
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 1, chunk)
         self.assertEqual(chunk[0].event_id, self.event_id_2)
 
         # Messages which have either annotations or references.
         filter = {
-            "io.element.relation_types": [
+            "related_by_rel_types": [
                 RelationTypes.ANNOTATION,
                 RelationTypes.REFERENCE,
             ]
@@ -179,8 +177,8 @@ class PaginationTestCase(HomeserverTestCase):
     def test_filter_relation_senders_and_type(self):
         # Messages which second user reacted to.
         filter = {
-            "io.element.relation_senders": [self.second_user_id],
-            "io.element.relation_types": [RelationTypes.ANNOTATION],
+            "related_by_senders": [self.second_user_id],
+            "related_by_rel_types": [RelationTypes.ANNOTATION],
         }
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 1, chunk)
@@ -201,7 +199,7 @@ class PaginationTestCase(HomeserverTestCase):
             tok=self.second_tok,
         )
 
-        filter = {"io.element.relation_senders": [self.second_user_id]}
+        filter = {"related_by_senders": [self.second_user_id]}
         chunk = self._filter_messages(filter)
         self.assertEqual(len(chunk), 1, chunk)
         self.assertEqual(chunk[0].event_id, self.event_id_1)
diff --git a/tests/storage/test_unsafe_locale.py b/tests/storage/test_unsafe_locale.py
new file mode 100644
index 0000000000..ba53c22818
--- /dev/null
+++ b/tests/storage/test_unsafe_locale.py
@@ -0,0 +1,46 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from unittest.mock import MagicMock, patch
+
+from synapse.storage.database import make_conn
+from synapse.storage.engines._base import IncorrectDatabaseSetup
+
+from tests.unittest import HomeserverTestCase
+from tests.utils import USE_POSTGRES_FOR_TESTS
+
+
+class UnsafeLocaleTest(HomeserverTestCase):
+    if not USE_POSTGRES_FOR_TESTS:
+        skip = "Requires Postgres"
+
+    @patch("synapse.storage.engines.postgres.PostgresEngine.get_db_locale")
+    def test_unsafe_locale(self, mock_db_locale: MagicMock) -> None:
+        mock_db_locale.return_value = ("B", "B")
+        database = self.hs.get_datastores().databases[0]
+
+        db_conn = make_conn(database._database_config, database.engine, "test_unsafe")
+        with self.assertRaises(IncorrectDatabaseSetup):
+            database.engine.check_database(db_conn)
+        with self.assertRaises(IncorrectDatabaseSetup):
+            database.engine.check_new_database(db_conn)
+        db_conn.close()
+
+    def test_safe_locale(self) -> None:
+        database = self.hs.get_datastores().databases[0]
+
+        db_conn = make_conn(database._database_config, database.engine, "test_unsafe")
+        with db_conn.cursor() as txn:
+            res = database.engine.get_db_locale(txn)
+        self.assertEqual(res, ("C", "C"))
+        db_conn.close()
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index 219b5660b1..532e3fe9cd 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -13,11 +13,12 @@
 # limitations under the License.
 import logging
 from typing import Optional
+from unittest.mock import patch
 
 from synapse.api.room_versions import RoomVersions
-from synapse.events import EventBase
-from synapse.types import JsonDict
-from synapse.visibility import filter_events_for_server
+from synapse.events import EventBase, make_event_from_dict
+from synapse.types import JsonDict, create_requester
+from synapse.visibility import filter_events_for_client, filter_events_for_server
 
 from tests import unittest
 from tests.utils import create_room
@@ -185,3 +186,72 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
 
         self.get_success(self.storage.persistence.persist_event(event, context))
         return event
+
+
+class FilterEventsForClientTestCase(unittest.FederatingHomeserverTestCase):
+    def test_out_of_band_invite_rejection(self):
+        # this is where we have received an invite event over federation, and then
+        # rejected it.
+        invite_pdu = {
+            "room_id": "!room:id",
+            "depth": 1,
+            "auth_events": [],
+            "prev_events": [],
+            "origin_server_ts": 1,
+            "sender": "@someone:" + self.OTHER_SERVER_NAME,
+            "type": "m.room.member",
+            "state_key": "@user:test",
+            "content": {"membership": "invite"},
+        }
+        self.add_hashes_and_signatures(invite_pdu)
+        invite_event_id = make_event_from_dict(invite_pdu, RoomVersions.V9).event_id
+
+        self.get_success(
+            self.hs.get_federation_server().on_invite_request(
+                self.OTHER_SERVER_NAME,
+                invite_pdu,
+                "9",
+            )
+        )
+
+        # stub out do_remotely_reject_invite so that we fall back to a locally-
+        # generated rejection
+        with patch.object(
+            self.hs.get_federation_handler(),
+            "do_remotely_reject_invite",
+            side_effect=Exception(),
+        ):
+            reject_event_id, _ = self.get_success(
+                self.hs.get_room_member_handler().remote_reject_invite(
+                    invite_event_id,
+                    txn_id=None,
+                    requester=create_requester("@user:test"),
+                    content={},
+                )
+            )
+
+        invite_event, reject_event = self.get_success(
+            self.hs.get_datastores().main.get_events_as_list(
+                [invite_event_id, reject_event_id]
+            )
+        )
+
+        # the invited user should be able to see both the invite and the rejection
+        self.assertEqual(
+            self.get_success(
+                filter_events_for_client(
+                    self.hs.get_storage(), "@user:test", [invite_event, reject_event]
+                )
+            ),
+            [invite_event, reject_event],
+        )
+
+        # other users should see neither
+        self.assertEqual(
+            self.get_success(
+                filter_events_for_client(
+                    self.hs.get_storage(), "@other:test", [invite_event, reject_event]
+                )
+            ),
+            [],
+        )
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 19741ffcda..48e616ac74 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -17,7 +17,7 @@ from typing import Set
 from unittest import mock
 
 from twisted.internet import defer, reactor
-from twisted.internet.defer import Deferred
+from twisted.internet.defer import CancelledError, Deferred
 
 from synapse.api.errors import SynapseError
 from synapse.logging.context import (
@@ -28,7 +28,7 @@ from synapse.logging.context import (
     make_deferred_yieldable,
 )
 from synapse.util.caches import descriptors
-from synapse.util.caches.descriptors import cached, lru_cache
+from synapse.util.caches.descriptors import cached, cachedList, lru_cache
 
 from tests import unittest
 from tests.test_utils import get_awaitable_result
@@ -141,6 +141,84 @@ class DescriptorTestCase(unittest.TestCase):
         self.assertEqual(r, "chips")
         obj.mock.assert_not_called()
 
+    @defer.inlineCallbacks
+    def test_cache_uncached_args(self):
+        """
+        Only the arguments not named in uncached_args should matter to the cache
+
+        Note that this is identical to test_cache_num_args, but provides the
+        arguments differently.
+        """
+
+        class Cls:
+            # Note that it is important that the uncached argument is not the
+            # last one, in order to properly test the skipping of arguments.
+            @descriptors.cached(uncached_args=("arg2",))
+            def fn(self, arg1, arg2, arg3):
+                return self.mock(arg1, arg2, arg3)
+
+            def __init__(self):
+                self.mock = mock.Mock()
+
+        obj = Cls()
+        obj.mock.return_value = "fish"
+        r = yield obj.fn(1, 2, 3)
+        self.assertEqual(r, "fish")
+        obj.mock.assert_called_once_with(1, 2, 3)
+        obj.mock.reset_mock()
+
+        # a call with different params should call the mock again
+        obj.mock.return_value = "chips"
+        r = yield obj.fn(2, 3, 4)
+        self.assertEqual(r, "chips")
+        obj.mock.assert_called_once_with(2, 3, 4)
+        obj.mock.reset_mock()
+
+        # the two values should now be cached; we should be able to vary
+        # the second argument and still get the cached result.
+        r = yield obj.fn(1, 4, 3)
+        self.assertEqual(r, "fish")
+        r = yield obj.fn(2, 5, 4)
+        self.assertEqual(r, "chips")
+        obj.mock.assert_not_called()
+
+    @defer.inlineCallbacks
+    def test_cache_kwargs(self):
+        """Test that keyword arguments are treated properly"""
+
+        class Cls:
+            def __init__(self):
+                self.mock = mock.Mock()
+
+            @descriptors.cached()
+            def fn(self, arg1, kwarg1=2):
+                return self.mock(arg1, kwarg1=kwarg1)
+
+        obj = Cls()
+        obj.mock.return_value = "fish"
+        r = yield obj.fn(1, kwarg1=2)
+        self.assertEqual(r, "fish")
+        obj.mock.assert_called_once_with(1, kwarg1=2)
+        obj.mock.reset_mock()
+
+        # a call with different params should call the mock again
+        obj.mock.return_value = "chips"
+        r = yield obj.fn(1, kwarg1=3)
+        self.assertEqual(r, "chips")
+        obj.mock.assert_called_once_with(1, kwarg1=3)
+        obj.mock.reset_mock()
+
+        # the values should now be cached.
+        r = yield obj.fn(1, kwarg1=2)
+        self.assertEqual(r, "fish")
+        # We should be able to not provide kwarg1 and get the cached value back.
+        r = yield obj.fn(1)
+        self.assertEqual(r, "fish")
+        # Keyword arguments can be in any order.
+        r = yield obj.fn(kwarg1=2, arg1=1)
+        self.assertEqual(r, "fish")
+        obj.mock.assert_not_called()
+
     def test_cache_with_sync_exception(self):
         """If the wrapped function throws synchronously, things should continue to work"""
 
@@ -415,6 +493,74 @@ class DescriptorTestCase(unittest.TestCase):
         obj.invalidate()
         top_invalidate.assert_called_once()
 
+    def test_cancel(self):
+        """Test that cancelling a lookup does not cancel other lookups"""
+        complete_lookup: "Deferred[None]" = Deferred()
+
+        class Cls:
+            @cached()
+            async def fn(self, arg1):
+                await complete_lookup
+                return str(arg1)
+
+        obj = Cls()
+
+        d1 = obj.fn(123)
+        d2 = obj.fn(123)
+        self.assertFalse(d1.called)
+        self.assertFalse(d2.called)
+
+        # Cancel `d1`, which is the lookup that caused `fn` to run.
+        d1.cancel()
+
+        # `d2` should complete normally.
+        complete_lookup.callback(None)
+        self.failureResultOf(d1, CancelledError)
+        self.assertEqual(d2.result, "123")
+
+    def test_cancel_logcontexts(self):
+        """Test that cancellation does not break logcontexts.
+
+        * The `CancelledError` must be raised with the correct logcontext.
+        * The inner lookup must not resume with a finished logcontext.
+        * The inner lookup must not restore a finished logcontext when done.
+        """
+        complete_lookup: "Deferred[None]" = Deferred()
+
+        class Cls:
+            inner_context_was_finished = False
+
+            @cached()
+            async def fn(self, arg1):
+                await make_deferred_yieldable(complete_lookup)
+                self.inner_context_was_finished = current_context().finished
+                return str(arg1)
+
+        obj = Cls()
+
+        async def do_lookup():
+            with LoggingContext("c1") as c1:
+                try:
+                    await obj.fn(123)
+                    self.fail("No CancelledError thrown")
+                except CancelledError:
+                    self.assertEqual(
+                        current_context(),
+                        c1,
+                        "CancelledError was not raised with the correct logcontext",
+                    )
+                    # suppress the error and succeed
+
+        d = defer.ensureDeferred(do_lookup())
+        d.cancel()
+
+        complete_lookup.callback(None)
+        self.successResultOf(d)
+        self.assertFalse(
+            obj.inner_context_was_finished, "Tried to restart a finished logcontext"
+        )
+        self.assertEqual(current_context(), SENTINEL_CONTEXT)
+
 
 class CacheDecoratorTestCase(unittest.HomeserverTestCase):
     """More tests for @cached
@@ -656,7 +802,7 @@ class CachedListDescriptorTestCase(unittest.TestCase):
             def fn(self, arg1, arg2):
                 pass
 
-            @descriptors.cachedList("fn", "args1")
+            @descriptors.cachedList(cached_method_name="fn", list_name="args1")
             async def list_fn(self, args1, arg2):
                 assert current_context().name == "c1"
                 # we want this to behave like an asynchronous function
@@ -715,7 +861,7 @@ class CachedListDescriptorTestCase(unittest.TestCase):
             def fn(self, arg1):
                 pass
 
-            @descriptors.cachedList("fn", "args1")
+            @descriptors.cachedList(cached_method_name="fn", list_name="args1")
             def list_fn(self, args1) -> "Deferred[dict]":
                 return self.mock(args1)
 
@@ -758,7 +904,7 @@ class CachedListDescriptorTestCase(unittest.TestCase):
             def fn(self, arg1, arg2):
                 pass
 
-            @descriptors.cachedList("fn", "args1")
+            @descriptors.cachedList(cached_method_name="fn", list_name="args1")
             async def list_fn(self, args1, arg2):
                 # we want this to behave like an asynchronous function
                 await run_on_reactor()
@@ -787,3 +933,78 @@ class CachedListDescriptorTestCase(unittest.TestCase):
         obj.fn.invalidate((10, 2))
         invalidate0.assert_called_once()
         invalidate1.assert_called_once()
+
+    def test_cancel(self):
+        """Test that cancelling a lookup does not cancel other lookups"""
+        complete_lookup: "Deferred[None]" = Deferred()
+
+        class Cls:
+            @cached()
+            def fn(self, arg1):
+                pass
+
+            @cachedList(cached_method_name="fn", list_name="args")
+            async def list_fn(self, args):
+                await complete_lookup
+                return {arg: str(arg) for arg in args}
+
+        obj = Cls()
+
+        d1 = obj.list_fn([123, 456])
+        d2 = obj.list_fn([123, 456, 789])
+        self.assertFalse(d1.called)
+        self.assertFalse(d2.called)
+
+        d1.cancel()
+
+        # `d2` should complete normally.
+        complete_lookup.callback(None)
+        self.failureResultOf(d1, CancelledError)
+        self.assertEqual(d2.result, {123: "123", 456: "456", 789: "789"})
+
+    def test_cancel_logcontexts(self):
+        """Test that cancellation does not break logcontexts.
+
+        * The `CancelledError` must be raised with the correct logcontext.
+        * The inner lookup must not resume with a finished logcontext.
+        * The inner lookup must not restore a finished logcontext when done.
+        """
+        complete_lookup: "Deferred[None]" = Deferred()
+
+        class Cls:
+            inner_context_was_finished = False
+
+            @cached()
+            def fn(self, arg1):
+                pass
+
+            @cachedList(cached_method_name="fn", list_name="args")
+            async def list_fn(self, args):
+                await make_deferred_yieldable(complete_lookup)
+                self.inner_context_was_finished = current_context().finished
+                return {arg: str(arg) for arg in args}
+
+        obj = Cls()
+
+        async def do_lookup():
+            with LoggingContext("c1") as c1:
+                try:
+                    await obj.list_fn([123])
+                    self.fail("No CancelledError thrown")
+                except CancelledError:
+                    self.assertEqual(
+                        current_context(),
+                        c1,
+                        "CancelledError was not raised with the correct logcontext",
+                    )
+                    # suppress the error and succeed
+
+        d = defer.ensureDeferred(do_lookup())
+        d.cancel()
+
+        complete_lookup.callback(None)
+        self.successResultOf(d)
+        self.assertFalse(
+            obj.inner_context_was_finished, "Tried to restart a finished logcontext"
+        )
+        self.assertEqual(current_context(), SENTINEL_CONTEXT)
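For readers skimming this file's diff, the new `uncached_args` option (exercised by `test_cache_uncached_args` earlier in the file) lets a cached method accept arguments that do not participate in the cache key. A minimal, hedged sketch of how it might be used follows; the class and argument names are illustrative only, and, as with any `@cached` method, calling the wrapped method returns a `Deferred`.

    # Hedged sketch, not part of the diff.
    from synapse.util.caches.descriptors import cached


    class ThingFetcher:
        def __init__(self) -> None:
            self.fetch_count = 0

        @cached(uncached_args=("log_hint",))
        def get_thing(self, thing_id: int, log_hint: str) -> str:
            # `log_hint` is passed through (e.g. for logging) but is not part
            # of the cache key, so calls differing only in `log_hint` reuse
            # the cached result.
            self.fetch_count += 1
            return f"thing-{thing_id}"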
diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py
index 362014f4cb..e5bc416de1 100644
--- a/tests/util/test_async_helpers.py
+++ b/tests/util/test_async_helpers.py
@@ -13,6 +13,8 @@
 # limitations under the License.
 import traceback
 
+from parameterized import parameterized_class
+
 from twisted.internet import defer
 from twisted.internet.defer import CancelledError, Deferred, ensureDeferred
 from twisted.internet.task import Clock
@@ -23,10 +25,12 @@ from synapse.logging.context import (
     LoggingContext,
     PreserveLoggingContext,
     current_context,
+    make_deferred_yieldable,
 )
 from synapse.util.async_helpers import (
     ObservableDeferred,
     concurrently_execute,
+    delay_cancellation,
     stop_cancellation,
     timeout_deferred,
 )
@@ -100,6 +104,34 @@ class ObservableDeferredTest(TestCase):
         self.assertEqual(str(results[0].value), "gah!", "observer 1 errback result")
         self.assertEqual(str(results[1].value), "gah!", "observer 2 errback result")
 
+    def test_cancellation(self):
+        """Test that cancelling an observer does not affect other observers."""
+        origin_d: "Deferred[int]" = Deferred()
+        observable = ObservableDeferred(origin_d, consumeErrors=True)
+
+        observer1 = observable.observe()
+        observer2 = observable.observe()
+        observer3 = observable.observe()
+
+        self.assertFalse(observer1.called)
+        self.assertFalse(observer2.called)
+        self.assertFalse(observer3.called)
+
+        # cancel the second observer
+        observer2.cancel()
+        self.assertFalse(observer1.called)
+        self.failureResultOf(observer2, CancelledError)
+        self.assertFalse(observer3.called)
+
+        # other observers resolve as normal
+        origin_d.callback(123)
+        self.assertEqual(observer1.result, 123, "observer 1 callback result")
+        self.assertEqual(observer3.result, 123, "observer 3 callback result")
+
+        # additional observers resolve as normal
+        observer4 = observable.observe()
+        self.assertEqual(observer4.result, 123, "observer 4 callback result")
+
 
 class TimeoutDeferredTest(TestCase):
     def setUp(self):
@@ -285,13 +317,27 @@ class ConcurrentlyExecuteTest(TestCase):
         self.successResultOf(d2)
 
 
-class StopCancellationTests(TestCase):
-    """Tests for the `stop_cancellation` function."""
+@parameterized_class(
+    ("wrapper",),
+    [("stop_cancellation",), ("delay_cancellation",)],
+)
+class CancellationWrapperTests(TestCase):
+    """Common tests for the `stop_cancellation` and `delay_cancellation` functions."""
+
+    wrapper: str
+
+    def wrap_deferred(self, deferred: "Deferred[str]") -> "Deferred[str]":
+        if self.wrapper == "stop_cancellation":
+            return stop_cancellation(deferred)
+        elif self.wrapper == "delay_cancellation":
+            return delay_cancellation(deferred)
+        else:
+            raise ValueError(f"Unsupported wrapper type: {self.wrapper}")
 
     def test_succeed(self):
         """Test that the new `Deferred` receives the result."""
         deferred: "Deferred[str]" = Deferred()
-        wrapper_deferred = stop_cancellation(deferred)
+        wrapper_deferred = self.wrap_deferred(deferred)
 
         # Success should propagate through.
         deferred.callback("success")
@@ -301,7 +347,7 @@ class StopCancellationTests(TestCase):
     def test_failure(self):
         """Test that the new `Deferred` receives the `Failure`."""
         deferred: "Deferred[str]" = Deferred()
-        wrapper_deferred = stop_cancellation(deferred)
+        wrapper_deferred = self.wrap_deferred(deferred)
 
         # Failure should propagate through.
         deferred.errback(ValueError("abc"))
@@ -309,6 +355,10 @@ class StopCancellationTests(TestCase):
         self.failureResultOf(wrapper_deferred, ValueError)
         self.assertIsNone(deferred.result, "`Failure` was not consumed")
 
+
+class StopCancellationTests(TestCase):
+    """Tests for the `stop_cancellation` function."""
+
     def test_cancellation(self):
         """Test that cancellation of the new `Deferred` leaves the original running."""
         deferred: "Deferred[str]" = Deferred()
@@ -319,11 +369,101 @@ class StopCancellationTests(TestCase):
         self.assertTrue(wrapper_deferred.called)
         self.failureResultOf(wrapper_deferred, CancelledError)
         self.assertFalse(
-            deferred.called, "Original `Deferred` was unexpectedly cancelled."
+            deferred.called, "Original `Deferred` was unexpectedly cancelled"
+        )
+
+        # Now make the original `Deferred` fail.
+        # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed
+        # in logs.
+        deferred.errback(ValueError("abc"))
+        self.assertIsNone(deferred.result, "`Failure` was not consumed")
+
+
+class DelayCancellationTests(TestCase):
+    """Tests for the `delay_cancellation` function."""
+
+    def test_cancellation(self):
+        """Test that cancellation of the new `Deferred` waits for the original."""
+        deferred: "Deferred[str]" = Deferred()
+        wrapper_deferred = delay_cancellation(deferred)
+
+        # Cancel the new `Deferred`.
+        wrapper_deferred.cancel()
+        self.assertNoResult(wrapper_deferred)
+        self.assertFalse(
+            deferred.called, "Original `Deferred` was unexpectedly cancelled"
+        )
+
+        # Now make the original `Deferred` fail.
+        # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed
+        # in logs.
+        deferred.errback(ValueError("abc"))
+        self.assertIsNone(deferred.result, "`Failure` was not consumed")
+
+        # Now that the original `Deferred` has failed, we should get a `CancelledError`.
+        self.failureResultOf(wrapper_deferred, CancelledError)
+
+    def test_suppresses_second_cancellation(self):
+        """Test that a second cancellation is suppressed.
+
+        Identical to `test_cancellation` except the new `Deferred` is cancelled twice.
+        """
+        deferred: "Deferred[str]" = Deferred()
+        wrapper_deferred = delay_cancellation(deferred)
+
+        # Cancel the new `Deferred`, twice.
+        wrapper_deferred.cancel()
+        wrapper_deferred.cancel()
+        self.assertNoResult(wrapper_deferred)
+        self.assertFalse(
+            deferred.called, "Original `Deferred` was unexpectedly cancelled"
         )
 
-        # Now make the inner `Deferred` fail.
+        # Now make the original `Deferred` fail.
         # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed
         # in logs.
         deferred.errback(ValueError("abc"))
         self.assertIsNone(deferred.result, "`Failure` was not consumed")
+
+        # Now that the original `Deferred` has failed, we should get a `CancelledError`.
+        self.failureResultOf(wrapper_deferred, CancelledError)
+
+    def test_propagates_cancelled_error(self):
+        """Test that a `CancelledError` from the original `Deferred` gets propagated."""
+        deferred: "Deferred[str]" = Deferred()
+        wrapper_deferred = delay_cancellation(deferred)
+
+        # Fail the original `Deferred` with a `CancelledError`.
+        cancelled_error = CancelledError()
+        deferred.errback(cancelled_error)
+
+        # The new `Deferred` should fail with exactly the same `CancelledError`.
+        self.assertTrue(wrapper_deferred.called)
+        self.assertIs(cancelled_error, self.failureResultOf(wrapper_deferred).value)
+
+    def test_preserves_logcontext(self):
+        """Test that logging contexts are preserved."""
+        blocking_d: "Deferred[None]" = Deferred()
+
+        async def inner():
+            await make_deferred_yieldable(blocking_d)
+
+        async def outer():
+            with LoggingContext("c") as c:
+                try:
+                    await delay_cancellation(defer.ensureDeferred(inner()))
+                    self.fail("`CancelledError` was not raised")
+                except CancelledError:
+                    self.assertEqual(c, current_context())
+                    # Succeed with no error, unless the logging context is wrong.
+
+        # Run and block inside `inner()`.
+        d = defer.ensureDeferred(outer())
+        self.assertEqual(SENTINEL_CONTEXT, current_context())
+
+        d.cancel()
+
+        # Now unblock. `outer()` will consume the `CancelledError` and check the
+        # logging context.
+        blocking_d.callback(None)
+        self.successResultOf(d)
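The `stop_cancellation` and `delay_cancellation` tests above exercise the difference between the two wrappers: `stop_cancellation` fails its wrapper immediately when cancelled while leaving the original `Deferred` running, whereas `delay_cancellation` keeps the wrapper pending and only surfaces the `CancelledError` once the original completes. A small, hedged sketch, using only names already imported in this diff:

    # Hedged sketch, not part of the diff.
    from twisted.internet.defer import CancelledError, Deferred

    from synapse.util.async_helpers import delay_cancellation, stop_cancellation

    original1: "Deferred[str]" = Deferred()
    stopped = stop_cancellation(original1)
    stopped.cancel()
    # `stopped` has already failed with CancelledError; `original1` is untouched.
    stopped.addErrback(lambda failure: failure.trap(CancelledError))

    original2: "Deferred[str]" = Deferred()
    delayed = delay_cancellation(original2)
    delayed.cancel()
    # `delayed` is still pending; `original2` has not been cancelled.
    original2.errback(ValueError("underlying failure"))
    # Only now does `delayed` fail, and it fails with CancelledError rather
    # than the ValueError (which the wrapper consumes).
    delayed.addErrback(lambda failure: failure.trap(CancelledError))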
diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py
index 3c07252252..5d1aa025d1 100644
--- a/tests/util/test_check_dependencies.py
+++ b/tests/util/test_check_dependencies.py
@@ -12,7 +12,7 @@ from tests.unittest import TestCase
 
 
 class DummyDistribution(metadata.Distribution):
-    def __init__(self, version: str):
+    def __init__(self, version: object):
         self._version = version
 
     @property
@@ -27,7 +27,10 @@ class DummyDistribution(metadata.Distribution):
 
 
 old = DummyDistribution("0.1.2")
+old_release_candidate = DummyDistribution("0.1.2rc3")
 new = DummyDistribution("1.2.3")
+new_release_candidate = DummyDistribution("1.2.3rc4")
+distribution_with_no_version = DummyDistribution(None)
 
 # could probably use stdlib TestCase --- no need for twisted here
 
@@ -65,6 +68,35 @@ class TestDependencyChecker(TestCase):
                 # should not raise
                 check_requirements()
 
+    def test_version_reported_as_none(self) -> None:
+        """Complain if importlib.metadata.version() returns None.
+
+        This shouldn't normally happen, but it was seen in the wild (#12223).
+        """
+        with patch(
+            "synapse.util.check_dependencies.metadata.requires",
+            return_value=["dummypkg >= 1"],
+        ):
+            with self.mock_installed_package(distribution_with_no_version):
+                self.assertRaises(DependencyException, check_requirements)
+
+    def test_checks_ignore_dev_dependencies(self) -> None:
+        """Bot generic and per-extra checks should ignore dev dependencies."""
+        with patch(
+            "synapse.util.check_dependencies.metadata.requires",
+            return_value=["dummypkg >= 1; extra == 'mypy'"],
+        ), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}):
+            # We're testing that none of these calls raise.
+            with self.mock_installed_package(None):
+                check_requirements()
+                check_requirements("cool-extra")
+            with self.mock_installed_package(old):
+                check_requirements()
+                check_requirements("cool-extra")
+            with self.mock_installed_package(new):
+                check_requirements()
+                check_requirements("cool-extra")
+
     def test_generic_check_of_optional_dependency(self) -> None:
         """Complain if an optional package is old."""
         with patch(
@@ -85,11 +117,28 @@ class TestDependencyChecker(TestCase):
         with patch(
             "synapse.util.check_dependencies.metadata.requires",
             return_value=["dummypkg >= 1; extra == 'cool-extra'"],
-        ), patch("synapse.util.check_dependencies.EXTRAS", {"cool-extra"}):
+        ), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}):
             with self.mock_installed_package(None):
                 self.assertRaises(DependencyException, check_requirements, "cool-extra")
             with self.mock_installed_package(old):
                 self.assertRaises(DependencyException, check_requirements, "cool-extra")
             with self.mock_installed_package(new):
                 # should not raise
+                check_requirements("cool-extra")
+
+    def test_release_candidates_satisfy_dependency(self) -> None:
+        """
+        Tests that release candidates are accepted when checking whether a
+        dependency is satisfied.
+        (Regression test, see #12176.)
+        """
+        with patch(
+            "synapse.util.check_dependencies.metadata.requires",
+            return_value=["dummypkg >= 1"],
+        ):
+            with self.mock_installed_package(old_release_candidate):
+                self.assertRaises(DependencyException, check_requirements)
+
+            with self.mock_installed_package(new_release_candidate):
+                # should not raise
                 check_requirements()
diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py
index 0774625b85..0c84226197 100644
--- a/tests/util/test_rwlock.py
+++ b/tests/util/test_rwlock.py
@@ -12,8 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import AsyncContextManager, Callable, Sequence, Tuple
+
 from twisted.internet import defer
-from twisted.internet.defer import Deferred
+from twisted.internet.defer import CancelledError, Deferred
 
 from synapse.util.async_helpers import ReadWriteLock
 
@@ -21,87 +23,187 @@ from tests import unittest
 
 
 class ReadWriteLockTestCase(unittest.TestCase):
-    def _assert_called_before_not_after(self, lst, first_false):
-        for i, d in enumerate(lst[:first_false]):
-            self.assertTrue(d.called, msg="%d was unexpectedly false" % i)
+    def _start_reader_or_writer(
+        self,
+        read_or_write: Callable[[str], AsyncContextManager],
+        key: str,
+        return_value: str,
+    ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]:
+        """Starts a reader or writer which acquires the lock, blocks, then completes.
+
+        Args:
+            read_or_write: A function returning a context manager for a lock.
+                Either a bound `ReadWriteLock.read` or `ReadWriteLock.write`.
+            key: The key to read or write.
+            return_value: A string that the reader or writer will resolve with when
+                done.
+
+        Returns:
+            A tuple of three `Deferred`s:
+             * A `Deferred` that resolves with `return_value` once the reader or writer
+               completes successfully.
+             * A `Deferred` that resolves once the reader or writer acquires the lock.
+             * A `Deferred` that blocks the reader or writer. Must be resolved by the
+               caller to allow the reader or writer to release the lock and complete.
+        """
+        acquired_d: "Deferred[None]" = Deferred()
+        unblock_d: "Deferred[None]" = Deferred()
+
+        async def reader_or_writer():
+            async with read_or_write(key):
+                acquired_d.callback(None)
+                await unblock_d
+            return return_value
+
+        d = defer.ensureDeferred(reader_or_writer())
+        return d, acquired_d, unblock_d
+
+    def _start_blocking_reader(
+        self, rwlock: ReadWriteLock, key: str, return_value: str
+    ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]:
+        """Starts a reader which acquires the lock, blocks, then releases the lock.
+
+        See the docstring for `_start_reader_or_writer` for details about the arguments
+        and return values.
+        """
+        return self._start_reader_or_writer(rwlock.read, key, return_value)
+
+    def _start_blocking_writer(
+        self, rwlock: ReadWriteLock, key: str, return_value: str
+    ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]:
+        """Starts a writer which acquires the lock, blocks, then releases the lock.
+
+        See the docstring for `_start_reader_or_writer` for details about the arguments
+        and return values.
+        """
+        return self._start_reader_or_writer(rwlock.write, key, return_value)
+
+    def _start_nonblocking_reader(
+        self, rwlock: ReadWriteLock, key: str, return_value: str
+    ) -> Tuple["Deferred[str]", "Deferred[None]"]:
+        """Starts a reader which acquires the lock, then releases it immediately.
+
+        See the docstring for `_start_reader_or_writer` for details about the arguments.
+
+        Returns:
+            A tuple of two `Deferred`s:
+             * A `Deferred` that resolves with `return_value` once the reader completes
+               successfully.
+             * A `Deferred` that resolves once the reader acquires the lock.
+        """
+        d, acquired_d, unblock_d = self._start_reader_or_writer(
+            rwlock.read, key, return_value
+        )
+        unblock_d.callback(None)
+        return d, acquired_d
+
+    def _start_nonblocking_writer(
+        self, rwlock: ReadWriteLock, key: str, return_value: str
+    ) -> Tuple["Deferred[str]", "Deferred[None]"]:
+        """Starts a writer which acquires the lock, then releases it immediately.
+
+        See the docstring for `_start_reader_or_writer` for details about the arguments.
+
+        Returns:
+            A tuple of two `Deferred`s:
+             * A `Deferred` that resolves with `return_value` once the writer completes
+               successfully.
+             * A `Deferred` that resolves once the writer acquires the lock.
+        """
+        d, acquired_d, unblock_d = self._start_reader_or_writer(
+            rwlock.write, key, return_value
+        )
+        unblock_d.callback(None)
+        return d, acquired_d
+
+    def _assert_first_n_resolved(
+        self, deferreds: Sequence["defer.Deferred[None]"], n: int
+    ) -> None:
+        """Assert that exactly the first n `Deferred`s in the given list are resolved.
 
-        for i, d in enumerate(lst[first_false:]):
+        Args:
+            deferreds: The list of `Deferred`s to be checked.
+            n: The number of `Deferred`s at the start of `deferreds` that should be
+                resolved.
+        """
+        for i, d in enumerate(deferreds[:n]):
+            self.assertTrue(d.called, msg="deferred %d was unexpectedly unresolved" % i)
+
+        for i, d in enumerate(deferreds[n:]):
             self.assertFalse(
-                d.called, msg="%d was unexpectedly true" % (i + first_false)
+                d.called, msg="deferred %d was unexpectedly resolved" % (i + n)
             )
 
     def test_rwlock(self):
         rwlock = ReadWriteLock()
-
-        key = object()
+        key = "key"
 
         ds = [
-            rwlock.read(key),  # 0
-            rwlock.read(key),  # 1
-            rwlock.write(key),  # 2
-            rwlock.write(key),  # 3
-            rwlock.read(key),  # 4
-            rwlock.read(key),  # 5
-            rwlock.write(key),  # 6
+            self._start_blocking_reader(rwlock, key, "0"),
+            self._start_blocking_reader(rwlock, key, "1"),
+            self._start_blocking_writer(rwlock, key, "2"),
+            self._start_blocking_writer(rwlock, key, "3"),
+            self._start_blocking_reader(rwlock, key, "4"),
+            self._start_blocking_reader(rwlock, key, "5"),
+            self._start_blocking_writer(rwlock, key, "6"),
         ]
-        ds = [defer.ensureDeferred(d) for d in ds]
+        # `Deferred`s that resolve when each reader or writer acquires the lock.
+        acquired_ds = [acquired_d for _, acquired_d, _ in ds]
+        # `Deferred`s that will trigger the release of locks when resolved.
+        release_ds = [release_d for _, _, release_d in ds]
 
-        self._assert_called_before_not_after(ds, 2)
+        # The first two readers should acquire their locks.
+        self._assert_first_n_resolved(acquired_ds, 2)
 
-        with ds[0].result:
-            self._assert_called_before_not_after(ds, 2)
-        self._assert_called_before_not_after(ds, 2)
+        # Release one of the read locks. The next writer should not acquire the lock,
+        # because there is another reader holding the lock.
+        self._assert_first_n_resolved(acquired_ds, 2)
+        release_ds[0].callback(None)
+        self._assert_first_n_resolved(acquired_ds, 2)
 
-        with ds[1].result:
-            self._assert_called_before_not_after(ds, 2)
-        self._assert_called_before_not_after(ds, 3)
+        # Release the other read lock. The next writer should acquire the lock.
+        self._assert_first_n_resolved(acquired_ds, 2)
+        release_ds[1].callback(None)
+        self._assert_first_n_resolved(acquired_ds, 3)
 
-        with ds[2].result:
-            self._assert_called_before_not_after(ds, 3)
-        self._assert_called_before_not_after(ds, 4)
+        # Release the write lock. The next writer should acquire the lock.
+        self._assert_first_n_resolved(acquired_ds, 3)
+        release_ds[2].callback(None)
+        self._assert_first_n_resolved(acquired_ds, 4)
 
-        with ds[3].result:
-            self._assert_called_before_not_after(ds, 4)
-        self._assert_called_before_not_after(ds, 6)
+        # Release the write lock. The next two readers should acquire locks.
+        self._assert_first_n_resolved(acquired_ds, 4)
+        release_ds[3].callback(None)
+        self._assert_first_n_resolved(acquired_ds, 6)
 
-        with ds[5].result:
-            self._assert_called_before_not_after(ds, 6)
-        self._assert_called_before_not_after(ds, 6)
+        # Release one of the read locks. The next writer should not acquire the lock,
+        # because there is another reader holding the lock.
+        self._assert_first_n_resolved(acquired_ds, 6)
+        release_ds[5].callback(None)
+        self._assert_first_n_resolved(acquired_ds, 6)
 
-        with ds[4].result:
-            self._assert_called_before_not_after(ds, 6)
-        self._assert_called_before_not_after(ds, 7)
+        # Release the other read lock. The next writer should acquire the lock.
+        self._assert_first_n_resolved(acquired_ds, 6)
+        release_ds[4].callback(None)
+        self._assert_first_n_resolved(acquired_ds, 7)
 
-        with ds[6].result:
-            pass
+        # Release the write lock.
+        release_ds[6].callback(None)
 
-        d = defer.ensureDeferred(rwlock.write(key))
-        self.assertTrue(d.called)
-        with d.result:
-            pass
+        # Acquire and release the write and read locks one last time for good measure.
+        _, acquired_d = self._start_nonblocking_writer(rwlock, key, "last writer")
+        self.assertTrue(acquired_d.called)
 
-        d = defer.ensureDeferred(rwlock.read(key))
-        self.assertTrue(d.called)
-        with d.result:
-            pass
+        _, acquired_d = self._start_nonblocking_reader(rwlock, key, "last reader")
+        self.assertTrue(acquired_d.called)
 
     def test_lock_handoff_to_nonblocking_writer(self):
         """Test a writer handing the lock to another writer that completes instantly."""
         rwlock = ReadWriteLock()
         key = "key"
 
-        unblock: "Deferred[None]" = Deferred()
-
-        async def blocking_write():
-            with await rwlock.write(key):
-                await unblock
-
-        async def nonblocking_write():
-            with await rwlock.write(key):
-                pass
-
-        d1 = defer.ensureDeferred(blocking_write())
-        d2 = defer.ensureDeferred(nonblocking_write())
+        d1, _, unblock = self._start_blocking_writer(rwlock, key, "write 1 completed")
+        d2, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed")
         self.assertFalse(d1.called)
         self.assertFalse(d2.called)
 
@@ -111,5 +213,182 @@ class ReadWriteLockTestCase(unittest.TestCase):
         self.assertTrue(d2.called)
 
         # The `ReadWriteLock` should operate as normal.
-        d3 = defer.ensureDeferred(nonblocking_write())
+        d3, _ = self._start_nonblocking_writer(rwlock, key, "write 3 completed")
         self.assertTrue(d3.called)
+
+    def test_cancellation_while_holding_read_lock(self):
+        """Test cancellation while holding a read lock.
+
+        A waiting writer should be given the lock when the reader holding the lock is
+        cancelled.
+        """
+        rwlock = ReadWriteLock()
+        key = "key"
+
+        # 1. A reader takes the lock and blocks.
+        reader_d, _, _ = self._start_blocking_reader(rwlock, key, "read completed")
+
+        # 2. A writer waits for the reader to complete.
+        writer_d, _ = self._start_nonblocking_writer(rwlock, key, "write completed")
+        self.assertFalse(writer_d.called)
+
+        # 3. The reader is cancelled.
+        reader_d.cancel()
+        self.failureResultOf(reader_d, CancelledError)
+
+        # 4. The writer should take the lock and complete.
+        self.assertTrue(
+            writer_d.called, "Writer is stuck waiting for a cancelled reader"
+        )
+        self.assertEqual("write completed", self.successResultOf(writer_d))
+
+    def test_cancellation_while_holding_write_lock(self):
+        """Test cancellation while holding a write lock.
+
+        A waiting reader should be given the lock when the writer holding the lock is
+        cancelled.
+        """
+        rwlock = ReadWriteLock()
+        key = "key"
+
+        # 1. A writer takes the lock and blocks.
+        writer_d, _, _ = self._start_blocking_writer(rwlock, key, "write completed")
+
+        # 2. A reader waits for the writer to complete.
+        reader_d, _ = self._start_nonblocking_reader(rwlock, key, "read completed")
+        self.assertFalse(reader_d.called)
+
+        # 3. The writer is cancelled.
+        writer_d.cancel()
+        self.failureResultOf(writer_d, CancelledError)
+
+        # 4. The reader should take the lock and complete.
+        self.assertTrue(
+            reader_d.called, "Reader is stuck waiting for a cancelled writer"
+        )
+        self.assertEqual("read completed", self.successResultOf(reader_d))
+
+    def test_cancellation_while_waiting_for_read_lock(self):
+        """Test cancellation while waiting for a read lock.
+
+        Tests that cancelling a waiting reader:
+         * does not cancel the writer it is waiting on
+         * does not cancel the next writer waiting on it
+         * does not allow the next writer to acquire the lock before an earlier writer
+           has finished
+         * does not keep the next writer waiting indefinitely
+
+        These scenarios correspond to the assertions below that carry explicit failure messages.
+        """
+        rwlock = ReadWriteLock()
+        key = "key"
+
+        # 1. A writer takes the lock and blocks.
+        writer1_d, _, unblock_writer1 = self._start_blocking_writer(
+            rwlock, key, "write 1 completed"
+        )
+
+        # 2. A reader waits for the first writer to complete.
+        #    This reader will be cancelled later.
+        reader_d, _ = self._start_nonblocking_reader(rwlock, key, "read completed")
+        self.assertFalse(reader_d.called)
+
+        # 3. A second writer waits for both the first writer and the reader to complete.
+        writer2_d, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed")
+        self.assertFalse(writer2_d.called)
+
+        # 4. The waiting reader is cancelled.
+        #    Neither of the writers should be cancelled.
+        #    The second writer should still be waiting, but only on the first writer.
+        reader_d.cancel()
+        self.failureResultOf(reader_d, CancelledError)
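+        #    Note that a reader which is merely waiting sees the CancelledError
+        #    immediately; contrast this with the waiting writer in the next test,
+        #    whose cancellation is only delivered once it acquires the lock.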
+        self.assertFalse(writer1_d.called, "First writer was unexpectedly cancelled")
+        self.assertFalse(
+            writer2_d.called,
+            "Second writer was unexpectedly cancelled or given the lock before the "
+            "first writer finished",
+        )
+
+        # 5. Unblock the first writer, which should complete.
+        unblock_writer1.callback(None)
+        self.assertEqual("write 1 completed", self.successResultOf(writer1_d))
+
+        # 6. The second writer should take the lock and complete.
+        self.assertTrue(
+            writer2_d.called, "Second writer is stuck waiting for a cancelled reader"
+        )
+        self.assertEqual("write 2 completed", self.successResultOf(writer2_d))
+
+    def test_cancellation_while_waiting_for_write_lock(self):
+        """Test cancellation while waiting for a write lock.
+
+        Tests that cancelling a waiting writer:
+         * does not cancel the reader or writer it is waiting on
+         * does not cancel the next writer waiting on it
+         * does not allow the next writer to acquire the lock before an earlier reader
+           and writer have finished
+         * does not keep the next writer waiting indefinitely
+
+        These scenarios correspond to the assertions below that carry explicit failure messages.
+        """
+        rwlock = ReadWriteLock()
+        key = "key"
+
+        # 1. A reader takes the lock and blocks.
+        reader_d, _, unblock_reader = self._start_blocking_reader(
+            rwlock, key, "read completed"
+        )
+
+        # 2. A writer waits for the reader to complete.
+        writer1_d, _, unblock_writer1 = self._start_blocking_writer(
+            rwlock, key, "write 1 completed"
+        )
+
+        # 3. A second writer waits for both the reader and first writer to complete.
+        #    This writer will be cancelled later.
+        writer2_d, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed")
+        self.assertFalse(writer2_d.called)
+
+        # 4. A third writer waits for the second writer to complete.
+        writer3_d, _ = self._start_nonblocking_writer(rwlock, key, "write 3 completed")
+        self.assertFalse(writer3_d.called)
+
+        # 5. The second writer is cancelled, but continues waiting for the lock.
+        #    The reader, first writer and third writer should not be cancelled.
+        #    The first writer should still be waiting on the reader.
+        #    The third writer should still be waiting on the second writer.
+        writer2_d.cancel()
+        self.assertNoResult(writer2_d)
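+        #    The cancellation is not delivered straight away: the writer keeps its
+        #    place in the queue and the CancelledError only surfaces once it
+        #    acquires the lock (see step 8 below).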
+        self.assertFalse(reader_d.called, "Reader was unexpectedly cancelled")
+        self.assertFalse(writer1_d.called, "First writer was unexpectedly cancelled")
+        self.assertFalse(
+            writer3_d.called,
+            "Third writer was unexpectedly cancelled or given the lock before the first "
+            "writer finished",
+        )
+
+        # 6. Unblock the reader, which should complete.
+        #    The first writer should be given the lock and block.
+        #    The third writer should still be waiting on the second writer.
+        unblock_reader.callback(None)
+        self.assertEqual("read completed", self.successResultOf(reader_d))
+        self.assertNoResult(writer2_d)
+        self.assertFalse(
+            writer3_d.called,
+            "Third writer was unexpectedly given the lock before the first writer "
+            "finished",
+        )
+
+        # 7. Unblock the first writer, which should complete.
+        unblock_writer1.callback(None)
+        self.assertEqual("write 1 completed", self.successResultOf(writer1_d))
+
+        # 8. The second writer should take the lock and release it immediately, since it
+        #    has been cancelled.
+        self.failureResultOf(writer2_d, CancelledError)
+
+        # 9. The third writer should take the lock and complete.
+        self.assertTrue(
+            writer3_d.called, "Third writer is stuck waiting for a cancelled writer"
+        )
+        self.assertEqual("write 3 completed", self.successResultOf(writer3_d))
diff --git a/tests/utils.py b/tests/utils.py
index ef99c72e0b..f6b1d60371 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -15,13 +15,8 @@
 
 import atexit
 import os
-from unittest.mock import Mock, patch
-from urllib import parse as urlparse
-
-from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
-from synapse.api.errors import CodeMessageException, cs_error
 from synapse.api.room_versions import RoomVersions
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.server import DEFAULT_ROOM_VERSION
@@ -187,111 +182,6 @@ def mock_getRawHeaders(headers=None):
     return getRawHeaders
 
 
-# This is a mock /resource/ not an entire server
-class MockHttpResource:
-    def __init__(self, prefix=""):
-        self.callbacks = []  # 3-tuple of method/pattern/function
-        self.prefix = prefix
-
-    def trigger_get(self, path):
-        return self.trigger(b"GET", path, None)
-
-    @patch("twisted.web.http.Request")
-    @defer.inlineCallbacks
-    def trigger(
-        self, http_method, path, content, mock_request, federation_auth_origin=None
-    ):
-        """Fire an HTTP event.
-
-        Args:
-            http_method : The HTTP method
-            path : The HTTP path
-            content : The HTTP body
-            mock_request : Mocked request to pass to the event so it can get
-                           content.
-            federation_auth_origin (bytes|None): domain to authenticate as, for federation
-        Returns:
-            A tuple of (code, response)
-        Raises:
-            KeyError If no event is found which will handle the path.
-        """
-        path = self.prefix + path
-
-        # annoyingly we return a twisted http request which has chained calls
-        # to get at the http content, hence mock it here.
-        mock_content = Mock()
-        config = {"read.return_value": content}
-        mock_content.configure_mock(**config)
-        mock_request.content = mock_content
-
-        mock_request.method = http_method.encode("ascii")
-        mock_request.uri = path.encode("ascii")
-
-        mock_request.getClientIP.return_value = "-"
-
-        headers = {}
-        if federation_auth_origin is not None:
-            headers[b"Authorization"] = [
-                b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,)
-            ]
-        mock_request.requestHeaders.getRawHeaders = mock_getRawHeaders(headers)
-
-        # return the right path if the event requires it
-        mock_request.path = path
-
-        # add in query params to the right place
-        try:
-            mock_request.args = urlparse.parse_qs(path.split("?")[1])
-            mock_request.path = path.split("?")[0]
-            path = mock_request.path
-        except Exception:
-            pass
-
-        if isinstance(path, bytes):
-            path = path.decode("utf8")
-
-        for (method, pattern, func) in self.callbacks:
-            if http_method != method:
-                continue
-
-            matcher = pattern.match(path)
-            if matcher:
-                try:
-                    args = [urlparse.unquote(u) for u in matcher.groups()]
-
-                    (code, response) = yield defer.ensureDeferred(
-                        func(mock_request, *args)
-                    )
-                    return code, response
-                except CodeMessageException as e:
-                    return e.code, cs_error(e.msg, code=e.errcode)
-
-        raise KeyError("No event can handle %s" % path)
-
-    def register_paths(self, method, path_patterns, callback, servlet_name):
-        for path_pattern in path_patterns:
-            self.callbacks.append((method, path_pattern, callback))
-
-
-class MockKey:
-    alg = "mock_alg"
-    version = "mock_version"
-    signature = b"\x9a\x87$"
-
-    @property
-    def verify_key(self):
-        return self
-
-    def sign(self, message):
-        return self
-
-    def verify(self, message, sig):
-        assert sig == b"\x9a\x87$"
-
-    def encode(self):
-        return b"<fake_encoded_key>"
-
-
 class MockClock:
     now = 1000
 
diff --git a/tox.ini b/tox.ini
index 8d6aa7580b..3ffd2c3e97 100644
--- a/tox.ini
+++ b/tox.ini
@@ -40,11 +40,8 @@ lint_targets =
     tests
     # annoyingly, black doesn't find these so we have to list them
     scripts-dev
-    scripts-dev/build_debian_packages
-    scripts-dev/sign_json
     stubs
     contrib
-    synctl
     synmark
     .ci
     docker
@@ -154,38 +151,11 @@ extras = lint
 commands =
     python -m black --check --diff {[base]lint_targets}
     flake8 {[base]lint_targets} {env:PEP8SUFFIX:}
-    {toxinidir}/scripts-dev/config-lint.sh
 
 [testenv:check_isort]
 extras = lint
 commands = isort -c --df {[base]lint_targets}
 
-[testenv:combine]
-skip_install = true
-usedevelop = false
-deps =
-    coverage
-    pip>=10
-commands=
-    coverage combine
-    coverage report
-
-[testenv:cov-erase]
-skip_install = true
-usedevelop = false
-deps =
-    coverage
-commands=
-    coverage erase
-
-[testenv:cov-html]
-skip_install = true
-usedevelop = false
-deps =
-    coverage
-commands=
-    coverage html
-
 [testenv:mypy]
 deps =
     {[base]deps}