-rw-r--r--  .github/workflows/tests.yml | 3
-rw-r--r--  .gitignore | 3
-rw-r--r--  changelog.d/11881.feature | 1
-rw-r--r--  changelog.d/12040.feature | 1
-rw-r--r--  changelog.d/12165.misc | 1
-rw-r--r--  changelog.d/12191.misc | 1
-rw-r--r--  changelog.d/12193.misc | 1
-rw-r--r--  changelog.d/12209.misc | 1
-rw-r--r--  changelog.d/12251.feature | 1
-rw-r--r--  changelog.d/12267.misc | 1
-rw-r--r--  changelog.d/12271.doc | 1
-rw-r--r--  changelog.d/12293.removal | 1
-rw-r--r--  changelog.d/12295.misc | 1
-rw-r--r--  changelog.d/12302.feature | 1
-rw-r--r--  changelog.d/12310.feature | 1
-rw-r--r--  changelog.d/12315.doc | 1
-rw-r--r--  changelog.d/12316.misc | 1
-rw-r--r--  changelog.d/12317.misc | 1
-rw-r--r--  changelog.d/12320.misc | 1
-rw-r--r--  changelog.d/12321.misc | 1
-rw-r--r--  changelog.d/12326.misc | 1
-rw-r--r--  changelog.d/12327.feature | 1
-rw-r--r--  changelog.d/12329.bugfix | 1
-rw-r--r--  changelog.d/12330.misc | 1
-rw-r--r--  changelog.d/12331.doc | 1
-rw-r--r--  changelog.d/12332.misc | 1
-rw-r--r--  changelog.d/12333.bugfix | 1
-rw-r--r--  changelog.d/12334.misc | 1
-rw-r--r--  changelog.d/12335.misc | 1
-rw-r--r--  changelog.d/12336.misc | 1
-rw-r--r--  changelog.d/12338.misc | 1
-rw-r--r--  changelog.d/12341.feature | 1
-rw-r--r--  changelog.d/12346.misc | 1
-rw-r--r--  changelog.d/12347.misc | 1
-rw-r--r--  changelog.d/12348.misc | 1
-rw-r--r--  changelog.d/12349.misc | 1
-rw-r--r--  changelog.d/12350.bugfix | 1
-rw-r--r--  changelog.d/12351.misc | 1
-rw-r--r--  changelog.d/12355.misc | 1
-rw-r--r--  changelog.d/12367.feature | 1
-rw-r--r--  changelog.d/12369.doc | 1
-rw-r--r--  contrib/jitsimeetbridge/jitsimeetbridge.py | 15
-rw-r--r--  docker/Dockerfile-pgtests | 30
-rw-r--r--  docker/README-testing.md | 2
-rwxr-xr-x  docker/run_pg_tests.sh | 19
-rw-r--r--  docs/SUMMARY.md | 1
-rw-r--r--  docs/development/contributing_guide.md | 29
-rw-r--r--  docs/modules/account_data_callbacks.md | 106
-rw-r--r--  docs/modules/third_party_rules_callbacks.md | 18
-rw-r--r--  docs/modules/writing_a_module.md | 2
-rw-r--r--  docs/sample_config.yaml | 9
-rw-r--r--  docs/upgrade.md | 13
-rw-r--r--  docs/workers.md | 2
-rw-r--r--  mypy.ini | 10
-rwxr-xr-x  scripts-dev/check-newsfragment.sh | 4
-rw-r--r--  scripts-dev/check_signature.py | 72
-rwxr-xr-x  scripts-dev/complement.sh | 2
-rwxr-xr-x  scripts-dev/definitions.py | 208
-rw-r--r--  scripts-dev/hash_history.py | 81
-rwxr-xr-x  scripts-dev/list_url_patterns.py | 60
-rw-r--r--  scripts-dev/tail-synapse.py | 67
-rwxr-xr-x  scripts-dev/test_postgresql.sh | 19
-rwxr-xr-x  setup.py | 4
-rwxr-xr-x  synapse/_scripts/synapse_port_db.py | 1
-rw-r--r--  synapse/api/constants.py | 2
-rw-r--r--  synapse/app/admin_cmd.py | 2
-rw-r--r--  synapse/app/generic_worker.py | 2
-rw-r--r--  synapse/appservice/__init__.py | 12
-rw-r--r--  synapse/appservice/api.py | 10
-rw-r--r--  synapse/appservice/scheduler.py | 55
-rw-r--r--  synapse/config/appservice.py | 1
-rw-r--r--  synapse/config/experimental.py | 8
-rw-r--r--  synapse/config/key.py | 13
-rw-r--r--  synapse/config/server.py | 21
-rw-r--r--  synapse/crypto/keyring.py | 4
-rw-r--r--  synapse/events/builder.py | 2
-rw-r--r--  synapse/events/third_party_rules.py | 26
-rw-r--r--  synapse/events/utils.py | 2
-rw-r--r--  synapse/handlers/account_data.py | 52
-rw-r--r--  synapse/handlers/appservice.py | 148
-rw-r--r--  synapse/handlers/auth.py | 3
-rw-r--r--  synapse/handlers/device.py | 132
-rw-r--r--  synapse/handlers/federation_event.py | 49
-rw-r--r--  synapse/handlers/presence.py | 2
-rw-r--r--  synapse/handlers/relations.py | 41
-rw-r--r--  synapse/handlers/room.py | 4
-rw-r--r--  synapse/handlers/room_batch.py | 38
-rw-r--r--  synapse/handlers/sync.py | 63
-rw-r--r--  synapse/http/proxyagent.py | 2
-rw-r--r--  synapse/module_api/__init__.py | 29
-rw-r--r--  synapse/replication/http/_base.py | 2
-rw-r--r--  synapse/replication/slave/storage/client_ips.py | 59
-rw-r--r--  synapse/replication/slave/storage/devices.py | 13
-rw-r--r--  synapse/replication/tcp/commands.py | 8
-rw-r--r--  synapse/replication/tcp/handler.py | 48
-rw-r--r--  synapse/rest/client/relations.py | 170
-rw-r--r--  synapse/rest/client/room_batch.py | 21
-rw-r--r--  synapse/rest/client/sync.py | 4
-rw-r--r--  synapse/rest/key/v2/local_key_resource.py | 12
-rw-r--r--  synapse/rest/key/v2/remote_key_resource.py | 8
-rw-r--r--  synapse/rest/media/v1/media_storage.py | 2
-rw-r--r--  synapse/rest/media/v1/preview_html.py | 4
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py | 9
-rw-r--r--  synapse/storage/database.py | 25
-rw-r--r--  synapse/storage/databases/main/__init__.py | 21
-rw-r--r--  synapse/storage/databases/main/appservice.py | 79
-rw-r--r--  synapse/storage/databases/main/client_ips.py | 101
-rw-r--r--  synapse/storage/databases/main/devices.py | 265
-rw-r--r--  synapse/storage/databases/main/events_worker.py | 8
-rw-r--r--  synapse/storage/databases/main/monthly_active_users.py | 60
-rw-r--r--  synapse/storage/databases/main/registration.py | 24
-rw-r--r--  synapse/storage/databases/main/relations.py | 93
-rw-r--r--  synapse/storage/databases/main/roommember.py | 21
-rw-r--r--  synapse/storage/databases/main/state.py | 30
-rw-r--r--  synapse/storage/databases/main/stream.py | 30
-rw-r--r--  synapse/storage/relations.py | 84
-rw-r--r--  synapse/storage/schema/__init__.py | 6
-rw-r--r--  synapse/storage/schema/main/delta/68/06_msc3202_add_device_list_appservice_stream_type.sql | 23
-rw-r--r--  synapse/storage/schema/main/delta/69/01as_txn_seq.py | 44
-rw-r--r--  synapse/storage/schema/main/delta/69/01device_list_oubound_by_room.sql | 38
-rw-r--r--  synapse/storage/state.py | 20
-rw-r--r--  synapse/types.py | 121
-rw-r--r--  synapse/util/caches/__init__.py | 6
-rw-r--r--  synapse/util/patch_inline_callbacks.py | 17
-rw-r--r--  synapse/util/retryutils.py | 2
-rw-r--r--  synapse/visibility.py | 234
-rw-r--r--  tests/appservice/test_scheduler.py | 54
-rw-r--r--  tests/crypto/test_event_signing.py | 6
-rw-r--r--  tests/federation/test_federation_sender.py | 23
-rw-r--r--  tests/federation/transport/test_knocking.py | 3
-rw-r--r--  tests/handlers/test_appservice.py | 121
-rw-r--r--  tests/handlers/test_deactivate_account.py | 25
-rw-r--r--  tests/handlers/test_e2e_keys.py | 6
-rw-r--r--  tests/handlers/test_federation.py | 83
-rw-r--r--  tests/handlers/test_federation_event.py | 225
-rw-r--r--  tests/handlers/test_oidc.py | 7
-rw-r--r--  tests/handlers/test_profile.py | 2
-rw-r--r--  tests/handlers/test_sync.py | 4
-rw-r--r--  tests/handlers/test_user_directory.py | 2
-rw-r--r--  tests/module_api/test_api.py | 22
-rw-r--r--  tests/replication/_base.py | 2
-rw-r--r--  tests/replication/slave/storage/test_events.py | 4
-rw-r--r--  tests/rest/admin/test_media.py | 8
-rw-r--r--  tests/rest/admin/test_server_notice.py | 12
-rw-r--r--  tests/rest/admin/test_user.py | 15
-rw-r--r--  tests/rest/client/test_account.py | 11
-rw-r--r--  tests/rest/client/test_account_data.py | 75
-rw-r--r--  tests/rest/client/test_relations.py | 207
-rw-r--r--  tests/rest/client/test_rooms.py | 2
-rw-r--r--  tests/rest/client/test_sync.py | 68
-rw-r--r--  tests/rest/client/test_third_party_rules.py | 41
-rw-r--r--  tests/rest/client/utils.py | 11
-rw-r--r--  tests/rest/key/v2/test_remote_key_resource.py | 16
-rw-r--r--  tests/rest/media/v1/test_url_preview.py | 43
-rw-r--r--  tests/server.py | 6
-rw-r--r--  tests/storage/databases/main/test_lock.py | 8
-rw-r--r--  tests/storage/test_appservice.py | 75
-rw-r--r--  tests/storage/test_cleanup_extrems.py | 20
-rw-r--r--  tests/storage/test_devices.py | 14
-rw-r--r--  tests/storage/test_id_generators.py | 12
-rw-r--r--  tests/storage/test_redaction.py | 62
-rw-r--r--  tests/storage/test_stream.py | 4
-rw-r--r--  tests/test_visibility.py | 71
-rw-r--r--  tests/unittest.py | 127
-rw-r--r--  tox.ini | 10
165 files changed, 2752 insertions, 2009 deletions
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 2afddf58d1..5c29867cc8 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -27,7 +27,6 @@ jobs:
           - "check_codestyle"
           - "check_isort"
           - "mypy"
-          - "packaging"
 
     steps:
       - uses: actions/checkout@v2
@@ -377,7 +376,7 @@ jobs:
       # Run Complement
       - run: |
           set -o pipefail
-          go test -v -json -tags synapse_blacklist,msc2403,msc2716,msc3030 ./tests/... 2>&1 | gotestfmt
+          go test -v -json -tags synapse_blacklist,msc2716,msc3030 ./tests/... 2>&1 | gotestfmt
         shell: bash
         name: Run Complement Tests
         env:
diff --git a/.gitignore b/.gitignore
index 3bd6b1a08c..61bbd2dfa0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,6 +30,9 @@ __pycache__/
 /media_store/
 /uploads
 
+# For direnv users
+/.envrc
+
 # IDEs
 /.idea/
 /.ropeproject/
diff --git a/changelog.d/11881.feature b/changelog.d/11881.feature
new file mode 100644
index 0000000000..392294ffc3
--- /dev/null
+++ b/changelog.d/11881.feature
@@ -0,0 +1 @@
+Send device list changes to application services as specified by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202), using unstable prefixes. The `msc3202_transaction_extensions` experimental homeserver config option must be enabled and `org.matrix.msc3202: true` must be present in the application service registration file for device list changes to be sent. The "left" field is currently always empty.
\ No newline at end of file
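A minimal sketch of the two opt-ins this changelog entry names. Only the option names (`msc3202_transaction_extensions` and `org.matrix.msc3202`) come from the entry itself; the YAML placement shown here is an assumption:

```yaml
# homeserver.yaml — placement under `experimental_features` is an assumption
experimental_features:
  msc3202_transaction_extensions: true

# application service registration file — marks the AS as MSC3202-capable
org.matrix.msc3202: true
```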
diff --git a/changelog.d/12040.feature b/changelog.d/12040.feature
new file mode 100644
index 0000000000..0a88c6e80c
--- /dev/null
+++ b/changelog.d/12040.feature
@@ -0,0 +1 @@
+Optimise fetching large quantities of missing room state over federation.
diff --git a/changelog.d/12165.misc b/changelog.d/12165.misc
new file mode 100644
index 0000000000..4b80b0562e
--- /dev/null
+++ b/changelog.d/12165.misc
@@ -0,0 +1 @@
+Remove lingering unstable references to MSC2403 (knocking).
diff --git a/changelog.d/12191.misc b/changelog.d/12191.misc
new file mode 100644
index 0000000000..9f333e718a
--- /dev/null
+++ b/changelog.d/12191.misc
@@ -0,0 +1 @@
+Avoid trying to calculate the state at outlier events.
diff --git a/changelog.d/12193.misc b/changelog.d/12193.misc
new file mode 100644
index 0000000000..a721254d22
--- /dev/null
+++ b/changelog.d/12193.misc
@@ -0,0 +1 @@
+Omit sending "offline" presence updates to application services after they are initially configured.
\ No newline at end of file
diff --git a/changelog.d/12209.misc b/changelog.d/12209.misc
new file mode 100644
index 0000000000..d145b5eb04
--- /dev/null
+++ b/changelog.d/12209.misc
@@ -0,0 +1 @@
+Switch to using a sequence to generate AS transaction IDs. Contributed by Nick Beeper. If running synapse with a dedicated appservice worker, this MUST be stopped before upgrading the main process and database.
diff --git a/changelog.d/12251.feature b/changelog.d/12251.feature
new file mode 100644
index 0000000000..ba9ede03c6
--- /dev/null
+++ b/changelog.d/12251.feature
@@ -0,0 +1 @@
+Offload the `update_client_ip` background job from the main process to the background worker, when using Redis-based replication.
diff --git a/changelog.d/12267.misc b/changelog.d/12267.misc
new file mode 100644
index 0000000000..e43844d44a
--- /dev/null
+++ b/changelog.d/12267.misc
@@ -0,0 +1 @@
+Add missing type hints for storage.
diff --git a/changelog.d/12271.doc b/changelog.d/12271.doc
new file mode 100644
index 0000000000..d9696fc5d5
--- /dev/null
+++ b/changelog.d/12271.doc
@@ -0,0 +1 @@
+Clarify documentation for running SyTest against Synapse, including use of Postgres and worker mode.
\ No newline at end of file
diff --git a/changelog.d/12293.removal b/changelog.d/12293.removal
new file mode 100644
index 0000000000..25214a4b49
--- /dev/null
+++ b/changelog.d/12293.removal
@@ -0,0 +1 @@
+Remove the unused and unstable `/aggregations` endpoint which was removed from [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675).
diff --git a/changelog.d/12295.misc b/changelog.d/12295.misc
new file mode 100644
index 0000000000..9c34e16909
--- /dev/null
+++ b/changelog.d/12295.misc
@@ -0,0 +1 @@
+Move [MSC2654](https://github.com/matrix-org/matrix-doc/pull/2654) support behind an experimental configuration flag.
diff --git a/changelog.d/12302.feature b/changelog.d/12302.feature
new file mode 100644
index 0000000000..603fa2d23a
--- /dev/null
+++ b/changelog.d/12302.feature
@@ -0,0 +1 @@
+Add a module callback to react to new 3PID (email address, phone number) associations.
diff --git a/changelog.d/12310.feature b/changelog.d/12310.feature
new file mode 100644
index 0000000000..f3fbb298f7
--- /dev/null
+++ b/changelog.d/12310.feature
@@ -0,0 +1 @@
+Add a configuration option to remove a specific set of rooms from sync responses.
diff --git a/changelog.d/12315.doc b/changelog.d/12315.doc
new file mode 100644
index 0000000000..ed72f55cba
--- /dev/null
+++ b/changelog.d/12315.doc
@@ -0,0 +1 @@
+Document the behaviour of `LoggingTransaction.call_after` and `LoggingTransaction.call_on_exception` methods when transactions are retried.
diff --git a/changelog.d/12316.misc b/changelog.d/12316.misc
new file mode 100644
index 0000000000..9f333e718a
--- /dev/null
+++ b/changelog.d/12316.misc
@@ -0,0 +1 @@
+Avoid trying to calculate the state at outlier events.
diff --git a/changelog.d/12317.misc b/changelog.d/12317.misc
new file mode 100644
index 0000000000..1dfee496d8
--- /dev/null
+++ b/changelog.d/12317.misc
@@ -0,0 +1 @@
+Update docstrings to explain how to decipher live and historic pagination tokens.
diff --git a/changelog.d/12320.misc b/changelog.d/12320.misc
new file mode 100644
index 0000000000..7b4748d230
--- /dev/null
+++ b/changelog.d/12320.misc
@@ -0,0 +1 @@
+Bump the version of `black` for compatibility with the latest `click` release.
diff --git a/changelog.d/12321.misc b/changelog.d/12321.misc
new file mode 100644
index 0000000000..200e7c44fe
--- /dev/null
+++ b/changelog.d/12321.misc
@@ -0,0 +1 @@
+Add ground work for speeding up device list updates for users in large numbers of rooms.
diff --git a/changelog.d/12326.misc b/changelog.d/12326.misc
new file mode 100644
index 0000000000..2d2a00e571
--- /dev/null
+++ b/changelog.d/12326.misc
@@ -0,0 +1 @@
+Fix typechecker problems exposed by signedjson 1.1.2.
diff --git a/changelog.d/12327.feature b/changelog.d/12327.feature
new file mode 100644
index 0000000000..4fe294f1b1
--- /dev/null
+++ b/changelog.d/12327.feature
@@ -0,0 +1 @@
+Add a module callback to react to account data changes.
diff --git a/changelog.d/12329.bugfix b/changelog.d/12329.bugfix
new file mode 100644
index 0000000000..aef4117343
--- /dev/null
+++ b/changelog.d/12329.bugfix
@@ -0,0 +1 @@
+Fix non-member state events not resolving for historical events when used in [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) `/batch_send` `state_events_at_start`.
diff --git a/changelog.d/12330.misc b/changelog.d/12330.misc
new file mode 100644
index 0000000000..9f333e718a
--- /dev/null
+++ b/changelog.d/12330.misc
@@ -0,0 +1 @@
+Avoid trying to calculate the state at outlier events.
diff --git a/changelog.d/12331.doc b/changelog.d/12331.doc
new file mode 100644
index 0000000000..ec0ca3ea95
--- /dev/null
+++ b/changelog.d/12331.doc
@@ -0,0 +1 @@
+Update dead links in `check-newsfragment.sh` to point to the correct documentation URL.
diff --git a/changelog.d/12332.misc b/changelog.d/12332.misc
new file mode 100644
index 0000000000..9f333e718a
--- /dev/null
+++ b/changelog.d/12332.misc
@@ -0,0 +1 @@
+Avoid trying to calculate the state at outlier events.
diff --git a/changelog.d/12333.bugfix b/changelog.d/12333.bugfix
new file mode 100644
index 0000000000..2c073a77d5
--- /dev/null
+++ b/changelog.d/12333.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug affecting URL previews that would generate a 500 response instead of a 403 if the previewed URL includes a port that isn't allowed by the relevant blacklist.
diff --git a/changelog.d/12334.misc b/changelog.d/12334.misc
new file mode 100644
index 0000000000..10a57e23b3
--- /dev/null
+++ b/changelog.d/12334.misc
@@ -0,0 +1 @@
+Remove the `tox` packaging job: it will be redundant once #11537 lands.
diff --git a/changelog.d/12335.misc b/changelog.d/12335.misc
new file mode 100644
index 0000000000..39ea361159
--- /dev/null
+++ b/changelog.d/12335.misc
@@ -0,0 +1 @@
+Ignore `.envrc` for `direnv` users.
diff --git a/changelog.d/12336.misc b/changelog.d/12336.misc
new file mode 100644
index 0000000000..0aecd543f9
--- /dev/null
+++ b/changelog.d/12336.misc
@@ -0,0 +1 @@
+Remove the (broadly unused, dev-only) dockerfile for pg tests.
diff --git a/changelog.d/12338.misc b/changelog.d/12338.misc
new file mode 100644
index 0000000000..376089f327
--- /dev/null
+++ b/changelog.d/12338.misc
@@ -0,0 +1 @@
+Refactor relations code to remove an unnecessary class.
diff --git a/changelog.d/12341.feature b/changelog.d/12341.feature
new file mode 100644
index 0000000000..ebb96ee486
--- /dev/null
+++ b/changelog.d/12341.feature
@@ -0,0 +1 @@
+Allow setting user admin status using the module API. Contributed by Famedly.
diff --git a/changelog.d/12346.misc b/changelog.d/12346.misc
new file mode 100644
index 0000000000..6561b3be82
--- /dev/null
+++ b/changelog.d/12346.misc
@@ -0,0 +1 @@
+Remove redundant `get_success` calls in test code.
diff --git a/changelog.d/12347.misc b/changelog.d/12347.misc
new file mode 100644
index 0000000000..1f6f584e6d
--- /dev/null
+++ b/changelog.d/12347.misc
@@ -0,0 +1 @@
+Add type annotations for `tests/unittest.py`.
diff --git a/changelog.d/12348.misc b/changelog.d/12348.misc
new file mode 100644
index 0000000000..2bfeadb7f8
--- /dev/null
+++ b/changelog.d/12348.misc
@@ -0,0 +1 @@
+Move single-use methods out of `TestCase`.
diff --git a/changelog.d/12349.misc b/changelog.d/12349.misc
new file mode 100644
index 0000000000..b2d83a7f78
--- /dev/null
+++ b/changelog.d/12349.misc
@@ -0,0 +1 @@
+Remove broken and unused development scripts.
diff --git a/changelog.d/12350.bugfix b/changelog.d/12350.bugfix
new file mode 100644
index 0000000000..9cbdc28038
--- /dev/null
+++ b/changelog.d/12350.bugfix
@@ -0,0 +1 @@
+Default to `private` room visibility rather than `public` when a client does not specify one, according to spec.
\ No newline at end of file
diff --git a/changelog.d/12351.misc b/changelog.d/12351.misc
new file mode 100644
index 0000000000..b1dd1e2696
--- /dev/null
+++ b/changelog.d/12351.misc
@@ -0,0 +1 @@
+Remove broken and unused development scripts.
\ No newline at end of file
diff --git a/changelog.d/12355.misc b/changelog.d/12355.misc
new file mode 100644
index 0000000000..b1dd1e2696
--- /dev/null
+++ b/changelog.d/12355.misc
@@ -0,0 +1 @@
+Remove broken and unused development scripts.
\ No newline at end of file
diff --git a/changelog.d/12367.feature b/changelog.d/12367.feature
new file mode 100644
index 0000000000..34bb60e966
--- /dev/null
+++ b/changelog.d/12367.feature
@@ -0,0 +1 @@
+Reduce overhead of restarting synchrotrons.
diff --git a/changelog.d/12369.doc b/changelog.d/12369.doc
new file mode 100644
index 0000000000..c34271b2ce
--- /dev/null
+++ b/changelog.d/12369.doc
@@ -0,0 +1 @@
+Update the link to Redis pub/sub documentation in the workers documentation.
\ No newline at end of file
diff --git a/contrib/jitsimeetbridge/jitsimeetbridge.py b/contrib/jitsimeetbridge/jitsimeetbridge.py
index 495fd4e10a..b3de468687 100644
--- a/contrib/jitsimeetbridge/jitsimeetbridge.py
+++ b/contrib/jitsimeetbridge/jitsimeetbridge.py
@@ -193,12 +193,15 @@ class TrivialXmppClient:
         time.sleep(7)
         print("SSRC spammer started")
         while self.running:
-            ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % {
-                "tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
-                "nick": self.userId,
-                "assrc": self.ssrcs["audio"],
-                "vssrc": self.ssrcs["video"],
-            }
+            ssrcMsg = (
+                "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
+                % {
+                    "tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
+                    "nick": self.userId,
+                    "assrc": self.ssrcs["audio"],
+                    "vssrc": self.ssrcs["video"],
+                }
+            )
             res = self.sendIq(ssrcMsg)
             print("reply from ssrc announce: ", res)
             time.sleep(10)
diff --git a/docker/Dockerfile-pgtests b/docker/Dockerfile-pgtests
deleted file mode 100644
index b94484ea7f..0000000000
--- a/docker/Dockerfile-pgtests
+++ /dev/null
@@ -1,30 +0,0 @@
-# Use the Sytest image that comes with a lot of the build dependencies
-# pre-installed
-FROM matrixdotorg/sytest:focal
-
-# The Sytest image doesn't come with python, so install that
-RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
-
-# We need tox to run the tests in run_pg_tests.sh
-RUN python3 -m pip install tox
-
-# Initialise the db
-RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres
-
-# Add a user with our UID and GID so that files get created on the host owned
-# by us, not root.
-ARG UID
-ARG GID
-RUN groupadd --gid $GID user
-RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user
-
-# Ensure we can start postgres by sudo-ing as the postgres user.
-RUN apt-get update && apt-get -qq install -y sudo
-RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-
-ADD run_pg_tests.sh /run_pg_tests.sh
-# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
-# so that we can `docker run` this container and pass arguments to pg_tests.sh
-ENTRYPOINT ["/run_pg_tests.sh"]
-
-USER user
diff --git a/docker/README-testing.md b/docker/README-testing.md
index 6a5baf9e28..b010509275 100644
--- a/docker/README-testing.md
+++ b/docker/README-testing.md
@@ -78,7 +78,7 @@ the root of your Complement checkout and run:
 docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles
 ```
 
-This will build an image with the tag `complement-synapse`, which can be handed to
+This will build an image with the tag `complement-synapse-workers`, which can be handed to
 Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
 [Complement's documentation](https://github.com/matrix-org/complement/#running) for
 how to run the tests, as well as the various available command line flags.
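For orientation, a hedged example of handing the corrected image tag to Complement; the `go test` flags are borrowed from `scripts-dev/complement.sh` elsewhere in this diff and are not prescribed by the README hunk itself:

```sh
# Hypothetical invocation, run from the root of a Complement checkout
COMPLEMENT_BASE_IMAGE=matrixdotorg/complement-synapse-workers \
    go test -v -count=1 ./tests/...
```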
diff --git a/docker/run_pg_tests.sh b/docker/run_pg_tests.sh
deleted file mode 100755
index b22b6ef16b..0000000000
--- a/docker/run_pg_tests.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-
-# This script runs the PostgreSQL tests inside a Docker container. It expects
-# the relevant source files to be mounted into /src (done automatically by the
-# caller script). It will set up the database, run it, and then use the tox
-# configuration to run the tests.
-
-set -e
-
-# Set PGUSER so Synapse's tests know what user to connect to the database with
-export PGUSER=postgres
-
-# Start the database
-sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/data start
-
-# Run the tests
-cd /src
-export TRIAL_FLAGS="-j 4"
-tox --workdir=./.tox-pg-container -e py37-postgres "$@"
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 21f80efc99..6aa48e1919 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -45,6 +45,7 @@
         - [Account validity callbacks](modules/account_validity_callbacks.md)
         - [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
         - [Background update controller callbacks](modules/background_update_controller_callbacks.md)
+        - [Account data callbacks](modules/account_data_callbacks.md)
         - [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
     - [Workers](workers.md)
       - [Using `synctl` with Workers](synctl_workers.md)
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 071202e196..9db9352c9e 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -220,27 +220,6 @@ export SYNAPSE_POSTGRES_PASSWORD=mydevenvpassword
 trial
 ```
 
-#### Prebuilt container
-
-Since configuring PostgreSQL can be fiddly, we can make use of a pre-made
-Docker container to set up PostgreSQL and run our tests for us. To do so, run
-
-```shell
-scripts-dev/test_postgresql.sh
-```
-
-Any extra arguments to the script will be passed to `tox` and then to `trial`,
-so we can run a specific test in this container with e.g.
-
-```shell
-scripts-dev/test_postgresql.sh tests.replication.test_sharded_event_persister.EventPersisterShardTestCase
-```
-
-The container creates a folder in your Synapse checkout called
-`.tox-pg-container` and uses this as a tox environment. The output of any
-`trial` runs goes into `_trial_temp` in your synapse source directory — the same
-as running `trial` directly on your host machine.
-
 ## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).
 
 The integration tests are a more comprehensive suite of tests. They
@@ -254,8 +233,14 @@ configuration:
 ```sh
 $ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
 ```
+(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
+
+This configuration should generally cover your needs.
+
+- To run with Postgres, supply the `-e POSTGRES=1 -e MULTI_POSTGRES=1` environment flags.
+- To run with Synapse in worker mode, supply the `-e WORKERS=1 -e REDIS=1` environment flags (in addition to the Postgres flags).
 
-This configuration should generally cover  your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
+For more details about other configurations, see the [Docker-specific documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
 
 
 ## Run the integration tests ([Complement](https://github.com/matrix-org/complement)).
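Putting the pieces of the SyTest hunk above together, a combined invocation might look like the following sketch (the host paths are placeholders; the image, mounts, and environment flags are the ones listed above):

```sh
docker run --rm -it \
    -v "$(realpath path/to/synapse)":/src:ro \
    -v "$(realpath path/to/sytest-logs)":/logs \
    -e POSTGRES=1 -e MULTI_POSTGRES=1 \
    -e WORKERS=1 -e REDIS=1 \
    matrixdotorg/sytest-synapse:buster
```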
diff --git a/docs/modules/account_data_callbacks.md b/docs/modules/account_data_callbacks.md
new file mode 100644
index 0000000000..25de911627
--- /dev/null
+++ b/docs/modules/account_data_callbacks.md
@@ -0,0 +1,106 @@
+# Account data callbacks
+
+Account data callbacks allow module developers to react to changes of the account data
+of local users. Account data callbacks can be registered using the module API's
+`register_account_data_callbacks` method.
+
+## Callbacks
+
+The available account data callbacks are:
+
+### `on_account_data_updated`
+
+_First introduced in Synapse v1.57.0_
+
+```python
+async def on_account_data_updated(
+    user_id: str,
+    room_id: Optional[str],
+    account_data_type: str,
+    content: "synapse.module_api.JsonDict",
+) -> None:
+```
+
+Called after a user's account data has been updated. The module is given the
+Matrix ID of the user whose account data is changing, the room ID the data is associated
+with, the type associated with the change, as well as the new content. If the account
+data is not associated with a specific room, then the room ID is `None`.
+
+This callback is triggered when new account data is added or when the data associated with
+a given type (and optionally room) changes. This includes deletion, since in Matrix,
+deleting account data consists of replacing the data associated with a given type
+(and optionally room) with an empty dictionary (`{}`).
+
+Note that this doesn't trigger when changing the tags associated with a room, as these are
+processed separately by Synapse.
+
+If multiple modules implement this callback, Synapse runs them all in order.
+
+## Example
+
+The example below is a module that implements the `on_account_data_updated` callback, and
+sends an event to an audit room when a user changes their account data.
+
+```python
+import json
+import attr
+from typing import Any, Dict, Optional
+
+from synapse.module_api import JsonDict, ModuleApi
+from synapse.module_api.errors import ConfigError
+
+
+@attr.s(auto_attribs=True)
+class CustomAccountDataConfig:
+    audit_room: str
+    sender: str
+
+
+class CustomAccountDataModule:
+    def __init__(self, config: CustomAccountDataConfig, api: ModuleApi):
+        self.api = api
+        self.config = config
+
+        self.api.register_account_data_callbacks(
+            on_account_data_updated=self.log_new_account_data,
+        )
+
+    @staticmethod
+    def parse_config(config: Dict[str, Any]) -> CustomAccountDataConfig:
+        def check_in_config(param: str):
+            if param not in config:
+                raise ConfigError(f"'{param}' is required")
+
+        check_in_config("audit_room")
+        check_in_config("sender")
+
+        return CustomAccountDataConfig(
+            audit_room=config["audit_room"],
+            sender=config["sender"],
+        )
+
+    async def log_new_account_data(
+        self,
+        user_id: str,
+        room_id: Optional[str],
+        account_data_type: str,
+        content: JsonDict,
+    ) -> None:
+        content_raw = json.dumps(content)
+        msg_content = f"{user_id} has changed their account data for type {account_data_type} to: {content_raw}"
+
+        if room_id is not None:
+            msg_content += f" (in room {room_id})"
+
+        await self.api.create_and_send_event_into_room(
+            {
+                "room_id": self.config.audit_room,
+                "sender": self.config.sender,
+                "type": "m.room.message",
+                "content": {
+                    "msgtype": "m.text",
+                    "body": msg_content
+                }
+            }
+        )
+```
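To round out the new document: a hypothetical `modules` stanza in `homeserver.yaml` that would satisfy the `parse_config` checks in the example above. The module path is made up; only the `audit_room` and `sender` keys come from the example:

```yaml
modules:
  - module: my_module.CustomAccountDataModule  # hypothetical import path
    config:
      audit_room: "!auditroom:example.com"
      sender: "@audit-bot:example.com"
```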
diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md
index 1d3c39967f..e1a5b6524f 100644
--- a/docs/modules/third_party_rules_callbacks.md
+++ b/docs/modules/third_party_rules_callbacks.md
@@ -247,6 +247,24 @@ admin API.
 
 If multiple modules implement this callback, Synapse runs them all in order.
 
+### `on_threepid_bind`
+
+_First introduced in Synapse v1.56.0_
+
+```python
+async def on_threepid_bind(user_id: str, medium: str, address: str) -> None:
+```
+
+Called after creating an association between a local user and a third-party identifier
+(email address, phone number). The module is given the Matrix ID of the user the
+association is for, as well as the medium (`email` or `msisdn`) and address of the
+third-party identifier.
+
+Note that this callback is _not_ called after a successful association on an _identity
+server_.
+
+If multiple modules implement this callback, Synapse runs them all in order.
+
 ## Example
 
 The example below is a module that implements the third-party rules callback
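As a companion to the `on_threepid_bind` addition above, a minimal hypothetical module registering just that callback via the module API's `register_third_party_rules_callbacks` method; the logging body is illustrative only:

```python
import logging

from synapse.module_api import ModuleApi

logger = logging.getLogger(__name__)


class ThreepidAuditModule:
    def __init__(self, config: dict, api: ModuleApi):
        # Register only the new callback; the other third-party rules
        # callbacks are optional keyword arguments and are left unset here.
        api.register_third_party_rules_callbacks(
            on_threepid_bind=self.on_threepid_bind,
        )

    async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> None:
        # Log each new association between a local user and a 3PID.
        logger.info("3PID %s (%s) associated with %s", address, medium, user_id)
```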
diff --git a/docs/modules/writing_a_module.md b/docs/modules/writing_a_module.md
index e7c0ffad58..e6303b739e 100644
--- a/docs/modules/writing_a_module.md
+++ b/docs/modules/writing_a_module.md
@@ -33,7 +33,7 @@ A module can implement the following static method:
 
 ```python
 @staticmethod
-def parse_config(config: dict) -> dict
+def parse_config(config: dict) -> Any
 ```
 
 This method is given a dictionary resulting from parsing the YAML configuration for the
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index a21b48ab2e..b8d8c0dbf0 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -539,6 +539,15 @@ templates:
   #
   #custom_template_directory: /path/to/custom/templates/
 
+# List of rooms to exclude from sync responses. This is useful for server
+# administrators wishing to group users into a room without these users being able
+# to see it from their client.
+#
+# By default, no room is excluded.
+#
+#exclude_rooms_from_sync:
+#    - !foo:example.com
+
 
 # Message retention policy at the server level.
 #
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 062e823333..f6d226526a 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -85,6 +85,19 @@ process, for example:
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     ```
 
+# Upgrading to v1.57.0
+
+## Changes to database schema for application services
+
+Synapse v1.57.0 includes a [change](https://github.com/matrix-org/synapse/pull/12209) to the
+way transaction IDs are managed for application services. If your deployment uses a dedicated
+worker for application service traffic, **it must be stopped** when the database is upgraded
+(which normally happens when the main process is upgraded), to ensure the change is made safely
+without any risk of reusing transaction IDs.
+
+Deployments which do not use separate worker processes can be upgraded as normal. Similarly,
+deployments where no application services are in use can be upgraded as normal.
+
 # Upgrading to v1.56.0
 
 ## Groups/communities feature has been deprecated
diff --git a/docs/workers.md b/docs/workers.md
index 8ac95e39bb..caef44b614 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -27,7 +27,7 @@ feeds streams of newly written data between processes so they can be kept in
 sync with the database state.
 
 When configured to do so, Synapse uses a
-[Redis pub/sub channel](https://redis.io/topics/pubsub) to send the replication
+[Redis pub/sub channel](https://redis.io/docs/manual/pubsub/) to send the replication
 stream between all configured Synapse processes. Additionally, processes may
 make HTTP requests to each other, primarily for operations which need to wait
 for a reply ─ such as sending an event.
diff --git a/mypy.ini b/mypy.ini
index 4781ce56f2..ac2a5c753d 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -24,13 +24,8 @@ files =
 exclude = (?x)
   ^(
    |scripts-dev/build_debian_packages.py
-   |scripts-dev/check_signature.py
-   |scripts-dev/definitions.py
    |scripts-dev/federation_client.py
-   |scripts-dev/hash_history.py
-   |scripts-dev/list_url_patterns.py
    |scripts-dev/release.py
-   |scripts-dev/tail-synapse.py
 
    |synapse/_scripts/export_signing_key.py
    |synapse/_scripts/move_remote_media_to_new_store.py
@@ -43,7 +38,6 @@ exclude = (?x)
    |synapse/storage/databases/main/event_federation.py
    |synapse/storage/databases/main/push_rule.py
    |synapse/storage/databases/main/roommember.py
-   |synapse/storage/databases/main/state.py
    |synapse/storage/schema/
 
    |tests/api/test_auth.py
@@ -86,7 +80,6 @@ exclude = (?x)
    |tests/test_server.py
    |tests/test_state.py
    |tests/test_terms_auth.py
-   |tests/unittest.py
    |tests/util/caches/test_cached_call.py
    |tests/util/caches/test_deferred_cache.py
    |tests/util/caches/test_descriptors.py
@@ -273,6 +266,9 @@ ignore_missing_imports = True
 [mypy-ijson.*]
 ignore_missing_imports = True
 
+[mypy-importlib_metadata.*]
+ignore_missing_imports = True
+
 [mypy-jaeger_client.*]
 ignore_missing_imports = True
 
diff --git a/scripts-dev/check-newsfragment.sh b/scripts-dev/check-newsfragment.sh
index 493558ad65..effea0929c 100755
--- a/scripts-dev/check-newsfragment.sh
+++ b/scripts-dev/check-newsfragment.sh
@@ -19,7 +19,7 @@ if ! git diff --quiet FETCH_HEAD... -- debian; then
     if git diff --quiet FETCH_HEAD... -- debian/changelog; then
         echo "Updates to debian directory, but no update to the changelog." >&2
         echo "!! Please see the contributing guide for help writing your changelog entry:" >&2
-        echo "https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#debian-changelog" >&2
+        echo "https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#debian-changelog" >&2
         exit 1
     fi
 fi
@@ -32,7 +32,7 @@ fi
 
 # Print a link to the contributing guide if the user makes a mistake
 CONTRIBUTING_GUIDE_TEXT="!! Please see the contributing guide for help writing your changelog entry:
-https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#changelog"
+https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#changelog"
 
 # If check-newsfragment returns a non-zero exit code, print the contributing guide and exit
 python -m towncrier.check --compare-with=origin/develop || (echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 && exit 1)
diff --git a/scripts-dev/check_signature.py b/scripts-dev/check_signature.py
deleted file mode 100644
index 6755bc5282..0000000000
--- a/scripts-dev/check_signature.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import argparse
-import json
-import logging
-import sys
-
-import dns.resolver
-import urllib2
-from signedjson.key import decode_verify_key_bytes, write_signing_keys
-from signedjson.sign import verify_signed_json
-from unpaddedbase64 import decode_base64
-
-
-def get_targets(server_name):
-    if ":" in server_name:
-        target, port = server_name.split(":")
-        yield (target, int(port))
-        return
-    try:
-        answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
-        for srv in answers:
-            yield (srv.target, srv.port)
-    except dns.resolver.NXDOMAIN:
-        yield (server_name, 8448)
-
-
-def get_server_keys(server_name, target, port):
-    url = "https://%s:%i/_matrix/key/v1" % (target, port)
-    keys = json.load(urllib2.urlopen(url))
-    verify_keys = {}
-    for key_id, key_base64 in keys["verify_keys"].items():
-        verify_key = decode_verify_key_bytes(key_id, decode_base64(key_base64))
-        verify_signed_json(keys, server_name, verify_key)
-        verify_keys[key_id] = verify_key
-    return verify_keys
-
-
-def main():
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("signature_name")
-    parser.add_argument(
-        "input_json", nargs="?", type=argparse.FileType("r"), default=sys.stdin
-    )
-
-    args = parser.parse_args()
-    logging.basicConfig()
-
-    server_name = args.signature_name
-    keys = {}
-    for target, port in get_targets(server_name):
-        try:
-            keys = get_server_keys(server_name, target, port)
-            print("Using keys from https://%s:%s/_matrix/key/v1" % (target, port))
-            write_signing_keys(sys.stdout, keys.values())
-            break
-        except Exception:
-            logging.exception("Error talking to %s:%s", target, port)
-
-    json_to_check = json.load(args.input_json)
-    print("Checking JSON:")
-    for key_id in json_to_check["signatures"][args.signature_name]:
-        try:
-            key = keys[key_id]
-            verify_signed_json(json_to_check, args.signature_name, key)
-            print("PASS %s" % (key_id,))
-        except Exception:
-            logging.exception("Check for key %s failed" % (key_id,))
-            print("FAIL %s" % (key_id,))
-
-
-if __name__ == "__main__":
-    main()
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 0a79a4063f..d1b59ff040 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -71,4 +71,4 @@ fi
 
 # Run the tests!
 echo "Images built; running complement"
-go test -v -tags synapse_blacklist,msc2403,msc2716,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
+go test -v -tags synapse_blacklist,msc2716,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py
deleted file mode 100755
index c82ddd9677..0000000000
--- a/scripts-dev/definitions.py
+++ /dev/null
@@ -1,208 +0,0 @@
-#! /usr/bin/python
-
-import argparse
-import ast
-import os
-import re
-import sys
-
-import yaml
-
-
-class DefinitionVisitor(ast.NodeVisitor):
-    def __init__(self):
-        super().__init__()
-        self.functions = {}
-        self.classes = {}
-        self.names = {}
-        self.attrs = set()
-        self.definitions = {
-            "def": self.functions,
-            "class": self.classes,
-            "names": self.names,
-            "attrs": self.attrs,
-        }
-
-    def visit_Name(self, node):
-        self.names.setdefault(type(node.ctx).__name__, set()).add(node.id)
-
-    def visit_Attribute(self, node):
-        self.attrs.add(node.attr)
-        for child in ast.iter_child_nodes(node):
-            self.visit(child)
-
-    def visit_ClassDef(self, node):
-        visitor = DefinitionVisitor()
-        self.classes[node.name] = visitor.definitions
-        for child in ast.iter_child_nodes(node):
-            visitor.visit(child)
-
-    def visit_FunctionDef(self, node):
-        visitor = DefinitionVisitor()
-        self.functions[node.name] = visitor.definitions
-        for child in ast.iter_child_nodes(node):
-            visitor.visit(child)
-
-
-def non_empty(defs):
-    functions = {name: non_empty(f) for name, f in defs["def"].items()}
-    classes = {name: non_empty(f) for name, f in defs["class"].items()}
-    result = {}
-    if functions:
-        result["def"] = functions
-    if classes:
-        result["class"] = classes
-    names = defs["names"]
-    uses = []
-    for name in names.get("Load", ()):
-        if name not in names.get("Param", ()) and name not in names.get("Store", ()):
-            uses.append(name)
-    uses.extend(defs["attrs"])
-    if uses:
-        result["uses"] = uses
-    result["names"] = names
-    result["attrs"] = defs["attrs"]
-    return result
-
-
-def definitions_in_code(input_code):
-    input_ast = ast.parse(input_code)
-    visitor = DefinitionVisitor()
-    visitor.visit(input_ast)
-    definitions = non_empty(visitor.definitions)
-    return definitions
-
-
-def definitions_in_file(filepath):
-    with open(filepath) as f:
-        return definitions_in_code(f.read())
-
-
-def defined_names(prefix, defs, names):
-    for name, funcs in defs.get("def", {}).items():
-        names.setdefault(name, {"defined": []})["defined"].append(prefix + name)
-        defined_names(prefix + name + ".", funcs, names)
-
-    for name, funcs in defs.get("class", {}).items():
-        names.setdefault(name, {"defined": []})["defined"].append(prefix + name)
-        defined_names(prefix + name + ".", funcs, names)
-
-
-def used_names(prefix, item, defs, names):
-    for name, funcs in defs.get("def", {}).items():
-        used_names(prefix + name + ".", name, funcs, names)
-
-    for name, funcs in defs.get("class", {}).items():
-        used_names(prefix + name + ".", name, funcs, names)
-
-    path = prefix.rstrip(".")
-    for used in defs.get("uses", ()):
-        if used in names:
-            if item:
-                names[item].setdefault("uses", []).append(used)
-            names[used].setdefault("used", {}).setdefault(item, []).append(path)
-
-
-if __name__ == "__main__":
-
-    parser = argparse.ArgumentParser(description="Find definitions.")
-    parser.add_argument(
-        "--unused", action="store_true", help="Only list unused definitions"
-    )
-    parser.add_argument(
-        "--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
-    )
-    parser.add_argument(
-        "--pattern", action="append", metavar="REGEXP", help="Search for a pattern"
-    )
-    parser.add_argument(
-        "directories",
-        nargs="+",
-        metavar="DIR",
-        help="Directories to search for definitions",
-    )
-    parser.add_argument(
-        "--referrers",
-        default=0,
-        type=int,
-        help="Include referrers up to the given depth",
-    )
-    parser.add_argument(
-        "--referred",
-        default=0,
-        type=int,
-        help="Include referred down to the given depth",
-    )
-    parser.add_argument(
-        "--format", default="yaml", help="Output format, one of 'yaml' or 'dot'"
-    )
-    args = parser.parse_args()
-
-    definitions = {}
-    for directory in args.directories:
-        for root, _, files in os.walk(directory):
-            for filename in files:
-                if filename.endswith(".py"):
-                    filepath = os.path.join(root, filename)
-                    definitions[filepath] = definitions_in_file(filepath)
-
-    names = {}
-    for filepath, defs in definitions.items():
-        defined_names(filepath + ":", defs, names)
-
-    for filepath, defs in definitions.items():
-        used_names(filepath + ":", None, defs, names)
-
-    patterns = [re.compile(pattern) for pattern in args.pattern or ()]
-    ignore = [re.compile(pattern) for pattern in args.ignore or ()]
-
-    result = {}
-    for name, definition in names.items():
-        if patterns and not any(pattern.match(name) for pattern in patterns):
-            continue
-        if ignore and any(pattern.match(name) for pattern in ignore):
-            continue
-        if args.unused and definition.get("used"):
-            continue
-        result[name] = definition
-
-    referrer_depth = args.referrers
-    referrers = set()
-    while referrer_depth:
-        referrer_depth -= 1
-        for entry in result.values():
-            for used_by in entry.get("used", ()):
-                referrers.add(used_by)
-        for name, definition in names.items():
-            if name not in referrers:
-                continue
-            if ignore and any(pattern.match(name) for pattern in ignore):
-                continue
-            result[name] = definition
-
-    referred_depth = args.referred
-    referred = set()
-    while referred_depth:
-        referred_depth -= 1
-        for entry in result.values():
-            for uses in entry.get("uses", ()):
-                referred.add(uses)
-        for name, definition in names.items():
-            if name not in referred:
-                continue
-            if ignore and any(pattern.match(name) for pattern in ignore):
-                continue
-            result[name] = definition
-
-    if args.format == "yaml":
-        yaml.dump(result, sys.stdout, default_flow_style=False)
-    elif args.format == "dot":
-        print("digraph {")
-        for name, entry in result.items():
-            print(name)
-            for used_by in entry.get("used", ()):
-                if used_by in result:
-                    print(used_by, "->", name)
-        print("}")
-    else:
-        raise ValueError("Unknown format %r" % (args.format))
diff --git a/scripts-dev/hash_history.py b/scripts-dev/hash_history.py
deleted file mode 100644
index 8d6c3d24db..0000000000
--- a/scripts-dev/hash_history.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import sqlite3
-import sys
-
-from unpaddedbase64 import decode_base64, encode_base64
-
-from synapse.crypto.event_signing import (
-    add_event_pdu_content_hash,
-    compute_pdu_event_reference_hash,
-)
-from synapse.federation.units import Pdu
-from synapse.storage._base import SQLBaseStore
-from synapse.storage.pdu import PduStore
-from synapse.storage.signatures import SignatureStore
-
-
-class Store:
-    _get_pdu_tuples = PduStore.__dict__["_get_pdu_tuples"]
-    _get_pdu_content_hashes_txn = SignatureStore.__dict__["_get_pdu_content_hashes_txn"]
-    _get_prev_pdu_hashes_txn = SignatureStore.__dict__["_get_prev_pdu_hashes_txn"]
-    _get_pdu_origin_signatures_txn = SignatureStore.__dict__[
-        "_get_pdu_origin_signatures_txn"
-    ]
-    _store_pdu_content_hash_txn = SignatureStore.__dict__["_store_pdu_content_hash_txn"]
-    _store_pdu_reference_hash_txn = SignatureStore.__dict__[
-        "_store_pdu_reference_hash_txn"
-    ]
-    _store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"]
-    simple_insert_txn = SQLBaseStore.__dict__["simple_insert_txn"]
-
-
-store = Store()
-
-
-def select_pdus(cursor):
-    cursor.execute("SELECT pdu_id, origin FROM pdus ORDER BY depth ASC")
-
-    ids = cursor.fetchall()
-
-    pdu_tuples = store._get_pdu_tuples(cursor, ids)
-
-    pdus = [Pdu.from_pdu_tuple(p) for p in pdu_tuples]
-
-    reference_hashes = {}
-
-    for pdu in pdus:
-        try:
-            if pdu.prev_pdus:
-                print("PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
-                for pdu_id, origin, hashes in pdu.prev_pdus:
-                    ref_alg, ref_hsh = reference_hashes[(pdu_id, origin)]
-                    hashes[ref_alg] = encode_base64(ref_hsh)
-                    store._store_prev_pdu_hash_txn(
-                        cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh
-                    )
-                print("SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
-            pdu = add_event_pdu_content_hash(pdu)
-            ref_alg, ref_hsh = compute_pdu_event_reference_hash(pdu)
-            reference_hashes[(pdu.pdu_id, pdu.origin)] = (ref_alg, ref_hsh)
-            store._store_pdu_reference_hash_txn(
-                cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh
-            )
-
-            for alg, hsh_base64 in pdu.hashes.items():
-                print(alg, hsh_base64)
-                store._store_pdu_content_hash_txn(
-                    cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64)
-                )
-
-        except Exception:
-            print("FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
-
-
-def main():
-    conn = sqlite3.connect(sys.argv[1])
-    cursor = conn.cursor()
-    select_pdus(cursor)
-    conn.commit()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/scripts-dev/list_url_patterns.py b/scripts-dev/list_url_patterns.py
deleted file mode 100755
index e85420dea8..0000000000
--- a/scripts-dev/list_url_patterns.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#! /usr/bin/python
-
-import argparse
-import ast
-import os
-import sys
-
-import yaml
-
-PATTERNS_V1 = []
-PATTERNS_V2 = []
-
-RESULT = {"v1": PATTERNS_V1, "v2": PATTERNS_V2}
-
-
-class CallVisitor(ast.NodeVisitor):
-    def visit_Call(self, node):
-        if isinstance(node.func, ast.Name):
-            name = node.func.id
-        else:
-            return
-
-        if name == "client_patterns":
-            PATTERNS_V2.append(node.args[0].s)
-
-
-def find_patterns_in_code(input_code):
-    input_ast = ast.parse(input_code)
-    visitor = CallVisitor()
-    visitor.visit(input_ast)
-
-
-def find_patterns_in_file(filepath):
-    with open(filepath) as f:
-        find_patterns_in_code(f.read())
-
-
-parser = argparse.ArgumentParser(description="Find url patterns.")
-
-parser.add_argument(
-    "directories",
-    nargs="+",
-    metavar="DIR",
-    help="Directories to search for definitions",
-)
-
-args = parser.parse_args()
-
-
-for directory in args.directories:
-    for root, _, files in os.walk(directory):
-        for filename in files:
-            if filename.endswith(".py"):
-                filepath = os.path.join(root, filename)
-                find_patterns_in_file(filepath)
-
-PATTERNS_V1.sort()
-PATTERNS_V2.sort()
-
-yaml.dump(RESULT, sys.stdout, default_flow_style=False)
diff --git a/scripts-dev/tail-synapse.py b/scripts-dev/tail-synapse.py
deleted file mode 100644
index 44e3a6dbf1..0000000000
--- a/scripts-dev/tail-synapse.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import collections
-import json
-import sys
-import time
-
-import requests
-
-Entry = collections.namedtuple("Entry", "name position rows")
-
-ROW_TYPES = {}
-
-
-def row_type_for_columns(name, column_names):
-    column_names = tuple(column_names)
-    row_type = ROW_TYPES.get((name, column_names))
-    if row_type is None:
-        row_type = collections.namedtuple(name, column_names)
-        ROW_TYPES[(name, column_names)] = row_type
-    return row_type
-
-
-def parse_response(content):
-    streams = json.loads(content)
-    result = {}
-    for name, value in streams.items():
-        row_type = row_type_for_columns(name, value["field_names"])
-        position = value["position"]
-        rows = [row_type(*row) for row in value["rows"]]
-        result[name] = Entry(name, position, rows)
-    return result
-
-
-def replicate(server, streams):
-    return parse_response(
-        requests.get(
-            server + "/_synapse/replication", verify=False, params=streams
-        ).content
-    )
-
-
-def main():
-    server = sys.argv[1]
-
-    streams = None
-    while not streams:
-        try:
-            streams = {
-                row.name: row.position
-                for row in replicate(server, {"streams": "-1"})["streams"].rows
-            }
-        except requests.exceptions.ConnectionError:
-            time.sleep(0.1)
-
-    while True:
-        try:
-            results = replicate(server, streams)
-        except Exception:
-            sys.stdout.write("connection_lost(" + repr(streams) + ")\n")
-            break
-        for update in results.values():
-            for row in update.rows:
-                sys.stdout.write(repr(row) + "\n")
-            streams[update.name] = update.position
-
-
-if __name__ == "__main__":
-    main()
diff --git a/scripts-dev/test_postgresql.sh b/scripts-dev/test_postgresql.sh
deleted file mode 100755
index 43cfa256e4..0000000000
--- a/scripts-dev/test_postgresql.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-
-# This script builds the Docker image to run the PostgreSQL tests, and then runs
-# the tests. It uses a dedicated tox environment so that we don't have to
-# rebuild it each time.
-
-# Command line arguments to this script are forwarded to "tox" and then to "trial".
-
-set -e
-
-# Build, and tag
-docker build docker/ \
-  --build-arg "UID=$(id -u)" \
-  --build-arg "GID=$(id -g)" \
-  -f docker/Dockerfile-pgtests \
-  -t synapsepgtests
-
-# Run, mounting the current directory into /src
-docker run --rm -it -v "$(pwd):/src" -v synapse-pg-test-tox:/tox synapsepgtests "$@"
diff --git a/setup.py b/setup.py
index 63da71ad7b..48bd418bb8 100755
--- a/setup.py
+++ b/setup.py
@@ -95,7 +95,7 @@ CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)
 # We pin black so that our tests don't start failing on new releases.
 CONDITIONAL_REQUIREMENTS["lint"] = [
     "isort==5.7.0",
-    "black==21.12b0",
+    "black==22.3.0",
     "flake8-comprehensions",
     "flake8-bugbear==21.3.2",
     "flake8",
@@ -128,7 +128,7 @@ CONDITIONAL_REQUIREMENTS["dev"] = (
     + CONDITIONAL_REQUIREMENTS["test"]
     + [
         # The following are used by the release script
-        "click==7.1.2",
+        "click==8.1.0",
         "redbaron==0.9.2",
         "GitPython==3.1.14",
         "commonmark==0.9.1",
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index c38666da18..6324df883b 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -97,6 +97,7 @@ BOOLEAN_COLUMNS = {
     "users": ["shadow_banned"],
     "e2e_fallback_keys_json": ["used"],
     "access_tokens": ["used"],
+    "device_lists_changes_in_room": ["converted_to_destinations"],
 }
 
 
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index b0c08a074d..92907415e6 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -23,7 +23,7 @@ from typing_extensions import Final
 MAX_PDU_SIZE = 65536
 
 # the "depth" field on events is limited to 2**63 - 1
-MAX_DEPTH = 2 ** 63 - 1
+MAX_DEPTH = 2**63 - 1
 
 # the maximum length for a room alias is 255 characters
 MAX_ALIAS_LENGTH = 255
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 6f8e33a156..2b0d92cbae 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -33,7 +33,6 @@ from synapse.handlers.admin import ExfiltrationWriter
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
 from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.events import SlavedEventStore
@@ -61,7 +60,6 @@ class AdminCmdSlavedStore(
     SlavedDeviceStore,
     SlavedPushRuleStore,
     SlavedEventStore,
-    SlavedClientIpStore,
     BaseSlavedStore,
     RoomWorkerStore,
 ):
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index b6f510ed30..1865c671f4 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -53,7 +53,6 @@ from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
 from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
@@ -247,7 +246,6 @@ class GenericWorkerSlavedStore(
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
     SlavedProfileStore,
-    SlavedClientIpStore,
     SlavedFilteringStore,
     MonthlyActiveUsersWorkerStore,
     MediaRepositoryStore,
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 07ec95f1d6..d23d9221bc 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -1,4 +1,5 @@
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2022 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +23,13 @@ from netaddr import IPSet
 
 from synapse.api.constants import EventTypes
 from synapse.events import EventBase
-from synapse.types import GroupID, JsonDict, UserID, get_domain_from_id
+from synapse.types import (
+    DeviceListUpdates,
+    GroupID,
+    JsonDict,
+    UserID,
+    get_domain_from_id,
+)
 from synapse.util.caches.descriptors import _CacheContext, cached
 
 if TYPE_CHECKING:
@@ -400,6 +407,7 @@ class AppServiceTransaction:
         to_device_messages: List[JsonDict],
         one_time_key_counts: TransactionOneTimeKeyCounts,
         unused_fallback_keys: TransactionUnusedFallbackKeys,
+        device_list_summary: DeviceListUpdates,
     ):
         self.service = service
         self.id = id
@@ -408,6 +416,7 @@ class AppServiceTransaction:
         self.to_device_messages = to_device_messages
         self.one_time_key_counts = one_time_key_counts
         self.unused_fallback_keys = unused_fallback_keys
+        self.device_list_summary = device_list_summary
 
     async def send(self, as_api: "ApplicationServiceApi") -> bool:
         """Sends this transaction using the provided AS API interface.
@@ -424,6 +433,7 @@ class AppServiceTransaction:
             to_device_messages=self.to_device_messages,
             one_time_key_counts=self.one_time_key_counts,
             unused_fallback_keys=self.unused_fallback_keys,
+            device_list_summary=self.device_list_summary,
             txn_id=self.id,
         )
 
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 98fe354014..0cdbb04bfb 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -1,4 +1,5 @@
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2022 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,7 +28,7 @@ from synapse.appservice import (
 from synapse.events import EventBase
 from synapse.events.utils import SerializeEventConfig, serialize_event
 from synapse.http.client import SimpleHttpClient
-from synapse.types import JsonDict, ThirdPartyInstanceID
+from synapse.types import DeviceListUpdates, JsonDict, ThirdPartyInstanceID
 from synapse.util.caches.response_cache import ResponseCache
 
 if TYPE_CHECKING:
@@ -225,6 +226,7 @@ class ApplicationServiceApi(SimpleHttpClient):
         to_device_messages: List[JsonDict],
         one_time_key_counts: TransactionOneTimeKeyCounts,
         unused_fallback_keys: TransactionUnusedFallbackKeys,
+        device_list_summary: DeviceListUpdates,
         txn_id: Optional[int] = None,
     ) -> bool:
         """
@@ -268,6 +270,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                 }
             )
 
+        # TODO: Update to stable prefixes once MSC3202 completes FCP merge
         if service.msc3202_transaction_extensions:
             if one_time_key_counts:
                 body[
@@ -277,6 +280,11 @@ class ApplicationServiceApi(SimpleHttpClient):
                 body[
                     "org.matrix.msc3202.device_unused_fallback_keys"
                 ] = unused_fallback_keys
+            if device_list_summary:
+                body["org.matrix.msc3202.device_lists"] = {
+                    "changed": list(device_list_summary.changed),
+                    "left": list(device_list_summary.left),
+                }
 
         try:
             await self.put_json(
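For illustration, when `msc3202_transaction_extensions` is enabled the transaction body gains the unstable-prefixed device list summary built above. A minimal sketch of that part of the payload, with invented user IDs and the other transaction fields omitted:

    # Hypothetical excerpt of a transaction body; only the key added in this
    # change is shown, and the user IDs are made up.
    body = {
        "events": [],
        "org.matrix.msc3202.device_lists": {
            "changed": ["@alice:example.org"],
            "left": ["@bob:example.org"],
        },
    }
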
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 72417151ba..3b49e60716 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -72,7 +72,7 @@ from synapse.events import EventBase
 from synapse.logging.context import run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.databases.main import DataStore
-from synapse.types import JsonDict
+from synapse.types import DeviceListUpdates, JsonDict
 from synapse.util import Clock
 
 if TYPE_CHECKING:
@@ -122,6 +122,7 @@ class ApplicationServiceScheduler:
         events: Optional[Collection[EventBase]] = None,
         ephemeral: Optional[Collection[JsonDict]] = None,
         to_device_messages: Optional[Collection[JsonDict]] = None,
+        device_list_summary: Optional[DeviceListUpdates] = None,
     ) -> None:
         """
         Enqueue some data to be sent off to an application service.
@@ -133,10 +134,18 @@ class ApplicationServiceScheduler:
             to_device_messages: The to-device messages to send. These differ from normal
                 to-device messages sent to clients, as they have 'to_device_id' and
                 'to_user_id' fields.
+            device_list_summary: A summary of users that the application service either needs
+                to refresh the device lists of, or that the application service no longer
+                needs to track the device lists of.
         """
         # We purposefully allow this method to run with empty events/ephemeral
         # collections, so that callers do not need to check iterable size themselves.
-        if not events and not ephemeral and not to_device_messages:
+        if (
+            not events
+            and not ephemeral
+            and not to_device_messages
+            and not device_list_summary
+        ):
             return
 
         if events:
@@ -147,6 +156,10 @@ class ApplicationServiceScheduler:
             self.queuer.queued_to_device_messages.setdefault(appservice.id, []).extend(
                 to_device_messages
             )
+        if device_list_summary:
+            self.queuer.queued_device_list_summaries.setdefault(
+                appservice.id, []
+            ).append(device_list_summary)
 
         # Kick off a new application service transaction
         self.queuer.start_background_request(appservice)
@@ -169,6 +182,8 @@ class _ServiceQueuer:
         self.queued_ephemeral: Dict[str, List[JsonDict]] = {}
         # dict of {service_id: [to_device_message_json]}
         self.queued_to_device_messages: Dict[str, List[JsonDict]] = {}
+        # dict of {service_id: [device_list_summary]}
+        self.queued_device_list_summaries: Dict[str, List[DeviceListUpdates]] = {}
 
         # the appservices which currently have a transaction in flight
         self.requests_in_flight: Set[str] = set()
@@ -212,7 +227,35 @@ class _ServiceQueuer:
                 ]
                 del all_to_device_messages[:MAX_TO_DEVICE_MESSAGES_PER_TRANSACTION]
 
-                if not events and not ephemeral and not to_device_messages_to_send:
+                # Consolidate any pending device list summaries into a single, up-to-date
+                # summary.
+                # Note: this code assumes that in a single DeviceListUpdates, a user will
+                # never be in both "changed" and "left" sets.
+                device_list_summary = DeviceListUpdates()
+                for summary in self.queued_device_list_summaries.get(service.id, []):
+                    # For every user in the incoming "changed" set:
+                    #   * Remove them from the existing "left" set if necessary
+                    #     (as we need to start tracking them again)
+                    #   * Add them to the existing "changed" set if necessary.
+                    device_list_summary.left.difference_update(summary.changed)
+                    device_list_summary.changed.update(summary.changed)
+
+                    # For every user in the incoming "left" set:
+                    #   * Remove them from the existing "changed" set if necessary
+                    #     (we no longer need to track them)
+                    #   * Add them to the existing "left" set if necessary.
+                    device_list_summary.changed.difference_update(summary.left)
+                    device_list_summary.left.update(summary.left)
+                self.queued_device_list_summaries.clear()
+
+                if (
+                    not events
+                    and not ephemeral
+                    and not to_device_messages_to_send
+                    # A DeviceListUpdates is truthy if either its 'changed' or 'left' set
+                    # has at least one entry, and falsy otherwise
+                    and not device_list_summary
+                ):
                     return
 
                 one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None
@@ -240,6 +283,7 @@ class _ServiceQueuer:
                         to_device_messages_to_send,
                         one_time_key_counts,
                         unused_fallback_keys,
+                        device_list_summary,
                     )
                 except Exception:
                     logger.exception("AS request failed")
@@ -322,6 +366,7 @@ class _TransactionController:
         to_device_messages: Optional[List[JsonDict]] = None,
         one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None,
         unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None,
+        device_list_summary: Optional[DeviceListUpdates] = None,
     ) -> None:
         """
         Create a transaction with the given data and send to the provided
@@ -336,6 +381,7 @@ class _TransactionController:
                 appservice devices in the transaction.
             unused_fallback_keys: Lists of unused fallback keys for relevant
                 appservice devices in the transaction.
+            device_list_summary: The device list summary to include in the transaction.
         """
         try:
             txn = await self.store.create_appservice_txn(
@@ -345,6 +391,7 @@ class _TransactionController:
                 to_device_messages=to_device_messages or [],
                 one_time_key_counts=one_time_key_counts or {},
                 unused_fallback_keys=unused_fallback_keys or {},
+                device_list_summary=device_list_summary or DeviceListUpdates(),
             )
             service_is_up = await self._is_service_up(service)
             if service_is_up:
@@ -428,7 +475,7 @@ class _Recoverer:
                 "as-recoverer-%s" % (self.service.id,), self.retry
             )
 
-        delay = 2 ** self.backoff_counter
+        delay = 2**self.backoff_counter
         logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
         self.clock.call_later(delay, _retry)
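The consolidation loop above folds every queued summary into one DeviceListUpdates, letting later updates override earlier ones for the same user. A standalone sketch of the same merge rule over plain sets (a hypothetical helper, not part of this change), followed by a check that a user who changes and then leaves ends up only in "left":

    def merge_summaries(summaries):
        # Re-statement of the consolidation rule using plain (changed, left) set pairs.
        changed, left = set(), set()
        for summary_changed, summary_left in summaries:
            left -= summary_changed      # start tracking these users again
            changed |= summary_changed
            changed -= summary_left      # stop tracking these users
            left |= summary_left
        return changed, left

    assert merge_summaries(
        [({"@alice:example.org"}, set()), (set(), {"@alice:example.org"})]
    ) == (set(), {"@alice:example.org"})
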
 
diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index 439bfe1526..ada165f238 100644
--- a/synapse/config/appservice.py
+++ b/synapse/config/appservice.py
@@ -170,6 +170,7 @@ def _load_appservice(
     # When enabled, appservice transactions contain the following information:
     #  - device One-Time Key counts
     #  - device unused fallback key usage states
+    #  - device list changes
     msc3202_transaction_extensions = as_info.get("org.matrix.msc3202", False)
     if not isinstance(msc3202_transaction_extensions, bool):
         raise ValueError(
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 064db4487c..43db5fcdd9 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -59,8 +59,9 @@ class ExperimentalConfig(Config):
             "msc3202_device_masquerading", False
         )
 
-        # Portion of MSC3202 related to transaction extensions:
-        # sending one-time key counts and fallback key usage to application services.
+        # The portion of MSC3202 related to transaction extensions:
+        # sending device list changes, one-time key counts and fallback key
+        # usage to application services.
         self.msc3202_transaction_extensions: bool = experimental.get(
             "msc3202_transaction_extensions", False
         )
@@ -77,3 +78,6 @@ class ExperimentalConfig(Config):
 
         # The deprecated groups feature.
         self.groups_enabled: bool = experimental.get("groups_enabled", True)
+
+        # MSC2654: Unread counts
+        self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)
diff --git a/synapse/config/key.py b/synapse/config/key.py
index ee83c6c06b..f5377e7d9c 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -16,7 +16,7 @@
 import hashlib
 import logging
 import os
-from typing import Any, Dict, Iterator, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
 
 import attr
 import jsonschema
@@ -38,6 +38,9 @@ from synapse.util.stringutils import random_string, random_string_with_symbols
 
 from ._base import Config, ConfigError
 
+if TYPE_CHECKING:
+    from signedjson.key import VerifyKeyWithExpiry
+
 INSECURE_NOTARY_ERROR = """\
 Your server is configured to accept key server responses without signature
 validation or TLS certificate validation. This is likely to be very insecure. If
@@ -300,7 +303,7 @@ class KeyConfig(Config):
 
     def read_old_signing_keys(
         self, old_signing_keys: Optional[JsonDict]
-    ) -> Dict[str, VerifyKey]:
+    ) -> Dict[str, "VerifyKeyWithExpiry"]:
         if old_signing_keys is None:
             return {}
         keys = {}
@@ -308,8 +311,8 @@ class KeyConfig(Config):
             if is_signing_algorithm_supported(key_id):
                 key_base64 = key_data["key"]
                 key_bytes = decode_base64(key_base64)
-                verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                verify_key.expired_ts = key_data["expired_ts"]
+                verify_key: "VerifyKeyWithExpiry" = decode_verify_key_bytes(key_id, key_bytes)  # type: ignore[assignment]
+                verify_key.expired = key_data["expired_ts"]
                 keys[key_id] = verify_key
             else:
                 raise ConfigError(
@@ -422,7 +425,7 @@ def _parse_key_servers(
         server_name = server["server_name"]
         result = TrustedKeyServer(server_name=server_name)
 
-        verify_keys = server.get("verify_keys")
+        verify_keys: Optional[Dict[str, str]] = server.get("verify_keys")
         if verify_keys is not None:
             result.verify_keys = {}
             for key_id, key_base64 in verify_keys.items():
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 38de4b8000..b3a9e50752 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -680,6 +680,18 @@ class ServerConfig(Config):
             config.get("use_account_validity_in_account_status") or False
         )
 
+        # This is a temporary option that enables fully using the new
+        # `device_lists_changes_in_room` without the backwards compat code. This
+        # is primarily for testing. If enabled, the server should *not* be
+        # downgraded, as it may lead to missing device list updates.
+        self.use_new_device_lists_changes_in_room = (
+            config.get("use_new_device_lists_changes_in_room") or False
+        )
+
+        self.rooms_to_exclude_from_sync: List[str] = (
+            config.get("exclude_rooms_from_sync") or []
+        )
+
     def has_tls_listener(self) -> bool:
         return any(listener.tls for listener in self.listeners)
 
@@ -1234,6 +1246,15 @@ class ServerConfig(Config):
           # information about using custom templates.
           #
           #custom_template_directory: /path/to/custom/templates/
+
+        # List of rooms to exclude from sync responses. This is useful for server
+        # administrators wishing to group users into a room without these users being able
+        # to see it from their client.
+        #
+        # By default, no room is excluded.
+        #
+        #exclude_rooms_from_sync:
+        #    - !foo:example.com
         """
             % locals()
         )
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 93d56c077a..c88afb2986 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -176,13 +176,13 @@ class Keyring:
         self._local_verify_keys: Dict[str, FetchKeyResult] = {}
         for key_id, key in hs.config.key.old_signing_keys.items():
             self._local_verify_keys[key_id] = FetchKeyResult(
-                verify_key=key, valid_until_ts=key.expired_ts
+                verify_key=key, valid_until_ts=key.expired
             )
 
         vk = get_verify_key(hs.signing_key)
         self._local_verify_keys[f"{vk.alg}:{vk.version}"] = FetchKeyResult(
             verify_key=vk,
-            valid_until_ts=2 ** 63,  # fake future timestamp
+            valid_until_ts=2**63,  # fake future timestamp
         )
 
     async def verify_json_for_server(
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index 1ea1bb7d37..98c203ada0 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -15,7 +15,7 @@ import logging
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 
 import attr
-from nacl.signing import SigningKey
+from signedjson.types import SigningKey
 
 from synapse.api.constants import MAX_DEPTH
 from synapse.api.room_versions import (
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index bfca454f51..ef68e20282 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -42,6 +42,7 @@ CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
 CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]]
 ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable]
 ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable]
+ON_THREEPID_BIND_CALLBACK = Callable[[str, str, str], Awaitable]
 
 
 def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
@@ -169,6 +170,7 @@ class ThirdPartyEventRules:
         self._on_user_deactivation_status_changed_callbacks: List[
             ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
         ] = []
+        self._on_threepid_bind_callbacks: List[ON_THREEPID_BIND_CALLBACK] = []
 
     def register_third_party_rules_callbacks(
         self,
@@ -187,6 +189,7 @@ class ThirdPartyEventRules:
         on_user_deactivation_status_changed: Optional[
             ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
         ] = None,
+        on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None,
     ) -> None:
         """Register callbacks from modules for each hook."""
         if check_event_allowed is not None:
@@ -221,6 +224,9 @@ class ThirdPartyEventRules:
                 on_user_deactivation_status_changed,
             )
 
+        if on_threepid_bind is not None:
+            self._on_threepid_bind_callbacks.append(on_threepid_bind)
+
     async def check_event_allowed(
         self, event: EventBase, context: EventContext
     ) -> Tuple[bool, Optional[dict]]:
@@ -479,3 +485,23 @@ class ThirdPartyEventRules:
                 logger.exception(
                     "Failed to run module API callback %s: %s", callback, e
                 )
+
+    async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> None:
+        """Called after a threepid association has been verified and stored.
+
+        Note that this callback is only called when an association is created on the
+        local homeserver, not when one is created on an identity server (the latter is
+        only kept track of so that it can be unbound on the same IS later on).
+
+        Args:
+            user_id: the user being associated with the threepid.
+            medium: the threepid's medium.
+            address: the threepid's address.
+        """
+        for callback in self._on_threepid_bind_callbacks:
+            try:
+                await callback(user_id, medium, address)
+            except Exception as e:
+                logger.exception(
+                    "Failed to run module API callback %s: %s", callback, e
+                )
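A module hooking into the new callback registers a coroutine taking (user_id, medium, address), mirroring ON_THREEPID_BIND_CALLBACK above. A minimal sketch, assuming the module API exposes the same `register_third_party_rules_callbacks` name as this class (the module itself is invented):

    import logging

    logger = logging.getLogger(__name__)

    class ThreepidAuditModule:
        def __init__(self, config, api):
            # Assumption: the module API mirrors the registration method shown above.
            api.register_third_party_rules_callbacks(
                on_threepid_bind=self.on_threepid_bind,
            )

        async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> None:
            # Runs after a threepid association is verified and stored locally.
            logger.info("threepid %s/%s bound to %s", medium, address, user_id)
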
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 7120062127..918e87ed9c 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -49,7 +49,7 @@ if TYPE_CHECKING:
 #       the literal fields "foo\" and "bar" but will instead be treated as "foo\\.bar"
 SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")
 
-CANONICALJSON_MAX_INT = (2 ** 53) - 1
+CANONICALJSON_MAX_INT = (2**53) - 1
 CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT
 
 
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 177b4f8991..4af9fbc5d1 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -12,8 +12,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
 import random
-from typing import TYPE_CHECKING, Collection, List, Optional, Tuple
+from typing import TYPE_CHECKING, Awaitable, Callable, Collection, List, Optional, Tuple
 
 from synapse.replication.http.account_data import (
     ReplicationAddTagRestServlet,
@@ -27,6 +28,12 @@ from synapse.types import JsonDict, UserID
 if TYPE_CHECKING:
     from synapse.server import HomeServer
 
+logger = logging.getLogger(__name__)
+
+ON_ACCOUNT_DATA_UPDATED_CALLBACK = Callable[
+    [str, Optional[str], str, JsonDict], Awaitable
+]
+
 
 class AccountDataHandler:
     def __init__(self, hs: "HomeServer"):
@@ -40,6 +47,44 @@ class AccountDataHandler:
         self._remove_tag_client = ReplicationRemoveTagRestServlet.make_client(hs)
         self._account_data_writers = hs.config.worker.writers.account_data
 
+        self._on_account_data_updated_callbacks: List[
+            ON_ACCOUNT_DATA_UPDATED_CALLBACK
+        ] = []
+
+    def register_module_callbacks(
+        self, on_account_data_updated: Optional[ON_ACCOUNT_DATA_UPDATED_CALLBACK] = None
+    ) -> None:
+        """Register callbacks from modules."""
+        if on_account_data_updated is not None:
+            self._on_account_data_updated_callbacks.append(on_account_data_updated)
+
+    async def _notify_modules(
+        self,
+        user_id: str,
+        room_id: Optional[str],
+        account_data_type: str,
+        content: JsonDict,
+    ) -> None:
+        """Notifies modules about new account data changes.
+
+        A change can be either a new account data type being added, or the content
+        associated with a type being changed. Account data for a given type is removed by
+        changing the associated content to an empty dictionary.
+
+        Note that this is not called when the tags associated with a room change.
+
+        Args:
+            user_id: The user whose account data is changing.
+            room_id: The ID of the room the account data change concerns, if any.
+            account_data_type: The type of the account data.
+            content: The content that is now associated with this type.
+        """
+        for callback in self._on_account_data_updated_callbacks:
+            try:
+                await callback(user_id, room_id, account_data_type, content)
+            except Exception as e:
+                logger.exception("Failed to run module callback %s: %s", callback, e)
+
     async def add_account_data_to_room(
         self, user_id: str, room_id: str, account_data_type: str, content: JsonDict
     ) -> int:
@@ -63,6 +108,8 @@ class AccountDataHandler:
                 "account_data_key", max_stream_id, users=[user_id]
             )
 
+            await self._notify_modules(user_id, room_id, account_data_type, content)
+
             return max_stream_id
         else:
             response = await self._room_data_client(
@@ -96,6 +143,9 @@ class AccountDataHandler:
             self._notifier.on_new_event(
                 "account_data_key", max_stream_id, users=[user_id]
             )
+
+            await self._notify_modules(user_id, None, account_data_type, content)
+
             return max_stream_id
         else:
             response = await self._user_data_client(
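A callback passed to `register_module_callbacks` must match ON_ACCOUNT_DATA_UPDATED_CALLBACK, i.e. take (user_id, room_id, account_data_type, content) and be awaitable. A hedged sketch of such a callback (how a module reaches the registration method is not shown in this hunk and is left out here):

    from typing import Any, Dict, Optional

    async def on_account_data_updated(
        user_id: str,
        room_id: Optional[str],
        account_data_type: str,
        content: Dict[str, Any],
    ) -> None:
        # An empty content dict means the account data of this type was removed.
        if not content:
            print(f"{user_id} cleared {account_data_type} (room={room_id})")
        else:
            print(f"{user_id} updated {account_data_type} (room={room_id})")
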
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index bd913e524e..316c4b677c 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -33,7 +33,13 @@ from synapse.metrics.background_process_metrics import (
     wrap_as_background_process,
 )
 from synapse.storage.databases.main.directory import RoomAliasMapping
-from synapse.types import JsonDict, RoomAlias, RoomStreamToken, UserID
+from synapse.types import (
+    DeviceListUpdates,
+    JsonDict,
+    RoomAlias,
+    RoomStreamToken,
+    UserID,
+)
 from synapse.util.async_helpers import Linearizer
 from synapse.util.metrics import Measure
 
@@ -58,6 +64,9 @@ class ApplicationServicesHandler:
         self._msc2409_to_device_messages_enabled = (
             hs.config.experimental.msc2409_to_device_messages_enabled
         )
+        self._msc3202_transaction_extensions_enabled = (
+            hs.config.experimental.msc3202_transaction_extensions
+        )
 
         self.current_max = 0
         self.is_processing = False
@@ -204,9 +213,9 @@ class ApplicationServicesHandler:
         Args:
             stream_key: The stream the event came from.
 
-                `stream_key` can be "typing_key", "receipt_key", "presence_key" or
-                "to_device_key". Any other value for `stream_key` will cause this function
-                to return early.
+                `stream_key` can be "typing_key", "receipt_key", "presence_key",
+                "to_device_key" or "device_list_key". Any other value for `stream_key`
+                will cause this function to return early.
 
                 Ephemeral events will only be pushed to appservices that have opted into
                 receiving them by setting `push_ephemeral` to true in their registration
@@ -230,6 +239,7 @@ class ApplicationServicesHandler:
             "receipt_key",
             "presence_key",
             "to_device_key",
+            "device_list_key",
         ):
             return
 
@@ -253,15 +263,37 @@ class ApplicationServicesHandler:
         ):
             return
 
+        # Ignore device lists if the feature flag is not enabled
+        if (
+            stream_key == "device_list_key"
+            and not self._msc3202_transaction_extensions_enabled
+        ):
+            return
+
         # Check whether there are any appservices which have registered to receive
         # ephemeral events.
         #
         # Note that whether these events are actually relevant to these appservices
         # is decided later on.
+        services = self.store.get_app_services()
         services = [
             service
-            for service in self.store.get_app_services()
-            if service.supports_ephemeral
+            for service in services
+            # Different stream keys require different support booleans
+            if (
+                stream_key
+                in (
+                    "typing_key",
+                    "receipt_key",
+                    "presence_key",
+                    "to_device_key",
+                )
+                and service.supports_ephemeral
+            )
+            or (
+                stream_key == "device_list_key"
+                and service.msc3202_transaction_extensions
+            )
         ]
         if not services:
             # Bail out early if none of the target appservices have explicitly registered
@@ -336,6 +368,20 @@ class ApplicationServicesHandler:
                             service, "to_device", new_token
                         )
 
+                    elif stream_key == "device_list_key":
+                        device_list_summary = await self._get_device_list_summary(
+                            service, new_token
+                        )
+                        if device_list_summary:
+                            self.scheduler.enqueue_for_appservice(
+                                service, device_list_summary=device_list_summary
+                            )
+
+                        # Persist the latest handled stream token for this appservice
+                        await self.store.set_appservice_stream_type_pos(
+                            service, "device_list", new_token
+                        )
+
     async def _handle_typing(
         self, service: ApplicationService, new_token: int
     ) -> List[JsonDict]:
@@ -542,6 +588,96 @@ class ApplicationServicesHandler:
 
         return message_payload
 
+    async def _get_device_list_summary(
+        self,
+        appservice: ApplicationService,
+        new_key: int,
+    ) -> DeviceListUpdates:
+        """
+        Retrieve a list of users who have changed their device lists.
+
+        Args:
+            appservice: The application service to retrieve device list changes for.
+            new_key: The stream key of the device list change that triggered this method call.
+
+        Returns:
+            A set of device list updates, comprising the users that the appservice needs to:
+                * resync the device list of, and
+                * stop tracking the device list of.
+        """
+        # Fetch the last successfully processed device list update stream ID
+        # for this appservice.
+        from_key = await self.store.get_type_stream_id_for_appservice(
+            appservice, "device_list"
+        )
+
+        # Fetch the users who have modified their device list since then.
+        users_with_changed_device_lists = (
+            await self.store.get_users_whose_devices_changed(from_key, to_key=new_key)
+        )
+
+        # Filter out any users the application service is not interested in
+        #
+        # For each user who changed their device list, we want to check whether this
+        # appservice would be interested in the change.
+        filtered_users_with_changed_device_lists = {
+            user_id
+            for user_id in users_with_changed_device_lists
+            if await self._is_appservice_interested_in_device_lists_of_user(
+                appservice, user_id
+            )
+        }
+
+        # Create a summary of "changed" and "left" users.
+        # TODO: Calculate "left" users.
+        device_list_summary = DeviceListUpdates(
+            changed=filtered_users_with_changed_device_lists
+        )
+
+        return device_list_summary
+
+    async def _is_appservice_interested_in_device_lists_of_user(
+        self,
+        appservice: ApplicationService,
+        user_id: str,
+    ) -> bool:
+        """
+        Returns whether a given application service is interested in the device list
+        updates of a given user.
+
+        The application service is interested in the user's device list updates if any
+        of the following are true:
+            * The user is the appservice's sender localpart user.
+            * The user is in the appservice's user namespace.
+            * At least one member of one room that the user is a part of is in the
+              appservice's user namespace.
+            * The appservice is explicitly (via room ID or alias) interested in at
+              least one room that the user is in.
+
+        Args:
+            appservice: The application service to gauge interest of.
+            user_id: The ID of the user whose device list interest is in question.
+
+        Returns:
+            True if the application service is interested in the user's device lists, False
+            otherwise.
+        """
+        # This method checks both whether the user is the appservice's sender localpart
+        # user and whether the user is in the appservice's user namespace.
+        if appservice.is_interested_in_user(user_id):
+            return True
+
+        # Determine whether any of the rooms the user is in justifies sending this
+        # device list update to the application service.
+        room_ids = await self.store.get_rooms_for_user(user_id)
+        for room_id in room_ids:
+            # This method covers checking room members for appservice interest as well as
+            # room ID and alias checks.
+            if await appservice.is_interested_in_room(room_id, self.store):
+                return True
+
+        return False
+
     async def query_user_exists(self, user_id: str) -> bool:
         """Check if any application service knows this user_id exists.
 
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 3e29c96a49..86991d26ce 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -211,6 +211,7 @@ class AuthHandler:
         self.macaroon_gen = hs.get_macaroon_generator()
         self._password_enabled = hs.config.auth.password_enabled
         self._password_localdb_enabled = hs.config.auth.password_localdb_enabled
+        self._third_party_rules = hs.get_third_party_event_rules()
 
         # Ratelimiter for failed auth during UIA. Uses same ratelimit config
         # as per `rc_login.failed_attempts`.
@@ -1505,6 +1506,8 @@ class AuthHandler:
             user_id, medium, address, validated_at, self.hs.get_clock().time_msec()
         )
 
+        await self._third_party_rules.on_threepid_bind(user_id, medium, address)
+
     async def delete_threepid(
         self, user_id: str, medium: str, address: str, id_server: Optional[str] = None
     ) -> bool:
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index d5ccaa0c37..c710c02cf9 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -37,7 +37,10 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.logging.opentracing import log_kv, set_tag, trace
-from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.metrics.background_process_metrics import (
+    run_as_background_process,
+    wrap_as_background_process,
+)
 from synapse.types import (
     JsonDict,
     StreamToken,
@@ -278,6 +281,22 @@ class DeviceHandler(DeviceWorkerHandler):
 
         hs.get_distributor().observe("user_left_room", self.user_left_room)
 
+        # Whether `_handle_new_device_update_async` is currently processing.
+        self._handle_new_device_update_is_processing = False
+
+        # Whether a new device update may have happened while the loop was
+        # processing.
+        self._handle_new_device_update_new_data = False
+
+        # On start up check if there are any updates pending.
+        hs.get_reactor().callWhenRunning(self._handle_new_device_update_async)
+
+        # Used to decide if we calculate outbound pokes up front or not. By
+        # default we do, to allow safely downgrading Synapse.
+        self.use_new_device_lists_changes_in_room = (
+            hs.config.server.use_new_device_lists_changes_in_room
+        )
+
     def _check_device_name_length(self, name: Optional[str]) -> None:
         """
         Checks whether a device name is longer than the maximum allowed length.
@@ -469,19 +488,26 @@ class DeviceHandler(DeviceWorkerHandler):
             # No changes to notify about, so this is a no-op.
             return
 
-        users_who_share_room = await self.store.get_users_who_share_room_with_user(
-            user_id
-        )
+        room_ids = await self.store.get_rooms_for_user(user_id)
+
+        hosts: Optional[Set[str]] = None
+        if not self.use_new_device_lists_changes_in_room:
+            hosts = set()
 
-        hosts: Set[str] = set()
-        if self.hs.is_mine_id(user_id):
-            hosts.update(get_domain_from_id(u) for u in users_who_share_room)
-            hosts.discard(self.server_name)
+            if self.hs.is_mine_id(user_id):
+                for room_id in room_ids:
+                    joined_users = await self.store.get_users_in_room(room_id)
+                    hosts.update(get_domain_from_id(u) for u in joined_users)
 
-        set_tag("target_hosts", hosts)
+                set_tag("target_hosts", hosts)
+
+                hosts.discard(self.server_name)
 
         position = await self.store.add_device_change_to_streams(
-            user_id, device_ids, list(hosts)
+            user_id,
+            device_ids,
+            hosts=hosts,
+            room_ids=room_ids,
         )
 
         if not position:
@@ -495,9 +521,12 @@ class DeviceHandler(DeviceWorkerHandler):
 
         # specify the user ID too since the user should always get their own device list
         # updates, even if they aren't in any rooms.
-        users_to_notify = users_who_share_room.union({user_id})
+        self.notifier.on_new_event(
+            "device_list_key", position, users={user_id}, rooms=room_ids
+        )
 
-        self.notifier.on_new_event("device_list_key", position, users=users_to_notify)
+        # We may need to do some processing asynchronously.
+        self._handle_new_device_update_async()
 
         if hosts:
             logger.info(
@@ -614,6 +643,85 @@ class DeviceHandler(DeviceWorkerHandler):
 
         return {"success": True}
 
+    @wrap_as_background_process("_handle_new_device_update_async")
+    async def _handle_new_device_update_async(self) -> None:
+        """Called when we have a new local device list update that we need to
+        send out over federation.
+
+        This happens in the background so as not to block the original request
+        that generated the device update.
+        """
+        if self._handle_new_device_update_is_processing:
+            self._handle_new_device_update_new_data = True
+            return
+
+        self._handle_new_device_update_is_processing = True
+
+        # The stream ID we processed in the previous iteration (if any), and the set of
+        # hosts we've already poked about for this update. This is so that we
+        # don't poke the same remote server about the same update repeatedly.
+        current_stream_id = None
+        hosts_already_sent_to: Set[str] = set()
+
+        try:
+            while True:
+                self._handle_new_device_update_new_data = False
+                rows = await self.store.get_uncoverted_outbound_room_pokes()
+                if not rows:
+                    # If the DB returned nothing then there is nothing left to
+                    # do, *unless* a new device list update happened during the
+                    # DB query.
+                    if self._handle_new_device_update_new_data:
+                        continue
+                    else:
+                        return
+
+                for user_id, device_id, room_id, stream_id, opentracing_context in rows:
+                    joined_user_ids = await self.store.get_users_in_room(room_id)
+                    hosts = {get_domain_from_id(u) for u in joined_user_ids}
+                    hosts.discard(self.server_name)
+
+                    # Check if we've already sent this update to some hosts
+                    if current_stream_id == stream_id:
+                        hosts -= hosts_already_sent_to
+
+                    await self.store.add_device_list_outbound_pokes(
+                        user_id=user_id,
+                        device_id=device_id,
+                        room_id=room_id,
+                        stream_id=stream_id,
+                        hosts=hosts,
+                        context=opentracing_context,
+                    )
+
+                    # Notify replication that we've updated the device list stream.
+                    self.notifier.notify_replication()
+
+                    if hosts:
+                        logger.info(
+                            "Sending device list update notif for %r to: %r",
+                            user_id,
+                            hosts,
+                        )
+                        for host in hosts:
+                            self.federation_sender.send_device_messages(
+                                host, immediate=False
+                            )
+                            log_kv(
+                                {"message": "sent device update to host", "host": host}
+                            )
+
+                    if current_stream_id != stream_id:
+                        # Clear the set of hosts we've already sent to as we're
+                        # processing a new update.
+                        hosts_already_sent_to.clear()
+
+                    hosts_already_sent_to.update(hosts)
+                    current_stream_id = stream_id
+
+        finally:
+            self._handle_new_device_update_is_processing = False
+
 
 def _update_device_from_client_ips(
     device: JsonDict, client_ips: Mapping[Tuple[str, str], Mapping[str, Any]]
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 4bd87709f3..e7b9f15e13 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -469,6 +469,12 @@ class FederationEventHandler:
             if context.rejected:
                 raise SynapseError(400, "Join event was rejected")
 
+            # the remote server is responsible for sending our join event to the rest
+            # of the federation. Indeed, attempting to send it ourselves will result in problems
+            # when we try to look up the state before the join (to get the server list)
+            # and discover that we do not have it.
+            event.internal_metadata.proactively_send = False
+
             return await self.persist_events_and_notify(room_id, [(event, context)])
 
     async def backfill(
@@ -891,10 +897,24 @@ class FederationEventHandler:
         logger.debug("We are also missing %i auth events", len(missing_auth_events))
 
         missing_events = missing_desired_events | missing_auth_events
-        logger.debug("Fetching %i events from remote", len(missing_events))
-        await self._get_events_and_persist(
-            destination=destination, room_id=room_id, event_ids=missing_events
-        )
+
+        # Making an individual request for each of 1000s of events has a lot of
+        # overhead. On the other hand, we don't really want to fetch all of the events
+        # if we already have most of them.
+        #
+        # As an arbitrary heuristic, if we are missing more than 10% of the events, then
+        # we fetch the whole state.
+        #
+        # TODO: might it be better to have an API which lets us do an aggregate event
+        #   request
+        if (len(missing_events) * 10) >= len(auth_event_ids) + len(state_event_ids):
+            logger.debug("Requesting complete state from remote")
+            await self._get_state_and_persist(destination, room_id, event_id)
+        else:
+            logger.debug("Fetching %i events from remote", len(missing_events))
+            await self._get_events_and_persist(
+                destination=destination, room_id=room_id, event_ids=missing_events
+            )
 
         # we need to make sure we re-load from the database to get the rejected
         # state correct.
@@ -953,6 +973,27 @@ class FederationEventHandler:
 
         return remote_state
 
+    async def _get_state_and_persist(
+        self, destination: str, room_id: str, event_id: str
+    ) -> None:
+        """Get the complete room state at a given event, and persist any new events
+        as outliers"""
+        room_version = await self._store.get_room_version(room_id)
+        auth_events, state_events = await self._federation_client.get_room_state(
+            destination, room_id, event_id=event_id, room_version=room_version
+        )
+        logger.info("/state returned %i events", len(auth_events) + len(state_events))
+
+        await self._auth_and_persist_outliers(
+            room_id, itertools.chain(auth_events, state_events)
+        )
+
+        # we also need the event itself.
+        if not await self._store.have_seen_event(room_id, event_id):
+            await self._get_events_and_persist(
+                destination=destination, room_id=room_id, event_ids=(event_id,)
+            )
+
     async def _process_received_pdu(
         self,
         origin: str,
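The `(len(missing_events) * 10) >= len(auth_event_ids) + len(state_event_ids)` test above is the "missing roughly 10% or more of the state" heuristic rearranged to avoid division. A short worked example with invented counts:

    # 90 state events + 10 auth events = 100 events in total.
    state_event_ids = ["$s%d" % i for i in range(90)]
    auth_event_ids = ["$a%d" % i for i in range(10)]

    # Missing 12 of 100 (>10%): fetch the whole /state response.
    missing_events = set(state_event_ids[:12])
    assert (len(missing_events) * 10) >= len(auth_event_ids) + len(state_event_ids)

    # Missing only 5 of 100 (<10%): fetch the individual events instead.
    missing_events = set(state_event_ids[:5])
    assert not ((len(missing_events) * 10) >= len(auth_event_ids) + len(state_event_ids))
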
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 34d9411bbf..dace31d87e 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -1625,7 +1625,7 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
             # We'll actually pull the presence updates for these users at the end.
             interested_and_updated_users: Union[Set[str], FrozenSet[str]] = set()
 
-            if from_key:
+            if from_key is not None:
                 # First get all users that have had a presence update
                 updated_users = stream_change_cache.get_all_entities_changed(from_key)
 
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index 73217d135d..a36936b520 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Dict, Iterable, Optional, cast
+from typing import TYPE_CHECKING, Dict, Iterable, Optional
 
 import attr
 from frozendict import frozendict
@@ -25,7 +25,6 @@ from synapse.visibility import filter_events_for_client
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
-    from synapse.storage.databases.main import DataStore
 
 
 logger = logging.getLogger(__name__)
@@ -116,7 +115,7 @@ class RelationsHandler:
         if event is None:
             raise SynapseError(404, "Unknown parent event.")
 
-        pagination_chunk = await self._main_store.get_relations_for_event(
+        related_events, next_token = await self._main_store.get_relations_for_event(
             event_id=event_id,
             event=event,
             room_id=room_id,
@@ -129,9 +128,7 @@ class RelationsHandler:
             to_token=to_token,
         )
 
-        events = await self._main_store.get_events_as_list(
-            [c["event_id"] for c in pagination_chunk.chunk]
-        )
+        events = await self._main_store.get_events_as_list(related_events)
 
         events = await filter_events_for_client(
             self._storage, user_id, events, is_peeking=(member_event_id is None)
@@ -152,9 +149,16 @@ class RelationsHandler:
             events, now, bundle_aggregations=aggregations
         )
 
-        return_value = await pagination_chunk.to_dict(self._main_store)
-        return_value["chunk"] = serialized_events
-        return_value["original_event"] = original_event
+        return_value = {
+            "chunk": serialized_events,
+            "original_event": original_event,
+        }
+
+        if next_token:
+            return_value["next_batch"] = await next_token.to_string(self._main_store)
+
+        if from_token:
+            return_value["prev_batch"] = await from_token.to_string(self._main_store)
 
         return return_value
 
@@ -193,16 +197,21 @@ class RelationsHandler:
         annotations = await self._main_store.get_aggregation_groups_for_event(
             event_id, room_id
         )
-        if annotations.chunk:
-            aggregations.annotations = await annotations.to_dict(
-                cast("DataStore", self)
-            )
+        if annotations:
+            aggregations.annotations = {"chunk": annotations}
 
-        references = await self._main_store.get_relations_for_event(
+        references, next_token = await self._main_store.get_relations_for_event(
             event_id, event, room_id, RelationTypes.REFERENCE, direction="f"
         )
-        if references.chunk:
-            aggregations.references = await references.to_dict(cast("DataStore", self))
+        if references:
+            aggregations.references = {
+                "chunk": [{"event_id": event_id} for event_id in references]
+            }
+
+            if next_token:
+                aggregations.references["next_batch"] = await next_token.to_string(
+                    self._main_store
+                )
 
         # Store the bundled aggregations in the event metadata for later use.
         return aggregations
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 092e185c99..51a08fd2c0 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -771,7 +771,9 @@ class RoomCreationHandler:
                 % (user_id,),
             )
 
-        visibility = config.get("visibility", None)
+        # The spec says rooms should default to private visibility if
+        # `visibility` is not specified.
+        visibility = config.get("visibility", "private")
         is_public = visibility == "public"
 
         room_id = await self._generate_room_id(
diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py
index a0255bd143..78e299d3a5 100644
--- a/synapse/handlers/room_batch.py
+++ b/synapse/handlers/room_batch.py
@@ -156,8 +156,8 @@ class RoomBatchHandler:
     ) -> List[str]:
         """Takes all `state_events_at_start` event dictionaries and creates/persists
         them in a floating state event chain which don't resolve into the current room
-        state. They are floating because they reference no prev_events and are marked
-        as outliers which disconnects them from the normal DAG.
+        state. They are floating because they reference no prev_events, which disconnects
+        them from the normal DAG.
 
         Args:
             state_events_at_start:
@@ -213,31 +213,23 @@ class RoomBatchHandler:
                     room_id=room_id,
                     action=membership,
                     content=event_dict["content"],
-                    # Mark as an outlier to disconnect it from the normal DAG
-                    # and not show up between batches of history.
-                    outlier=True,
                     historical=True,
                     # Only the first event in the state chain should be floating.
                     # The rest should hang off each other in a chain.
                     allow_no_prev_events=index == 0,
                     prev_event_ids=prev_event_ids_for_state_chain,
-                    # Since each state event is marked as an outlier, the
-                    # `EventContext.for_outlier()` won't have any `state_ids`
-                    # set and therefore can't derive any state even though the
-                    # prev_events are set. Also since the first event in the
-                    # state chain is floating with no `prev_events`, it can't
-                    # derive state from anywhere automatically. So we need to
-                    # set some state explicitly.
+                    # The first event in the state chain is floating with no
+                    # `prev_events`, which means it can't derive state from
+                    # anywhere automatically. So we need to set some state
+                    # explicitly.
                     #
                     # Make sure to use a copy of this list because we modify it
                     # later in the loop here. Otherwise it will be the same
-                    # reference and also update in the event when we append later.
+                    # reference and also update in the event when we append
+                    # later.
                     state_event_ids=state_event_ids.copy(),
                 )
             else:
-                # TODO: Add some complement tests that adds state that is not member joins
-                # and will use this code path. Maybe we only want to support join state events
-                # and can get rid of this `else`?
                 (
                     event,
                     _,
@@ -246,21 +238,15 @@ class RoomBatchHandler:
                         state_event["sender"], app_service_requester.app_service
                     ),
                     event_dict,
-                    # Mark as an outlier to disconnect it from the normal DAG
-                    # and not show up between batches of history.
-                    outlier=True,
                     historical=True,
                     # Only the first event in the state chain should be floating.
                     # The rest should hang off each other in a chain.
                     allow_no_prev_events=index == 0,
                     prev_event_ids=prev_event_ids_for_state_chain,
-                    # Since each state event is marked as an outlier, the
-                    # `EventContext.for_outlier()` won't have any `state_ids`
-                    # set and therefore can't derive any state even though the
-                    # prev_events are set. Also since the first event in the
-                    # state chain is floating with no `prev_events`, it can't
-                    # derive state from anywhere automatically. So we need to
-                    # set some state explicitly.
+                    # The first event in the state chain is floating with no
+                    # `prev_events`, which means it can't derive state from
+                    # anywhere automatically. So we need to set some state
+                    # explicitly.
                     #
                     # Make sure to use a copy of this list because we modify it
                     # later in the loop here. Otherwise it will be the same
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 6c569cfb1c..303c38c746 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -13,17 +13,7 @@
 # limitations under the License.
 import itertools
 import logging
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Collection,
-    Dict,
-    FrozenSet,
-    List,
-    Optional,
-    Set,
-    Tuple,
-)
+from typing import TYPE_CHECKING, Any, Dict, FrozenSet, List, Optional, Set, Tuple
 
 import attr
 from prometheus_client import Counter
@@ -41,6 +31,7 @@ from synapse.storage.databases.main.event_push_actions import NotifCounts
 from synapse.storage.roommember import MemberSummary
 from synapse.storage.state import StateFilter
 from synapse.types import (
+    DeviceListUpdates,
     JsonDict,
     MutableStateMap,
     Requester,
@@ -184,21 +175,6 @@ class GroupsSyncResult:
         return bool(self.join or self.invite or self.leave)
 
 
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class DeviceLists:
-    """
-    Attributes:
-        changed: List of user_ids whose devices may have changed
-        left: List of user_ids whose devices we no longer track
-    """
-
-    changed: Collection[str]
-    left: Collection[str]
-
-    def __bool__(self) -> bool:
-        return bool(self.changed or self.left)
-
-
 @attr.s(slots=True, auto_attribs=True)
 class _RoomChanges:
     """The set of room entries to include in the sync, plus the set of joined
@@ -240,7 +216,7 @@ class SyncResult:
     knocked: List[KnockedSyncResult]
     archived: List[ArchivedSyncResult]
     to_device: List[JsonDict]
-    device_lists: DeviceLists
+    device_lists: DeviceListUpdates
     device_one_time_keys_count: JsonDict
     device_unused_fallback_key_types: List[str]
     groups: Optional[GroupsSyncResult]
@@ -298,6 +274,8 @@ class SyncHandler:
             expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE,
         )
 
+        self.rooms_to_exclude = hs.config.server.rooms_to_exclude_from_sync
+
     async def wait_for_sync_for_user(
         self,
         requester: Requester,
@@ -1262,8 +1240,8 @@ class SyncHandler:
         newly_joined_or_invited_or_knocked_users: Set[str],
         newly_left_rooms: Set[str],
         newly_left_users: Set[str],
-    ) -> DeviceLists:
-        """Generate the DeviceLists section of sync
+    ) -> DeviceListUpdates:
+        """Generate the DeviceListUpdates section of sync
 
         Args:
             sync_result_builder
@@ -1381,9 +1359,11 @@ class SyncHandler:
                 if any(e.room_id in joined_rooms for e in entries):
                     newly_left_users.discard(user_id)
 
-            return DeviceLists(changed=users_that_have_changed, left=newly_left_users)
+            return DeviceListUpdates(
+                changed=users_that_have_changed, left=newly_left_users
+            )
         else:
-            return DeviceLists(changed=[], left=[])
+            return DeviceListUpdates()
 
     async def _generate_sync_entry_for_to_device(
         self, sync_result_builder: "SyncResultBuilder"
@@ -1607,13 +1587,15 @@ class SyncHandler:
         ignored_users = await self.store.ignored_users(user_id)
         if since_token:
             room_changes = await self._get_rooms_changed(
-                sync_result_builder, ignored_users
+                sync_result_builder, ignored_users, self.rooms_to_exclude
             )
             tags_by_room = await self.store.get_updated_tags(
                 user_id, since_token.account_data_key
             )
         else:
-            room_changes = await self._get_all_rooms(sync_result_builder, ignored_users)
+            room_changes = await self._get_all_rooms(
+                sync_result_builder, ignored_users, self.rooms_to_exclude
+            )
             tags_by_room = await self.store.get_tags_for_user(user_id)
 
         log_kv({"rooms_changed": len(room_changes.room_entries)})
@@ -1689,7 +1671,10 @@ class SyncHandler:
         return False
 
     async def _get_rooms_changed(
-        self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str]
+        self,
+        sync_result_builder: "SyncResultBuilder",
+        ignored_users: FrozenSet[str],
+        excluded_rooms: List[str],
     ) -> _RoomChanges:
         """Determine the changes in rooms to report to the user.
 
@@ -1721,7 +1706,7 @@ class SyncHandler:
         #       _have_rooms_changed. We could keep the results in memory to avoid a
         #       second query, at the cost of more complicated source code.
         membership_change_events = await self.store.get_membership_changes_for_user(
-            user_id, since_token.room_key, now_token.room_key
+            user_id, since_token.room_key, now_token.room_key, excluded_rooms
         )
 
         mem_change_events_by_room_id: Dict[str, List[EventBase]] = {}
@@ -1922,7 +1907,10 @@ class SyncHandler:
         )
 
     async def _get_all_rooms(
-        self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str]
+        self,
+        sync_result_builder: "SyncResultBuilder",
+        ignored_users: FrozenSet[str],
+        ignored_rooms: List[str],
     ) -> _RoomChanges:
         """Returns entries for all rooms for the user.
 
@@ -1933,7 +1921,7 @@ class SyncHandler:
         Args:
             sync_result_builder
             ignored_users: Set of users ignored by user.
-
+            ignored_rooms: List of rooms to ignore.
         """
 
         user_id = sync_result_builder.sync_config.user.to_string()
@@ -1944,6 +1932,7 @@ class SyncHandler:
         room_list = await self.store.get_rooms_for_local_user_where_membership_is(
             user_id=user_id,
             membership_list=Membership.LIST,
+            excluded_rooms=ignored_rooms,
         )
 
         room_entries = []
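The sync handler now builds the shared `synapse.types.DeviceListUpdates` instead of its private `DeviceLists` class, with the bare `DeviceListUpdates()` call standing in for the old `DeviceLists(changed=[], left=[])`. A minimal sketch of such a container, assuming (its definition is not part of this diff) an attrs class with empty-set defaults and the same falsiness check as the removed one:

    from typing import Set

    import attr


    @attr.s(slots=True, auto_attribs=True)
    class DeviceListUpdates:
        # Users whose device lists may have changed / whose devices we no longer track.
        changed: Set[str] = attr.Factory(set)
        left: Set[str] = attr.Factory(set)

        def __bool__(self) -> bool:
            return bool(self.changed or self.left)


    # Empty updates are falsy, mirroring DeviceLists(changed=[], left=[]).
    assert not DeviceListUpdates()
    assert DeviceListUpdates(changed={"@alice:example.org"})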
diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
index 6fd88bde20..a16dde2380 100644
--- a/synapse/http/proxyagent.py
+++ b/synapse/http/proxyagent.py
@@ -41,7 +41,7 @@ from synapse.types import ISynapseReactor
 
 logger = logging.getLogger(__name__)
 
-_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")
+_VALID_URI = re.compile(rb"\A[\x21-\x7e]+\Z")
 
 
 @implementer(IAgent)
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index ba9755f08b..9a61593ff5 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -62,8 +62,10 @@ from synapse.events.third_party_rules import (
     ON_CREATE_ROOM_CALLBACK,
     ON_NEW_EVENT_CALLBACK,
     ON_PROFILE_UPDATE_CALLBACK,
+    ON_THREEPID_BIND_CALLBACK,
     ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK,
 )
+from synapse.handlers.account_data import ON_ACCOUNT_DATA_UPDATED_CALLBACK
 from synapse.handlers.account_validity import (
     IS_USER_EXPIRED_CALLBACK,
     ON_LEGACY_ADMIN_REQUEST,
@@ -215,6 +217,7 @@ class ModuleApi:
         self._third_party_event_rules = hs.get_third_party_event_rules()
         self._password_auth_provider = hs.get_password_auth_provider()
         self._presence_router = hs.get_presence_router()
+        self._account_data_handler = hs.get_account_data_handler()
 
     #################################################################################
     # The following methods should only be called during the module's initialisation.
@@ -293,6 +296,7 @@ class ModuleApi:
         on_user_deactivation_status_changed: Optional[
             ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
         ] = None,
+        on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None,
     ) -> None:
         """Registers callbacks for third party event rules capabilities.
 
@@ -308,6 +312,7 @@ class ModuleApi:
             check_can_deactivate_user=check_can_deactivate_user,
             on_profile_update=on_profile_update,
             on_user_deactivation_status_changed=on_user_deactivation_status_changed,
+            on_threepid_bind=on_threepid_bind,
         )
 
     def register_presence_router_callbacks(
@@ -373,6 +378,19 @@ class ModuleApi:
                 min_batch_size=min_batch_size,
             )
 
+    def register_account_data_callbacks(
+        self,
+        *,
+        on_account_data_updated: Optional[ON_ACCOUNT_DATA_UPDATED_CALLBACK] = None,
+    ) -> None:
+        """Registers account data callbacks.
+
+        Added in Synapse 1.57.0.
+        """
+        return self._account_data_handler.register_module_callbacks(
+            on_account_data_updated=on_account_data_updated,
+        )
+
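    # A hypothetical module wiring up the account data hook registered above. The
    # callback signature here is an assumption based on the accompanying
    # account_data_callbacks documentation; it is not defined in this diff.
    import logging
    from typing import Any, Dict, Optional

    logger = logging.getLogger(__name__)


    class ExampleAccountDataModule:
        def __init__(self, config: Dict[str, Any], api: Any) -> None:
            # `api` is the ModuleApi instance handed to every module.
            api.register_account_data_callbacks(
                on_account_data_updated=self.on_account_data_updated,
            )

        async def on_account_data_updated(
            self,
            user_id: str,
            room_id: Optional[str],
            account_data_type: str,
            content: Dict[str, Any],
        ) -> None:
            # Purely illustrative: log each account data change as it happens.
            logger.info(
                "Account data %s updated for %s (room=%s)",
                account_data_type,
                user_id,
                room_id,
            )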
     def register_web_resource(self, path: str, resource: Resource) -> None:
         """Registers a web resource to be served at the given path.
 
@@ -512,6 +530,17 @@ class ModuleApi:
         """
         return await self._store.is_server_admin(UserID.from_string(user_id))
 
+    async def set_user_admin(self, user_id: str, admin: bool) -> None:
+        """Sets whether a user is a server admin.
+
+        Added in Synapse v1.56.0.
+
+        Args:
+            user_id: The Matrix ID of the user to set admin status for.
+            admin: True if the user should be a server admin, False otherwise.
+        """
+        await self._store.set_server_admin(UserID.from_string(user_id), admin)
+
     def get_qualified_user_id(self, username: str) -> str:
         """Qualify a user id, if necessary
 
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index f1abb98653..2bd244ed79 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -275,7 +275,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
                             if attempts > cls.RETRY_ON_CONNECT_ERROR_ATTEMPTS:
                                 raise
 
-                            delay = 2 ** attempts
+                            delay = 2**attempts
                             logger.warning(
                                 "%s request connection failed; retrying in %ds: %r",
                                 cls.NAME,
diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py
deleted file mode 100644
index 14706a0817..0000000000
--- a/synapse/replication/slave/storage/client_ips.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2017 Vector Creations Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import TYPE_CHECKING
-
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY
-from synapse.util.caches.lrucache import LruCache
-
-from ._base import BaseSlavedStore
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-
-class SlavedClientIpStore(BaseSlavedStore):
-    def __init__(
-        self,
-        database: DatabasePool,
-        db_conn: LoggingDatabaseConnection,
-        hs: "HomeServer",
-    ):
-        super().__init__(database, db_conn, hs)
-
-        self.client_ip_last_seen: LruCache[tuple, int] = LruCache(
-            cache_name="client_ip_last_seen", max_size=50000
-        )
-
-    async def insert_client_ip(
-        self, user_id: str, access_token: str, ip: str, user_agent: str, device_id: str
-    ) -> None:
-        now = int(self._clock.time_msec())
-        key = (user_id, access_token, ip)
-
-        try:
-            last_seen = self.client_ip_last_seen.get(key)
-        except KeyError:
-            last_seen = None
-
-        # Rate-limited inserts
-        if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
-            return
-
-        self.client_ip_last_seen.set(key, now)
-
-        self.hs.get_replication_command_handler().send_user_ip(
-            user_id, access_token, ip, user_agent, device_id, now
-        )
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
index 0ffd34f1da..00a634d3a9 100644
--- a/synapse/replication/slave/storage/devices.py
+++ b/synapse/replication/slave/storage/devices.py
@@ -44,11 +44,22 @@ class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedSto
             extra_tables=[
                 ("user_signature_stream", "stream_id"),
                 ("device_lists_outbound_pokes", "stream_id"),
+                ("device_lists_changes_in_room", "stream_id"),
             ],
         )
         device_list_max = self._device_list_id_gen.get_current_token()
+        device_list_prefill, min_device_list_id = self.db_pool.get_cache_dict(
+            db_conn,
+            "device_lists_stream",
+            entity_column="user_id",
+            stream_column="stream_id",
+            max_value=device_list_max,
+            limit=1000,
+        )
         self._device_list_stream_cache = StreamChangeCache(
-            "DeviceListStreamChangeCache", device_list_max
+            "DeviceListStreamChangeCache",
+            min_device_list_id,
+            prefilled_cache=device_list_prefill,
         )
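    # Toy illustration (not the real StreamChangeCache) of what the prefill buys:
    # seeded with recent per-user stream positions from `get_cache_dict`, the cache
    # can answer definitively for tokens at or above `min_device_list_id` instead of
    # answering "maybe changed" for everything after a cold start.
    from typing import Dict


    class ToyStreamChangeCache:
        def __init__(self, earliest_known_pos: int, prefilled_cache: Dict[str, int]) -> None:
            self._earliest_known_pos = earliest_known_pos
            self._entity_to_pos = dict(prefilled_cache)

        def has_entity_changed(self, entity: str, stream_pos: int) -> bool:
            if stream_pos < self._earliest_known_pos:
                # Too old for the cache to know anything: assume changed.
                return True
            return self._entity_to_pos.get(entity, 0) > stream_pos


    cache = ToyStreamChangeCache(100, {"@alice:example.org": 105})
    assert cache.has_entity_changed("@alice:example.org", 101)
    assert not cache.has_entity_changed("@bob:example.org", 101)
    assert cache.has_entity_changed("@bob:example.org", 50)  # below the prefill horizon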
         self._user_signature_stream_cache = StreamChangeCache(
             "UserSignatureStreamChangeCache", device_list_max
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 3654f6c03c..fe34948168 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -356,7 +356,7 @@ class UserIpCommand(Command):
         access_token: str,
         ip: str,
         user_agent: str,
-        device_id: str,
+        device_id: Optional[str],
         last_seen: int,
     ):
         self.user_id = user_id
@@ -389,6 +389,12 @@ class UserIpCommand(Command):
             )
         )
 
+    def __repr__(self) -> str:
+        return (
+            f"UserIpCommand({self.user_id!r}, .., {self.ip!r}, "
+            f"{self.user_agent!r}, {self.device_id!r}, {self.last_seen})"
+        )
+
 
 class RemoteServerUpCommand(_SimpleCommand):
     """Sent when a worker has detected that a remote server is no longer
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index b217c35f99..615f1828dd 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -235,6 +235,14 @@ class ReplicationCommandHandler:
         if self._is_master:
             self._server_notices_sender = hs.get_server_notices_sender()
 
+        if hs.config.redis.redis_enabled:
+            # If we're using Redis, it's the background worker that should
+            # receive USER_IP commands and store the relevant client IPs.
+            self._should_insert_client_ips = hs.config.worker.run_background_tasks
+        else:
+            # If we're NOT using Redis, this must be handled by the master
+            self._should_insert_client_ips = hs.get_instance_name() == "master"
+
     def _add_command_to_stream_queue(
         self, conn: IReplicationConnection, cmd: Union[RdataCommand, PositionCommand]
     ) -> None:
@@ -401,23 +409,37 @@ class ReplicationCommandHandler:
     ) -> Optional[Awaitable[None]]:
         user_ip_cache_counter.inc()
 
-        if self._is_master:
+        if self._is_master or self._should_insert_client_ips:
+            # We make a point of only returning an awaitable if there's actually
+            # something to do; on_USER_IP is not an async function, but
+            # _handle_user_ip is.
+            # If on_USER_IP returns an awaitable, it gets scheduled as a
+            # background process (see `BaseReplicationStreamProtocol.handle_command`).
             return self._handle_user_ip(cmd)
         else:
+            # Returning None when this process definitely has nothing to do
+            # reduces the overhead of handling the USER_IP command, which is
+            # currently broadcast to all workers regardless of utility.
             return None
 
     async def _handle_user_ip(self, cmd: UserIpCommand) -> None:
-        await self._store.insert_client_ip(
-            cmd.user_id,
-            cmd.access_token,
-            cmd.ip,
-            cmd.user_agent,
-            cmd.device_id,
-            cmd.last_seen,
-        )
-
-        assert self._server_notices_sender is not None
-        await self._server_notices_sender.on_user_ip(cmd.user_id)
+        """
+        Handles a USER_IP command, branching depending on whether we are the main
+        process and/or the background worker.
+        """
+        if self._is_master:
+            assert self._server_notices_sender is not None
+            await self._server_notices_sender.on_user_ip(cmd.user_id)
+
+        if self._should_insert_client_ips:
+            await self._store.insert_client_ip(
+                cmd.user_id,
+                cmd.access_token,
+                cmd.ip,
+                cmd.user_agent,
+                cmd.device_id,
+                cmd.last_seen,
+            )
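    # A standalone restatement of the routing rule applied above; the helper name is
    # made up, and the real decision is taken once in the constructor.
    def should_insert_client_ips(
        redis_enabled: bool, run_background_tasks: bool, instance_name: str
    ) -> bool:
        if redis_enabled:
            # With Redis, the designated background worker stores client IPs.
            return run_background_tasks
        # Without Redis, only the master can do it.
        return instance_name == "master"


    assert should_insert_client_ips(True, True, "background_worker1")
    assert not should_insert_client_ips(True, False, "master")
    assert should_insert_client_ips(False, False, "master")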
 
     def on_RDATA(self, conn: IReplicationConnection, cmd: RdataCommand) -> None:
         if cmd.instance_name == self._instance_name:
@@ -698,7 +720,7 @@ class ReplicationCommandHandler:
         access_token: str,
         ip: str,
         user_agent: str,
-        device_id: str,
+        device_id: Optional[str],
         last_seen: int,
     ) -> None:
         """Tell the master that the user made a request."""
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py
index c16078b187..55c96a2af3 100644
--- a/synapse/rest/client/relations.py
+++ b/synapse/rest/client/relations.py
@@ -12,22 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""This class implements the proposed relation APIs from MSC 1849.
-
-Since the MSC has not been approved all APIs here are unstable and may change at
-any time to reflect changes in the MSC.
-"""
-
 import logging
 from typing import TYPE_CHECKING, Optional, Tuple
 
-from synapse.api.constants import RelationTypes
-from synapse.api.errors import SynapseError
 from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
 from synapse.rest.client._base import client_patterns
-from synapse.storage.relations import AggregationPaginationToken
 from synapse.types import JsonDict, StreamToken
 
 if TYPE_CHECKING:
@@ -93,166 +84,5 @@ class RelationPaginationServlet(RestServlet):
         return 200, result
 
 
-class RelationAggregationPaginationServlet(RestServlet):
-    """API to paginate aggregation groups of relations, e.g. paginate the
-    types and counts of the reactions on the events.
-
-    Example request and response:
-
-        GET /rooms/{room_id}/aggregations/{parent_id}
-
-        {
-            chunk: [
-                {
-                    "type": "m.reaction",
-                    "key": "👍",
-                    "count": 3
-                }
-            ]
-        }
-    """
-
-    PATTERNS = client_patterns(
-        "/rooms/(?P<room_id>[^/]*)/aggregations/(?P<parent_id>[^/]*)"
-        "(/(?P<relation_type>[^/]*)(/(?P<event_type>[^/]*))?)?$",
-        releases=(),
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.store = hs.get_datastores().main
-        self.event_handler = hs.get_event_handler()
-
-    async def on_GET(
-        self,
-        request: SynapseRequest,
-        room_id: str,
-        parent_id: str,
-        relation_type: Optional[str] = None,
-        event_type: Optional[str] = None,
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-
-        await self.auth.check_user_in_room_or_world_readable(
-            room_id,
-            requester.user.to_string(),
-            allow_departed_users=True,
-        )
-
-        # This checks that a) the event exists and b) the user is allowed to
-        # view it.
-        event = await self.event_handler.get_event(requester.user, room_id, parent_id)
-        if event is None:
-            raise SynapseError(404, "Unknown parent event.")
-
-        if relation_type not in (RelationTypes.ANNOTATION, None):
-            raise SynapseError(
-                400, f"Relation type must be '{RelationTypes.ANNOTATION}'"
-            )
-
-        limit = parse_integer(request, "limit", default=5)
-        from_token_str = parse_string(request, "from")
-        to_token_str = parse_string(request, "to")
-
-        # Return the relations
-        from_token = None
-        if from_token_str:
-            from_token = AggregationPaginationToken.from_string(from_token_str)
-
-        to_token = None
-        if to_token_str:
-            to_token = AggregationPaginationToken.from_string(to_token_str)
-
-        pagination_chunk = await self.store.get_aggregation_groups_for_event(
-            event_id=parent_id,
-            room_id=room_id,
-            event_type=event_type,
-            limit=limit,
-            from_token=from_token,
-            to_token=to_token,
-        )
-
-        return 200, await pagination_chunk.to_dict(self.store)
-
-
-class RelationAggregationGroupPaginationServlet(RestServlet):
-    """API to paginate within an aggregation group of relations, e.g. paginate
-    all the 👍 reactions on an event.
-
-    Example request and response:
-
-        GET /rooms/{room_id}/aggregations/{parent_id}/m.annotation/m.reaction/👍
-
-        {
-            chunk: [
-                {
-                    "type": "m.reaction",
-                    "content": {
-                        "m.relates_to": {
-                            "rel_type": "m.annotation",
-                            "key": "👍"
-                        }
-                    }
-                },
-                ...
-            ]
-        }
-    """
-
-    PATTERNS = client_patterns(
-        "/rooms/(?P<room_id>[^/]*)/aggregations/(?P<parent_id>[^/]*)"
-        "/(?P<relation_type>[^/]*)/(?P<event_type>[^/]*)/(?P<key>[^/]*)$",
-        releases=(),
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.store = hs.get_datastores().main
-        self._relations_handler = hs.get_relations_handler()
-
-    async def on_GET(
-        self,
-        request: SynapseRequest,
-        room_id: str,
-        parent_id: str,
-        relation_type: str,
-        event_type: str,
-        key: str,
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-
-        if relation_type != RelationTypes.ANNOTATION:
-            raise SynapseError(400, "Relation type must be 'annotation'")
-
-        limit = parse_integer(request, "limit", default=5)
-        from_token_str = parse_string(request, "from")
-        to_token_str = parse_string(request, "to")
-
-        from_token = None
-        if from_token_str:
-            from_token = await StreamToken.from_string(self.store, from_token_str)
-        to_token = None
-        if to_token_str:
-            to_token = await StreamToken.from_string(self.store, to_token_str)
-
-        result = await self._relations_handler.get_relations(
-            requester=requester,
-            event_id=parent_id,
-            room_id=room_id,
-            relation_type=relation_type,
-            event_type=event_type,
-            aggregation_key=key,
-            limit=limit,
-            from_token=from_token,
-            to_token=to_token,
-        )
-
-        return 200, result
-
-
 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     RelationPaginationServlet(hs).register(http_server)
-    RelationAggregationPaginationServlet(hs).register(http_server)
-    RelationAggregationGroupPaginationServlet(hs).register(http_server)
diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py
index 0780485322..dd91dabedd 100644
--- a/synapse/rest/client/room_batch.py
+++ b/synapse/rest/client/room_batch.py
@@ -123,6 +123,19 @@ class RoomBatchSendEventRestServlet(RestServlet):
                     errcode=Codes.INVALID_PARAM,
                 )
 
+        # Make sure that the prev_event_ids exist and aren't outliers, i.e. they are
+        # regular parts of the room DAG where we know the state.
+        non_outlier_prev_events = await self.store.have_events_in_timeline(
+            prev_event_ids_from_query
+        )
+        for prev_event_id in prev_event_ids_from_query:
+            if prev_event_id not in non_outlier_prev_events:
+                raise SynapseError(
+                    HTTPStatus.BAD_REQUEST,
+                    "prev_event %s does not exist, or is an outlier" % (prev_event_id,),
+                    errcode=Codes.INVALID_PARAM,
+                )
+
         # For the event we are inserting next to (`prev_event_ids_from_query`),
         # find the most recent state events that allowed that message to be
         # sent. We will use that as a base to auth our historical messages
@@ -131,14 +144,6 @@ class RoomBatchSendEventRestServlet(RestServlet):
             prev_event_ids_from_query
         )
 
-        if not state_event_ids:
-            raise SynapseError(
-                HTTPStatus.BAD_REQUEST,
-                "No auth events found for given prev_event query parameter. The prev_event=%s probably does not exist."
-                % prev_event_ids_from_query,
-                errcode=Codes.INVALID_PARAM,
-            )
-
         state_event_ids_at_start = []
         # Create and persist all of the state events that float off on their own
         # before the batch. These will most likely be all of the invite/member
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 53c385a86c..0bf32f873b 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -99,6 +99,7 @@ class SyncRestServlet(RestServlet):
         self.presence_handler = hs.get_presence_handler()
         self._server_notices_sender = hs.get_server_notices_sender()
         self._event_serializer = hs.get_event_client_serializer()
+        self._msc2654_enabled = hs.config.experimental.msc2654_enabled
 
     async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
         # This will always be set by the time Twisted calls us.
@@ -521,7 +522,8 @@ class SyncRestServlet(RestServlet):
             result["ephemeral"] = {"events": ephemeral_events}
             result["unread_notifications"] = room.unread_notifications
             result["summary"] = room.summary
-            result["org.matrix.msc2654.unread_count"] = room.unread_count
+            if self._msc2654_enabled:
+                result["org.matrix.msc2654.unread_count"] = room.unread_count
 
         return result
 
diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py
index b9bfbea21b..0c9f042c84 100644
--- a/synapse/rest/key/v2/local_key_resource.py
+++ b/synapse/rest/key/v2/local_key_resource.py
@@ -76,17 +76,17 @@ class LocalKey(Resource):
 
     def response_json_object(self) -> JsonDict:
         verify_keys = {}
-        for key in self.config.key.signing_key:
-            verify_key_bytes = key.verify_key.encode()
-            key_id = "%s:%s" % (key.alg, key.version)
+        for signing_key in self.config.key.signing_key:
+            verify_key_bytes = signing_key.verify_key.encode()
+            key_id = "%s:%s" % (signing_key.alg, signing_key.version)
             verify_keys[key_id] = {"key": encode_base64(verify_key_bytes)}
 
         old_verify_keys = {}
-        for key_id, key in self.config.key.old_signing_keys.items():
-            verify_key_bytes = key.encode()
+        for key_id, old_signing_key in self.config.key.old_signing_keys.items():
+            verify_key_bytes = old_signing_key.encode()
             old_verify_keys[key_id] = {
                 "key": encode_base64(verify_key_bytes),
-                "expired_ts": key.expired_ts,
+                "expired_ts": old_signing_key.expired,
             }
 
         json_object = {
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 3525d6ae54..f597157581 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import logging
-from typing import TYPE_CHECKING, Dict
+from typing import TYPE_CHECKING, Dict, Set
 
 from signedjson.sign import sign_json
 
@@ -149,7 +149,7 @@ class RemoteKey(DirectServeJsonResource):
 
         cached = await self.store.get_server_keys_json(store_queries)
 
-        json_results = set()
+        json_results: Set[bytes] = set()
 
         time_now_ms = self.clock.time_msec()
 
@@ -234,8 +234,8 @@ class RemoteKey(DirectServeJsonResource):
             await self.query_keys(request, query, query_remote_on_cache_miss=False)
         else:
             signed_keys = []
-            for key_json in json_results:
-                key_json = json_decoder.decode(key_json.decode("utf-8"))
+            for key_json_raw in json_results:
+                key_json = json_decoder.decode(key_json_raw.decode("utf-8"))
                 for signing_key in self.config.key.key_server_signing_keys:
                     key_json = sign_json(
                         key_json, self.config.server.server_name, signing_key
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index 9f6c251caf..604f18bf52 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -352,7 +352,7 @@ class ReadableFileWrapper:
     `IConsumer`.
     """
 
-    CHUNK_SIZE = 2 ** 14
+    CHUNK_SIZE = 2**14
 
     clock: Clock
     path: str
diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py
index 4cc9c66fbe..ca73965fc2 100644
--- a/synapse/rest/media/v1/preview_html.py
+++ b/synapse/rest/media/v1/preview_html.py
@@ -23,10 +23,10 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 _charset_match = re.compile(
-    br'<\s*meta[^>]*charset\s*=\s*"?([a-z0-9_-]+)"?', flags=re.I
+    rb'<\s*meta[^>]*charset\s*=\s*"?([a-z0-9_-]+)"?', flags=re.I
 )
 _xml_encoding_match = re.compile(
-    br'\s*<\s*\?\s*xml[^>]*encoding="([a-z0-9_-]+)"', flags=re.I
+    rb'\s*<\s*\?\s*xml[^>]*encoding="([a-z0-9_-]+)"', flags=re.I
 )
 _content_type_match = re.compile(r'.*; *charset="?(.*?)"?(;|$)', flags=re.I)
 
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index d47af8ead6..50383bdbd1 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -200,12 +200,17 @@ class PreviewUrlResource(DirectServeJsonResource):
                     match = False
                     continue
 
+                # Some attributes might not be parsed as strings by urlsplit (such as the
+                # port, which is parsed as an int). Because we use match functions that
+                # expect strings, we want to make sure that's what we give them.
+                value_str = str(value)
+
                 if pattern.startswith("^"):
-                    if not re.match(pattern, getattr(url_tuple, attrib)):
+                    if not re.match(pattern, value_str):
                         match = False
                         continue
                 else:
-                    if not fnmatch.fnmatch(getattr(url_tuple, attrib), pattern):
+                    if not fnmatch.fnmatch(value_str, pattern):
                         match = False
                         continue
             if match:
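The `str()` conversion above matters because `urlsplit` exposes some components as non-strings, and both matching functions reject them. A quick standard-library check, independent of Synapse:

    import fnmatch
    import re
    from urllib.parse import urlsplit

    url_tuple = urlsplit("http://example.com:8080/path")
    assert url_tuple.port == 8080  # an int, not a str

    value_str = str(url_tuple.port)
    assert fnmatch.fnmatch(value_str, "80*")
    assert re.match("^8080$", value_str)
    # Passing the raw int instead raises TypeError in both re.match and fnmatch.fnmatch.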
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 72fef1533f..0264dea61d 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -63,7 +63,7 @@ if TYPE_CHECKING:
     from synapse.server import HomeServer
 
 # python 3 does not have a maximum int value
-MAX_TXN_ID = 2 ** 63 - 1
+MAX_TXN_ID = 2**63 - 1
 
 logger = logging.getLogger(__name__)
 
@@ -241,9 +241,17 @@ class LoggingTransaction:
         self.exception_callbacks = exception_callbacks
 
     def call_after(self, callback: Callable[..., object], *args: Any, **kwargs: Any):
-        """Call the given callback on the main twisted thread after the
-        transaction has finished. Used to invalidate the caches on the
-        correct thread.
+        """Call the given callback on the main twisted thread after the transaction has
+        finished.
+
+        Mostly used to invalidate the caches on the correct thread.
+
+        Note that transactions may be retried a few times if they encounter database
+        errors such as serialization failures. Callbacks given to `call_after`
+        will accumulate across transaction attempts and will _all_ be called once a
+        transaction attempt succeeds, regardless of whether previous transaction
+        attempts failed. Otherwise, if all transaction attempts fail, all
+        `call_on_exception` callbacks will be run instead.
         """
         # if self.after_callbacks is None, that means that whatever constructed the
         # LoggingTransaction isn't expecting there to be any callbacks; assert that
@@ -254,6 +262,15 @@ class LoggingTransaction:
     def call_on_exception(
         self, callback: Callable[..., object], *args: Any, **kwargs: Any
     ):
+        """Call the given callback on the main twisted thread after the transaction has
+        failed.
+
+        Note that transactions may be retried a few times if they encounter database
+        errors such as serialization failures. Callbacks given to `call_on_exception`
+        will accumulate across transaction attempts and will _all_ be called once the
+        final transaction attempt fails. No `call_on_exception` callbacks will be run
+        if any transaction attempt succeeds.
+        """
         # if self.exception_callbacks is None, that means that whatever constructed the
         # LoggingTransaction isn't expecting there to be any callbacks; assert that
         # is not the case.
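The retry semantics spelled out in these docstrings are easy to get wrong, so here is a toy model (not the real `DatabasePool` retry loop) of the rule that after-callbacks accumulate across attempts and all fire once an attempt succeeds:

    from typing import Callable, List


    def run_with_retries(
        attempt: Callable[[List[Callable[[], None]]], None], retries: int = 3
    ) -> None:
        after_callbacks: List[Callable[[], None]] = []
        for i in range(retries):
            try:
                # The attempt may register call_after-style callbacks as it goes.
                attempt(after_callbacks)
            except Exception:
                if i == retries - 1:
                    raise
                # Callbacks registered by the failed attempt are kept, not discarded.
                continue
            # Every callback accumulated across all attempts fires exactly once.
            for callback in after_callbacks:
                callback()
            return


    fired: List[str] = []
    outcomes = iter([RuntimeError("serialization failure"), None])


    def txn(after_callbacks: List[Callable[[], None]]) -> None:
        after_callbacks.append(lambda: fired.append("invalidate cache"))
        outcome = next(outcomes)
        if outcome is not None:
            raise outcome


    run_with_retries(txn)
    # One callback was registered per attempt; both ran after the retry succeeded.
    assert fired == ["invalidate cache", "invalidate cache"]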
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index f024761ba7..d4a38daa9a 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -33,7 +33,7 @@ from .account_data import AccountDataStore
 from .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore
 from .cache import CacheInvalidationWorkerStore
 from .censor_events import CensorEventsStore
-from .client_ips import ClientIpStore
+from .client_ips import ClientIpWorkerStore
 from .deviceinbox import DeviceInboxStore
 from .devices import DeviceStore
 from .directory import DirectoryStore
@@ -49,7 +49,7 @@ from .keys import KeyStore
 from .lock import LockStore
 from .media_repository import MediaRepositoryStore
 from .metrics import ServerMetricsStore
-from .monthly_active_users import MonthlyActiveUsersStore
+from .monthly_active_users import MonthlyActiveUsersWorkerStore
 from .openid import OpenIdStore
 from .presence import PresenceStore
 from .profile import ProfileStore
@@ -112,13 +112,13 @@ class DataStore(
     AccountDataStore,
     EventPushActionsStore,
     OpenIdStore,
-    ClientIpStore,
+    ClientIpWorkerStore,
     DeviceStore,
     DeviceInboxStore,
     UserDirectoryStore,
     GroupServerStore,
     UserErasureStore,
-    MonthlyActiveUsersStore,
+    MonthlyActiveUsersWorkerStore,
     StatsStore,
     RelationsStore,
     CensorEventsStore,
@@ -146,6 +146,7 @@ class DataStore(
             extra_tables=[
                 ("user_signature_stream", "stream_id"),
                 ("device_lists_outbound_pokes", "stream_id"),
+                ("device_lists_changes_in_room", "stream_id"),
             ],
         )
 
@@ -183,8 +184,18 @@ class DataStore(
         super().__init__(database, db_conn, hs)
 
         device_list_max = self._device_list_id_gen.get_current_token()
+        device_list_prefill, min_device_list_id = self.db_pool.get_cache_dict(
+            db_conn,
+            "device_lists_stream",
+            entity_column="user_id",
+            stream_column="stream_id",
+            max_value=device_list_max,
+            limit=1000,
+        )
         self._device_list_stream_cache = StreamChangeCache(
-            "DeviceListStreamChangeCache", device_list_max
+            "DeviceListStreamChangeCache",
+            min_device_list_id,
+            prefilled_cache=device_list_prefill,
         )
         self._user_signature_stream_cache = StreamChangeCache(
             "UserSignatureStreamChangeCache", device_list_max
diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py
index 0694446558..eb32c34a85 100644
--- a/synapse/storage/databases/main/appservice.py
+++ b/synapse/storage/databases/main/appservice.py
@@ -29,7 +29,9 @@ from synapse.storage._base import db_to_json
 from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
-from synapse.types import JsonDict
+from synapse.storage.types import Cursor
+from synapse.storage.util.sequence import build_sequence_generator
+from synapse.types import DeviceListUpdates, JsonDict
 from synapse.util import json_encoder
 from synapse.util.caches.descriptors import _CacheContext, cached
 
@@ -72,6 +74,22 @@ class ApplicationServiceWorkerStore(RoomMemberWorkerStore):
         )
         self.exclusive_user_regex = _make_exclusive_regex(self.services_cache)
 
+        def get_max_as_txn_id(txn: Cursor) -> int:
+            logger.warning("Falling back to slow query, you should port to postgres")
+            txn.execute(
+                "SELECT COALESCE(max(txn_id), 0) FROM application_services_txns"
+            )
+            return txn.fetchone()[0]  # type: ignore
+
+        self._as_txn_seq_gen = build_sequence_generator(
+            db_conn,
+            database.engine,
+            get_max_as_txn_id,
+            "application_services_txn_id_seq",
+            table="application_services_txns",
+            id_column="txn_id",
+        )
+
         super().__init__(database, db_conn, hs)
 
     def get_app_services(self):
@@ -217,6 +235,7 @@ class ApplicationServiceTransactionWorkerStore(
         to_device_messages: List[JsonDict],
         one_time_key_counts: TransactionOneTimeKeyCounts,
         unused_fallback_keys: TransactionUnusedFallbackKeys,
+        device_list_summary: DeviceListUpdates,
     ) -> AppServiceTransaction:
         """Atomically creates a new transaction for this application service
         with the given list of events. Ephemeral events are NOT persisted to the
@@ -231,27 +250,14 @@ class ApplicationServiceTransactionWorkerStore(
                 appservice devices in the transaction.
             unused_fallback_keys: Lists of unused fallback keys for relevant
                 appservice devices in the transaction.
+            device_list_summary: The device list summary to include in the transaction.
 
         Returns:
             A new transaction.
         """
 
         def _create_appservice_txn(txn):
-            # work out new txn id (highest txn id for this service += 1)
-            # The highest id may be the last one sent (in which case it is last_txn)
-            # or it may be the highest in the txns list (which are waiting to be/are
-            # being sent)
-            last_txn_id = self._get_last_txn(txn, service.id)
-
-            txn.execute(
-                "SELECT MAX(txn_id) FROM application_services_txns WHERE as_id=?",
-                (service.id,),
-            )
-            highest_txn_id = txn.fetchone()[0]
-            if highest_txn_id is None:
-                highest_txn_id = 0
-
-            new_txn_id = max(highest_txn_id, last_txn_id) + 1
+            new_txn_id = self._as_txn_seq_gen.get_next_id_txn(txn)
 
             # Insert new txn into txn table
             event_ids = json_encoder.encode([e.event_id for e in events])
@@ -268,6 +274,7 @@ class ApplicationServiceTransactionWorkerStore(
                 to_device_messages=to_device_messages,
                 one_time_key_counts=one_time_key_counts,
                 unused_fallback_keys=unused_fallback_keys,
+                device_list_summary=device_list_summary,
             )
 
         return await self.db_pool.runInteraction(
@@ -283,25 +290,8 @@ class ApplicationServiceTransactionWorkerStore(
             txn_id: The transaction ID being completed.
             service: The application service which was sent this transaction.
         """
-        txn_id = int(txn_id)
 
         def _complete_appservice_txn(txn):
-            # Debugging query: Make sure the txn being completed is EXACTLY +1 from
-            # what was there before. If it isn't, we've got problems (e.g. the AS
-            # has probably missed some events), so whine loudly but still continue,
-            # since it shouldn't fail completion of the transaction.
-            last_txn_id = self._get_last_txn(txn, service.id)
-            if (last_txn_id + 1) != txn_id:
-                logger.error(
-                    "appservice: Completing a transaction which has an ID > 1 from "
-                    "the last ID sent to this AS. We've either dropped events or "
-                    "sent it to the AS out of order. FIX ME. last_txn=%s "
-                    "completing_txn=%s service_id=%s",
-                    last_txn_id,
-                    txn_id,
-                    service.id,
-                )
-
             # Set current txn_id for AS to 'txn_id'
             self.db_pool.simple_upsert_txn(
                 txn,
@@ -359,8 +349,8 @@ class ApplicationServiceTransactionWorkerStore(
 
         events = await self.get_events_as_list(event_ids)
 
-        # TODO: to-device messages, one-time key counts and unused fallback keys
-        #       are not yet populated for catch-up transactions.
+        # TODO: to-device messages, one-time key counts, device list summaries and unused
+        #       fallback keys are not yet populated for catch-up transactions.
         #       We likely want to populate those for reliability.
         return AppServiceTransaction(
             service=service,
@@ -370,19 +360,9 @@ class ApplicationServiceTransactionWorkerStore(
             to_device_messages=[],
             one_time_key_counts={},
             unused_fallback_keys={},
+            device_list_summary=DeviceListUpdates(),
         )
 
-    def _get_last_txn(self, txn, service_id: Optional[str]) -> int:
-        txn.execute(
-            "SELECT last_txn FROM application_services_state WHERE as_id=?",
-            (service_id,),
-        )
-        last_txn_id = txn.fetchone()
-        if last_txn_id is None or last_txn_id[0] is None:  # no row exists
-            return 0
-        else:
-            return int(last_txn_id[0])  # select 'last_txn' col
-
     async def set_appservice_last_pos(self, pos: int) -> None:
         def set_appservice_last_pos_txn(txn):
             txn.execute(
@@ -430,7 +410,7 @@ class ApplicationServiceTransactionWorkerStore(
     async def get_type_stream_id_for_appservice(
         self, service: ApplicationService, type: str
     ) -> int:
-        if type not in ("read_receipt", "presence", "to_device"):
+        if type not in ("read_receipt", "presence", "to_device", "device_list"):
             raise ValueError(
                 "Expected type to be a valid application stream id type, got %s"
                 % (type,)
@@ -446,7 +426,8 @@ class ApplicationServiceTransactionWorkerStore(
             )
             last_stream_id = txn.fetchone()
             if last_stream_id is None or last_stream_id[0] is None:  # no row exists
-                return 0
+                # Stream tokens always start from 1, to avoid footguns around `0` being falsy.
+                return 1
             else:
                 return int(last_stream_id[0])
 
@@ -457,7 +438,7 @@ class ApplicationServiceTransactionWorkerStore(
     async def set_appservice_stream_type_pos(
         self, service: ApplicationService, stream_type: str, pos: Optional[int]
     ) -> None:
-        if stream_type not in ("read_receipt", "presence", "to_device"):
+        if stream_type not in ("read_receipt", "presence", "to_device", "device_list"):
             raise ValueError(
                 "Expected type to be a valid application stream id type, got %s"
                 % (stream_type,)
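Transaction IDs are now drawn from a sequence generator rather than `max(txn_id) + 1`; on SQLite, which has no native sequences, the `get_max_as_txn_id` query above seeds it. A simplified sketch of that fallback (not `build_sequence_generator` itself):

    import itertools
    import sqlite3
    from typing import Callable, Iterator


    def build_toy_sequence(
        conn: sqlite3.Connection, get_current_max: Callable[[sqlite3.Cursor], int]
    ) -> Iterator[int]:
        # Seed an in-memory counter from the current maximum, then count upwards.
        return itertools.count(get_current_max(conn.cursor()) + 1)


    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE application_services_txns (txn_id INTEGER)")
    conn.execute("INSERT INTO application_services_txns VALUES (7)")


    def get_max_as_txn_id(txn: sqlite3.Cursor) -> int:
        txn.execute("SELECT COALESCE(max(txn_id), 0) FROM application_services_txns")
        return txn.fetchone()[0]


    seq = build_toy_sequence(conn, get_max_as_txn_id)
    assert next(seq) == 8
    assert next(seq) == 9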
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index 8b0c614ece..8480ea4e1c 100644
--- a/synapse/storage/databases/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -25,7 +25,9 @@ from synapse.storage.database import (
     LoggingTransaction,
     make_tuple_comparison_clause,
 )
-from synapse.storage.databases.main.monthly_active_users import MonthlyActiveUsersStore
+from synapse.storage.databases.main.monthly_active_users import (
+    MonthlyActiveUsersWorkerStore,
+)
 from synapse.types import JsonDict, UserID
 from synapse.util.caches.lrucache import LruCache
 
@@ -397,7 +399,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
         return updated
 
 
-class ClientIpWorkerStore(ClientIpBackgroundUpdateStore):
+class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorkerStore):
     def __init__(
         self,
         database: DatabasePool,
@@ -406,11 +408,40 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore):
     ):
         super().__init__(database, db_conn, hs)
 
+        if hs.config.redis.redis_enabled:
+            # If we're using Redis, we can shift this update process off to
+            # the background worker
+            self._update_on_this_worker = hs.config.worker.run_background_tasks
+        else:
+            # If we're NOT using Redis, this must be handled by the master
+            self._update_on_this_worker = hs.get_instance_name() == "master"
+
         self.user_ips_max_age = hs.config.server.user_ips_max_age
 
+        # (user_id, access_token, ip,) -> last_seen
+        self.client_ip_last_seen = LruCache[Tuple[str, str, str], int](
+            cache_name="client_ip_last_seen", max_size=50000
+        )
+
         if hs.config.worker.run_background_tasks and self.user_ips_max_age:
             self._clock.looping_call(self._prune_old_user_ips, 5 * 1000)
 
+        if self._update_on_this_worker:
+            # This is the designated worker that can write to the client IP
+            # tables.
+
+            # (user_id, access_token, ip,) -> (user_agent, device_id, last_seen)
+            self._batch_row_update: Dict[
+                Tuple[str, str, str], Tuple[str, Optional[str], int]
+            ] = {}
+
+            self._client_ip_looper = self._clock.looping_call(
+                self._update_client_ips_batch, 5 * 1000
+            )
+            self.hs.get_reactor().addSystemEventTrigger(
+                "before", "shutdown", self._update_client_ips_batch
+            )
+
     @wrap_as_background_process("prune_old_user_ips")
     async def _prune_old_user_ips(self) -> None:
         """Removes entries in user IPs older than the configured period."""
@@ -456,7 +487,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore):
             "_prune_old_user_ips", _prune_old_user_ips_txn
         )
 
-    async def get_last_client_ip_by_device(
+    async def _get_last_client_ip_by_device_from_database(
         self, user_id: str, device_id: Optional[str]
     ) -> Dict[Tuple[str, str], DeviceLastConnectionInfo]:
         """For each device_id listed, give the user_ip it was last seen on.
@@ -487,7 +518,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore):
 
         return {(d["user_id"], d["device_id"]): d for d in res}
 
-    async def get_user_ip_and_agents(
+    async def _get_user_ip_and_agents_from_database(
         self, user: UserID, since_ts: int = 0
     ) -> List[LastConnectionInfo]:
         """Fetch the IPs and user agents for a user since the given timestamp.
@@ -539,34 +570,6 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore):
             for access_token, ip, user_agent, last_seen in rows
         ]
 
-
-class ClientIpStore(ClientIpWorkerStore, MonthlyActiveUsersStore):
-    def __init__(
-        self,
-        database: DatabasePool,
-        db_conn: LoggingDatabaseConnection,
-        hs: "HomeServer",
-    ):
-
-        # (user_id, access_token, ip,) -> last_seen
-        self.client_ip_last_seen = LruCache[Tuple[str, str, str], int](
-            cache_name="client_ip_last_seen", max_size=50000
-        )
-
-        super().__init__(database, db_conn, hs)
-
-        # (user_id, access_token, ip,) -> (user_agent, device_id, last_seen)
-        self._batch_row_update: Dict[
-            Tuple[str, str, str], Tuple[str, Optional[str], int]
-        ] = {}
-
-        self._client_ip_looper = self._clock.looping_call(
-            self._update_client_ips_batch, 5 * 1000
-        )
-        self.hs.get_reactor().addSystemEventTrigger(
-            "before", "shutdown", self._update_client_ips_batch
-        )
-
     async def insert_client_ip(
         self,
         user_id: str,
@@ -584,17 +587,27 @@ class ClientIpStore(ClientIpWorkerStore, MonthlyActiveUsersStore):
             last_seen = self.client_ip_last_seen.get(key)
         except KeyError:
             last_seen = None
-        await self.populate_monthly_active_users(user_id)
+
         # Rate-limited inserts
         if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
             return
 
         self.client_ip_last_seen.set(key, now)
 
-        self._batch_row_update[key] = (user_agent, device_id, now)
+        if self._update_on_this_worker:
+            await self.populate_monthly_active_users(user_id)
+            self._batch_row_update[key] = (user_agent, device_id, now)
+        else:
+            # We are not the designated writer-worker, so stream over replication
+            self.hs.get_replication_command_handler().send_user_ip(
+                user_id, access_token, ip, user_agent, device_id, now
+            )
 
     @wrap_as_background_process("update_client_ips")
     async def _update_client_ips_batch(self) -> None:
+        assert (
+            self._update_on_this_worker
+        ), "This worker is not designated to update client IPs"
 
         # If the DB pool has already terminated, don't try updating
         if not self.db_pool.is_running():
@@ -612,6 +625,10 @@ class ClientIpStore(ClientIpWorkerStore, MonthlyActiveUsersStore):
         txn: LoggingTransaction,
         to_update: Mapping[Tuple[str, str, str], Tuple[str, Optional[str], int]],
     ) -> None:
+        assert (
+            self._update_on_this_worker
+        ), "This worker is not designated to update client IPs"
+
         if "user_ips" in self.db_pool._unsafe_to_upsert_tables or (
             not self.database_engine.can_native_upsert
         ):
@@ -662,7 +679,12 @@ class ClientIpStore(ClientIpWorkerStore, MonthlyActiveUsersStore):
             A dictionary mapping a tuple of (user_id, device_id) to dicts, with
             keys giving the column names from the devices table.
         """
-        ret = await super().get_last_client_ip_by_device(user_id, device_id)
+        ret = await self._get_last_client_ip_by_device_from_database(user_id, device_id)
+
+        if not self._update_on_this_worker:
+            # Only the writing-worker has additional in-memory data to enhance
+            # the result
+            return ret
 
         # Update what is retrieved from the database with data which is pending
         # insertion, as if it has already been stored in the database.
@@ -707,9 +729,16 @@ class ClientIpStore(ClientIpWorkerStore, MonthlyActiveUsersStore):
             Only the latest user agent for each access token and IP address combination
             is available.
         """
+        rows_from_db = await self._get_user_ip_and_agents_from_database(user, since_ts)
+
+        if not self._update_on_this_worker:
+            # Only the writing-worker has additional in-memory data to enhance
+            # the result
+            return rows_from_db
+
         results: Dict[Tuple[str, str], LastConnectionInfo] = {
             (connection["access_token"], connection["ip"]): connection
-            for connection in await super().get_user_ip_and_agents(user, since_ts)
+            for connection in rows_from_db
         }
 
         # Overlay data that is pending insertion on top of the results from the
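Both the removed `ClientIpStore.insert_client_ip` and its merged worker-store replacement rely on the same rate-limiting idea: record a `(user_id, access_token, ip)` triple at most once per `LAST_SEEN_GRANULARITY`. A toy version with a plain dict instead of an `LruCache`; the window used here is an assumption, not a value taken from this diff:

    from typing import Dict, Tuple

    LAST_SEEN_GRANULARITY_MS = 120 * 1000  # assumed window; the real constant is imported


    def should_record(
        last_seen_cache: Dict[Tuple[str, str, str], int],
        key: Tuple[str, str, str],
        now_ms: int,
    ) -> bool:
        last_seen = last_seen_cache.get(key)
        if last_seen is not None and (now_ms - last_seen) < LAST_SEEN_GRANULARITY_MS:
            # Seen this (user, token, ip) recently: skip the write entirely.
            return False
        last_seen_cache[key] = now_ms
        return True


    cache: Dict[Tuple[str, str, str], int] = {}
    key = ("@alice:example.org", "secret_token", "198.51.100.1")
    assert should_record(cache, key, 1_000_000)
    assert not should_record(cache, key, 1_030_000)  # 30 seconds later: rate-limited
    assert should_record(cache, key, 1_200_000)      # 200 seconds later: recorded again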
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 3b3a089b76..07eea4b3d2 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -681,42 +681,64 @@ class DeviceWorkerStore(SQLBaseStore):
         return self._device_list_stream_cache.get_all_entities_changed(from_key)
 
     async def get_users_whose_devices_changed(
-        self, from_key: int, user_ids: Iterable[str]
+        self,
+        from_key: int,
+        user_ids: Optional[Iterable[str]] = None,
+        to_key: Optional[int] = None,
     ) -> Set[str]:
         """Get set of users whose devices have changed since `from_key` that
         are in the given list of user_ids.
 
         Args:
-            from_key: The device lists stream token
-            user_ids: The user IDs to query for devices.
+            from_key: The minimum device lists stream token to query device list changes for,
+                exclusive.
+            user_ids: If provided, only check if these users have changed their device lists.
+                Otherwise changes from all users are returned.
+            to_key: The maximum device lists stream token to query device list changes for,
+                inclusive.
 
         Returns:
-            The set of user_ids whose devices have changed since `from_key`
+            The set of user_ids whose devices have changed since `from_key` (exclusive)
+                until `to_key` (inclusive).
         """
-
         # Get set of users who *may* have changed. Users not in the returned
         # list have definitely not changed.
-        to_check = self._device_list_stream_cache.get_entities_changed(
-            user_ids, from_key
-        )
+        if user_ids is None:
+            # Get set of all users that have had device list changes since 'from_key'
+            user_ids_to_check = self._device_list_stream_cache.get_all_entities_changed(
+                from_key
+            )
+        else:
+            # The same as above, but filter results to only those users in 'user_ids'
+            user_ids_to_check = self._device_list_stream_cache.get_entities_changed(
+                user_ids, from_key
+            )
 
-        if not to_check:
+        if not user_ids_to_check:
             return set()
 
         def _get_users_whose_devices_changed_txn(txn):
             changes = set()
 
-            sql = """
+            stream_id_where_clause = "stream_id > ?"
+            sql_args = [from_key]
+
+            if to_key:
+                stream_id_where_clause += " AND stream_id <= ?"
+                sql_args.append(to_key)
+
+            sql = f"""
                 SELECT DISTINCT user_id FROM device_lists_stream
-                WHERE stream_id > ?
+                WHERE {stream_id_where_clause}
                 AND
             """
 
-            for chunk in batch_iter(to_check, 100):
+            # Query device changes with a batch of users at a time
+            for chunk in batch_iter(user_ids_to_check, 100):
                 clause, args = make_in_list_sql_clause(
                     txn.database_engine, "user_id", chunk
                 )
-                txn.execute(sql + clause, (from_key,) + tuple(args))
+                txn.execute(sql + clause, sql_args + args)
                 changes.update(user_id for user_id, in txn)
 
             return changes
@@ -788,6 +810,7 @@ class DeviceWorkerStore(SQLBaseStore):
                     SELECT stream_id, destination AS entity FROM device_lists_outbound_pokes
                 ) AS e
                 WHERE ? < stream_id AND stream_id <= ?
+                ORDER BY stream_id ASC
                 LIMIT ?
             """
 
@@ -1506,7 +1529,11 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         )
 
     async def add_device_change_to_streams(
-        self, user_id: str, device_ids: Collection[str], hosts: Collection[str]
+        self,
+        user_id: str,
+        device_ids: Collection[str],
+        hosts: Optional[Collection[str]],
+        room_ids: Collection[str],
     ) -> Optional[int]:
         """Persist that a user's devices have been updated, and which hosts
         (if any) should be poked.
@@ -1515,7 +1542,10 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             user_id: The ID of the user whose device changed.
             device_ids: The IDs of any changed devices. If empty, this function will
                 return None.
-            hosts: The remote destinations that should be notified of the change.
+            hosts: The remote destinations that should be notified of the change. If
+                None then the set of hosts has *not* been calculated, and will be
+                calculated later by a background task.
+            room_ids: The rooms that the user is in.
 
         Returns:
             The maximum stream ID of device list updates that were added to the database, or
@@ -1524,34 +1554,62 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         if not device_ids:
             return None
 
-        async with self._device_list_id_gen.get_next_mult(
-            len(device_ids)
-        ) as stream_ids:
-            await self.db_pool.runInteraction(
-                "add_device_change_to_stream",
-                self._add_device_change_to_stream_txn,
+        context = get_active_span_text_map()
+
+        def add_device_changes_txn(
+            txn, stream_ids_for_device_change, stream_ids_for_outbound_pokes
+        ):
+            self._add_device_change_to_stream_txn(
+                txn,
                 user_id,
                 device_ids,
-                stream_ids,
+                stream_ids_for_device_change,
             )
 
-        if not hosts:
-            return stream_ids[-1]
+            self._add_device_outbound_room_poke_txn(
+                txn,
+                user_id,
+                device_ids,
+                room_ids,
+                stream_ids_for_device_change,
+                context,
+                hosts_have_been_calculated=hosts is not None,
+            )
 
-        context = get_active_span_text_map()
-        async with self._device_list_id_gen.get_next_mult(
-            len(hosts) * len(device_ids)
-        ) as stream_ids:
-            await self.db_pool.runInteraction(
-                "add_device_outbound_poke_to_stream",
-                self._add_device_outbound_poke_to_stream_txn,
+            # If the set of hosts to send to has not been calculated yet (and so
+            # `hosts` is None) or there are no `hosts` to send to, then skip
+            # trying to persist them to the DB.
+            if not hosts:
+                return
+
+            self._add_device_outbound_poke_to_stream_txn(
+                txn,
                 user_id,
                 device_ids,
                 hosts,
-                stream_ids,
+                stream_ids_for_outbound_pokes,
                 context,
             )
 
+        # `device_lists_stream` wants a stream ID per device update.
+        num_stream_ids = len(device_ids)
+
+        if hosts:
+            # `device_lists_outbound_pokes` wants a different stream ID for
+            # each row, which is a row per host per device update.
+            num_stream_ids += len(hosts) * len(device_ids)
+
+        async with self._device_list_id_gen.get_next_mult(num_stream_ids) as stream_ids:
+            stream_ids_for_device_change = stream_ids[: len(device_ids)]
+            stream_ids_for_outbound_pokes = stream_ids[len(device_ids) :]
+
+            await self.db_pool.runInteraction(
+                "add_device_change_to_stream",
+                add_device_changes_txn,
+                stream_ids_for_device_change,
+                stream_ids_for_outbound_pokes,
+            )
+
         return stream_ids[-1]
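
As a rough illustration of the allocation described in the comments above (and not Synapse's actual ID generator), a single block of stream IDs can be sliced into the per-device-update portion and the per-(host, device) outbound-poke portion as follows; `allocate_stream_ids` is a hypothetical stand-in for `get_next_mult`:

    from itertools import count
    from typing import List, Tuple

    _counter = count(1)

    def allocate_stream_ids(n: int) -> List[int]:
        # Hypothetical stand-in for the real ID generator: hand out `n`
        # monotonically increasing stream IDs.
        return [next(_counter) for _ in range(n)]

    def split_stream_ids(
        device_ids: List[str], hosts: List[str]
    ) -> Tuple[List[int], List[int]]:
        # One ID per device update, plus (if the hosts are already known) one
        # ID per (host, device) outbound poke row.
        num_ids = len(device_ids)
        if hosts:
            num_ids += len(hosts) * len(device_ids)
        stream_ids = allocate_stream_ids(num_ids)
        return stream_ids[: len(device_ids)], stream_ids[len(device_ids) :]

    # e.g. two devices and two hosts -> 2 device-change IDs and 4 poke IDs.
    device_change_ids, outbound_poke_ids = split_stream_ids(["D1", "D2"], ["hs1", "hs2"])
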
 
     def _add_device_change_to_stream_txn(
@@ -1595,7 +1653,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         user_id: str,
         device_ids: Iterable[str],
         hosts: Collection[str],
-        stream_ids: List[str],
+        stream_ids: List[int],
         context: Dict[str, str],
     ) -> None:
         for host in hosts:
@@ -1606,8 +1664,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             )
 
         now = self._clock.time_msec()
-        next_stream_id = iter(stream_ids)
+        stream_id_iterator = iter(stream_ids)
 
+        encoded_context = json_encoder.encode(context)
         self.db_pool.simple_insert_many_txn(
             txn,
             table="device_lists_outbound_pokes",
@@ -1623,16 +1682,146 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             values=[
                 (
                     destination,
-                    next(next_stream_id),
+                    next(stream_id_iterator),
                     user_id,
                     device_id,
                     False,
                     now,
-                    json_encoder.encode(context)
-                    if whitelisted_homeserver(destination)
-                    else "{}",
+                    encoded_context if whitelisted_homeserver(destination) else "{}",
                 )
                 for destination in hosts
                 for device_id in device_ids
             ],
         )
+
+    def _add_device_outbound_room_poke_txn(
+        self,
+        txn: LoggingTransaction,
+        user_id: str,
+        device_ids: Iterable[str],
+        room_ids: Collection[str],
+        stream_ids: List[int],
+        context: Dict[str, str],
+        hosts_have_been_calculated: bool,
+    ) -> None:
+        """Record the user in the room has updated their device.
+
+        Args:
+            hosts_have_been_calculated: True if `device_lists_outbound_pokes`
+                has been updated already with the updates.
+        """
+
+        # We only need to convert these to outbound pokes if the user is local
+        # to this homeserver.
+        converted_to_destinations = (
+            hosts_have_been_calculated or not self.hs.is_mine_id(user_id)
+        )
+
+        encoded_context = json_encoder.encode(context)
+
+        # The `device_lists_changes_in_room.stream_id` column matches the
+        # corresponding `stream_id` of the update in the `device_lists_stream`
+        # table, i.e. all rows persisted for the same device update will have
+        # the same `stream_id` (but different room IDs).
+        self.db_pool.simple_insert_many_txn(
+            txn,
+            table="device_lists_changes_in_room",
+            keys=(
+                "user_id",
+                "device_id",
+                "room_id",
+                "stream_id",
+                "converted_to_destinations",
+                "opentracing_context",
+            ),
+            values=[
+                (
+                    user_id,
+                    device_id,
+                    room_id,
+                    stream_id,
+                    converted_to_destinations,
+                    encoded_context,
+                )
+                for room_id in room_ids
+                for device_id, stream_id in zip(device_ids, stream_ids)
+            ],
+        )
+
+    async def get_uncoverted_outbound_room_pokes(
+        self, limit: int = 10
+    ) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]:
+        """Get device list changes by room that have not yet been handled and
+        written to `device_lists_outbound_pokes`.
+
+        Returns:
+            A list of (user ID, device ID, room ID, stream ID, optional
+            opentracing context) tuples.
+        """
+
+        sql = """
+            SELECT user_id, device_id, room_id, stream_id, opentracing_context
+            FROM device_lists_changes_in_room
+            WHERE NOT converted_to_destinations
+            ORDER BY stream_id
+            LIMIT ?
+        """
+
+        def get_uncoverted_outbound_room_pokes_txn(txn):
+            txn.execute(sql, (limit,))
+            return txn.fetchall()
+
+        return await self.db_pool.runInteraction(
+            "get_uncoverted_outbound_room_pokes", get_uncoverted_outbound_room_pokes_txn
+        )
+
+    async def add_device_list_outbound_pokes(
+        self,
+        user_id: str,
+        device_id: str,
+        room_id: str,
+        stream_id: int,
+        hosts: Collection[str],
+        context: Optional[Dict[str, str]],
+    ) -> None:
+        """Queue the device update to be sent to the given set of hosts,
+        calculated from the room ID.
+
+        Marks the associated row in `device_lists_changes_in_room` as handled.
+        """
+
+        def add_device_list_outbound_pokes_txn(txn, stream_ids: List[int]):
+            if hosts:
+                self._add_device_outbound_poke_to_stream_txn(
+                    txn,
+                    user_id=user_id,
+                    device_ids=[device_id],
+                    hosts=hosts,
+                    stream_ids=stream_ids,
+                    context=context,
+                )
+
+            self.db_pool.simple_update_txn(
+                txn,
+                table="device_lists_changes_in_room",
+                keyvalues={
+                    "user_id": user_id,
+                    "device_id": device_id,
+                    "stream_id": stream_id,
+                    "room_id": room_id,
+                },
+                updatevalues={"converted_to_destinations": True},
+            )
+
+        if not hosts:
+            # If there are no hosts then we don't need to generate any stream IDs.
+            return await self.db_pool.runInteraction(
+                "add_device_list_outbound_pokes",
+                add_device_list_outbound_pokes_txn,
+                [],
+            )
+
+        async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids:
+            return await self.db_pool.runInteraction(
+                "add_device_list_outbound_pokes",
+                add_device_list_outbound_pokes_txn,
+                stream_ids,
+            )
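
A minimal sketch of the background task that the new `device_lists_changes_in_room` flow implies: poll the unconverted rows and turn each into outbound pokes for the hosts in that room. `get_hosts_in_room` is an assumed helper for resolving a room's remote servers, and the batch size and loop shape are illustrative rather than taken from Synapse:

    from typing import Collection

    async def convert_room_device_changes(store, get_hosts_in_room) -> None:
        # Drain rows of `device_lists_changes_in_room` that have not yet been
        # converted into `device_lists_outbound_pokes` entries.
        while True:
            rows = await store.get_uncoverted_outbound_room_pokes(limit=100)
            if not rows:
                break

            for user_id, device_id, room_id, stream_id, context in rows:
                # Work out which remote servers currently care about this room.
                hosts: Collection[str] = await get_hosts_in_room(room_id)
                await store.add_device_list_outbound_pokes(
                    user_id=user_id,
                    device_id=device_id,
                    room_id=room_id,
                    stream_id=stream_id,
                    hosts=hosts,
                    context=context,
                )
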
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 59454a47df..a60e3f4fdd 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -22,7 +22,6 @@ from typing import (
     Dict,
     Iterable,
     List,
-    NoReturn,
     Optional,
     Set,
     Tuple,
@@ -1330,10 +1329,9 @@ class EventsWorkerStore(SQLBaseStore):
         return results
 
     @cached(max_entries=100000, tree=True)
-    async def have_seen_event(self, room_id: str, event_id: str) -> NoReturn:
-        # this only exists for the benefit of the @cachedList descriptor on
-        # _have_seen_events_dict
-        raise NotImplementedError()
+    async def have_seen_event(self, room_id: str, event_id: str) -> bool:
+        res = await self._have_seen_events_dict(((room_id, event_id),))
+        return res[(room_id, event_id)]
 
     def _get_current_state_event_counts_txn(
         self, txn: LoggingTransaction, room_id: str
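
The change above turns the per-event cache entry into a thin wrapper around the batched lookup rather than a stub. A toy, framework-free sketch of that per-item/batch pattern, using a plain dict in place of Synapse's `@cached`/`@cachedList` descriptors:

    from typing import Collection, Dict, Tuple

    class SeenEventCache:
        def __init__(self) -> None:
            self._cache: Dict[Tuple[str, str], bool] = {}

        async def have_seen_events_dict(
            self, keys: Collection[Tuple[str, str]]
        ) -> Dict[Tuple[str, str], bool]:
            # Batch lookup: only hit the database for keys we haven't cached yet.
            missing = [k for k in keys if k not in self._cache]
            if missing:
                self._cache.update(await self._fetch_from_db(missing))
            return {k: self._cache[k] for k in keys}

        async def have_seen_event(self, room_id: str, event_id: str) -> bool:
            # The per-item entry point simply delegates to the batch lookup,
            # mirroring the shape of the change above.
            res = await self.have_seen_events_dict([(room_id, event_id)])
            return res[(room_id, event_id)]

        async def _fetch_from_db(self, keys):
            # Placeholder for the real database query.
            return {k: False for k in keys}
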
diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
index 216622964a..4f1c22c71b 100644
--- a/synapse/storage/databases/main/monthly_active_users.py
+++ b/synapse/storage/databases/main/monthly_active_users.py
@@ -15,7 +15,6 @@ import logging
 from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast
 
 from synapse.metrics.background_process_metrics import wrap_as_background_process
-from synapse.storage._base import SQLBaseStore
 from synapse.storage.database import (
     DatabasePool,
     LoggingDatabaseConnection,
@@ -36,7 +35,7 @@ logger = logging.getLogger(__name__)
 LAST_SEEN_GRANULARITY = 60 * 60 * 1000
 
 
-class MonthlyActiveUsersWorkerStore(SQLBaseStore):
+class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
     def __init__(
         self,
         database: DatabasePool,
@@ -47,9 +46,30 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
         self._clock = hs.get_clock()
         self.hs = hs
 
+        if hs.config.redis.redis_enabled:
+            # If we're using Redis, we can shift this update process off to
+            # the background worker
+            self._update_on_this_worker = hs.config.worker.run_background_tasks
+        else:
+            # If we're NOT using Redis, this must be handled by the master
+            self._update_on_this_worker = hs.get_instance_name() == "master"
+
         self._limit_usage_by_mau = hs.config.server.limit_usage_by_mau
         self._max_mau_value = hs.config.server.max_mau_value
 
+        self._mau_stats_only = hs.config.server.mau_stats_only
+
+        if self._update_on_this_worker:
+            # Do not add more reserved users than the total allowable number
+            self.db_pool.new_transaction(
+                db_conn,
+                "initialise_mau_threepids",
+                [],
+                [],
+                self._initialise_reserved_users,
+                hs.config.server.mau_limits_reserved_threepids[: self._max_mau_value],
+            )
+
     @cached(num_args=0)
     async def get_monthly_active_count(self) -> int:
         """Generates current count of monthly active users
@@ -222,28 +242,6 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
             "reap_monthly_active_users", _reap_users, reserved_users
         )
 
-
-class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore, RegistrationWorkerStore):
-    def __init__(
-        self,
-        database: DatabasePool,
-        db_conn: LoggingDatabaseConnection,
-        hs: "HomeServer",
-    ):
-        super().__init__(database, db_conn, hs)
-
-        self._mau_stats_only = hs.config.server.mau_stats_only
-
-        # Do not add more reserved users than the total allowable number
-        self.db_pool.new_transaction(
-            db_conn,
-            "initialise_mau_threepids",
-            [],
-            [],
-            self._initialise_reserved_users,
-            hs.config.server.mau_limits_reserved_threepids[: self._max_mau_value],
-        )
-
     def _initialise_reserved_users(
         self, txn: LoggingTransaction, threepids: List[dict]
     ) -> None:
@@ -254,6 +252,9 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore, RegistrationWorkerS
             txn:
             threepids: List of threepid dicts to reserve
         """
+        assert (
+            self._update_on_this_worker
+        ), "This worker is not designated to update MAUs"
 
         # XXX what is this function trying to achieve?  It upserts into
         # monthly_active_users for each *registered* reserved mau user, but why?
@@ -287,6 +288,10 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore, RegistrationWorkerS
         Args:
             user_id: user to add/update
         """
+        assert (
+            self._update_on_this_worker
+        ), "This worker is not designated to update MAUs"
+
         # Support user never to be included in MAU stats. Note I can't easily call this
         # from upsert_monthly_active_user_txn because then I need a _txn form of
         # is_support_user which is complicated because I want to cache the result.
@@ -322,6 +327,9 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore, RegistrationWorkerS
             txn (cursor):
             user_id (str): user to add/update
         """
+        assert (
+            self._update_on_this_worker
+        ), "This worker is not designated to update MAUs"
 
         # Am consciously deciding to lock the table on the basis that is ought
         # never be a big table and alternative approaches (batching multiple
@@ -349,6 +357,10 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore, RegistrationWorkerS
         Args:
             user_id(str): the user_id to query
         """
+        assert (
+            self._update_on_this_worker
+        ), "This worker is not designated to update MAUs"
+
         if self._limit_usage_by_mau or self._mau_stats_only:
             # Trial users and guests should not be included as part of MAU group
             is_guest = await self.is_guest(user_id)  # type: ignore[attr-defined]
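
A compact sketch of the worker-selection rule introduced above, assuming a config exposing `redis_enabled`, `run_background_tasks` and the instance name; the assertion mirrors the guards added to the MAU update methods:

    def should_update_mau_on_this_worker(
        redis_enabled: bool, run_background_tasks: bool, instance_name: str
    ) -> bool:
        if redis_enabled:
            # With Redis (and hence proper worker support) the MAU bookkeeping
            # can live on the designated background worker.
            return run_background_tasks
        # Without Redis, only the master process may update MAU values.
        return instance_name == "master"

    def guard_mau_update(update_on_this_worker: bool) -> None:
        assert update_on_this_worker, "This worker is not designated to update MAUs"
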
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 7f3d190e94..c7634c92fd 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -1745,6 +1745,18 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
             "replace_refresh_token", _replace_refresh_token_txn
         )
 
+    @cached()
+    async def is_guest(self, user_id: str) -> bool:
+        res = await self.db_pool.simple_select_one_onecol(
+            table="users",
+            keyvalues={"name": user_id},
+            retcol="is_guest",
+            allow_none=True,
+            desc="is_guest",
+        )
+
+        return res if res else False
+
 
 class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
     def __init__(
@@ -1887,18 +1899,6 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
         self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
         txn.call_after(self.is_guest.invalidate, (user_id,))
 
-    @cached()
-    async def is_guest(self, user_id: str) -> bool:
-        res = await self.db_pool.simple_select_one_onecol(
-            table="users",
-            keyvalues={"name": user_id},
-            retcol="is_guest",
-            allow_none=True,
-            desc="is_guest",
-        )
-
-        return res if res else False
-
 
 class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
     def __init__(
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index b2295fd51f..64a7808140 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -26,8 +26,6 @@ from typing import (
     cast,
 )
 
-import attr
-
 from synapse.api.constants import RelationTypes
 from synapse.events import EventBase
 from synapse.storage._base import SQLBaseStore
@@ -39,8 +37,7 @@ from synapse.storage.database import (
 )
 from synapse.storage.databases.main.stream import generate_pagination_where_clause
 from synapse.storage.engines import PostgresEngine
-from synapse.storage.relations import AggregationPaginationToken, PaginationChunk
-from synapse.types import RoomStreamToken, StreamToken
+from synapse.types import JsonDict, RoomStreamToken, StreamToken
 from synapse.util.caches.descriptors import cached, cachedList
 
 if TYPE_CHECKING:
@@ -73,7 +70,7 @@ class RelationsWorkerStore(SQLBaseStore):
         direction: str = "b",
         from_token: Optional[StreamToken] = None,
         to_token: Optional[StreamToken] = None,
-    ) -> PaginationChunk:
+    ) -> Tuple[List[str], Optional[StreamToken]]:
         """Get a list of relations for an event, ordered by topological ordering.
 
         Args:
@@ -90,8 +87,10 @@ class RelationsWorkerStore(SQLBaseStore):
             to_token: Fetch rows up to the given token, or up to the end if None.
 
         Returns:
-            List of event IDs that match relations requested. The rows are of
-            the form `{"event_id": "..."}`.
+            A tuple of:
+                A list of related event IDs
+
+                The next stream token, if one exists.
         """
         # We don't use `event_id`, it's there so that we can cache based on
         # it. The `event_id` must match the `event.event_id`.
@@ -146,7 +145,7 @@ class RelationsWorkerStore(SQLBaseStore):
 
         def _get_recent_references_for_event_txn(
             txn: LoggingTransaction,
-        ) -> PaginationChunk:
+        ) -> Tuple[List[str], Optional[StreamToken]]:
             txn.execute(sql, where_args + [limit + 1])
 
             last_topo_id = None
@@ -156,7 +155,7 @@ class RelationsWorkerStore(SQLBaseStore):
                 # Do not include edits for redacted events as they leak event
                 # content.
                 if not is_redacted or row[1] != RelationTypes.REPLACE:
-                    events.append({"event_id": row[0]})
+                    events.append(row[0])
                 last_topo_id = row[2]
                 last_stream_id = row[3]
 
@@ -179,9 +178,7 @@ class RelationsWorkerStore(SQLBaseStore):
                         groups_key=0,
                     )
 
-            return PaginationChunk(
-                chunk=list(events[:limit]), next_batch=next_token, prev_batch=from_token
-            )
+            return events[:limit], next_token
 
         return await self.db_pool.runInteraction(
             "get_recent_references_for_event", _get_recent_references_for_event_txn
@@ -252,15 +249,8 @@ class RelationsWorkerStore(SQLBaseStore):
 
     @cached(tree=True)
     async def get_aggregation_groups_for_event(
-        self,
-        event_id: str,
-        room_id: str,
-        event_type: Optional[str] = None,
-        limit: int = 5,
-        direction: str = "b",
-        from_token: Optional[AggregationPaginationToken] = None,
-        to_token: Optional[AggregationPaginationToken] = None,
-    ) -> PaginationChunk:
+        self, event_id: str, room_id: str, limit: int = 5
+    ) -> List[JsonDict]:
         """Get a list of annotations on the event, grouped by event type and
         aggregation key, sorted by count.
 
@@ -270,79 +260,36 @@ class RelationsWorkerStore(SQLBaseStore):
         Args:
             event_id: Fetch events that relate to this event ID.
             room_id: The room the event belongs to.
-            event_type: Only fetch events with this event type, if given.
             limit: Only fetch the `limit` groups.
-            direction: Whether to fetch the highest count first (`"b"`) or
-                the lowest count first (`"f"`).
-            from_token: Fetch rows from the given token, or from the start if None.
-            to_token: Fetch rows up to the given token, or up to the end if None.
 
         Returns:
             List of groups of annotations that match. Each row is a dict with
             `type`, `key` and `count` fields.
         """
 
-        where_clause = ["relates_to_id = ?", "room_id = ?", "relation_type = ?"]
-        where_args: List[Union[str, int]] = [
+        where_args = [
             event_id,
             room_id,
             RelationTypes.ANNOTATION,
+            limit,
         ]
 
-        if event_type:
-            where_clause.append("type = ?")
-            where_args.append(event_type)
-
-        having_clause = generate_pagination_where_clause(
-            direction=direction,
-            column_names=("COUNT(*)", "MAX(stream_ordering)"),
-            from_token=attr.astuple(from_token) if from_token else None,  # type: ignore[arg-type]
-            to_token=attr.astuple(to_token) if to_token else None,  # type: ignore[arg-type]
-            engine=self.database_engine,
-        )
-
-        if direction == "b":
-            order = "DESC"
-        else:
-            order = "ASC"
-
-        if having_clause:
-            having_clause = "HAVING " + having_clause
-        else:
-            having_clause = ""
-
         sql = """
-            SELECT type, aggregation_key, COUNT(DISTINCT sender), MAX(stream_ordering)
+            SELECT type, aggregation_key, COUNT(DISTINCT sender)
             FROM event_relations
             INNER JOIN events USING (event_id)
-            WHERE {where_clause}
+            WHERE relates_to_id = ? AND room_id = ? AND relation_type = ?
             GROUP BY relation_type, type, aggregation_key
-            {having_clause}
-            ORDER BY COUNT(*) {order}, MAX(stream_ordering) {order}
+            ORDER BY COUNT(*) DESC
             LIMIT ?
-        """.format(
-            where_clause=" AND ".join(where_clause),
-            order=order,
-            having_clause=having_clause,
-        )
+        """
 
         def _get_aggregation_groups_for_event_txn(
             txn: LoggingTransaction,
-        ) -> PaginationChunk:
-            txn.execute(sql, where_args + [limit + 1])
-
-            next_batch = None
-            events = []
-            for row in txn:
-                events.append({"type": row[0], "key": row[1], "count": row[2]})
-                next_batch = AggregationPaginationToken(row[2], row[3])
+        ) -> List[JsonDict]:
+            txn.execute(sql, where_args)
 
-            if len(events) <= limit:
-                next_batch = None
-
-            return PaginationChunk(
-                chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token
-            )
+            return [{"type": row[0], "key": row[1], "count": row[2]} for row in txn]
 
         return await self.db_pool.runInteraction(
             "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 3248da5356..98d09b3736 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -361,7 +361,10 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         return None
 
     async def get_rooms_for_local_user_where_membership_is(
-        self, user_id: str, membership_list: Collection[str]
+        self,
+        user_id: str,
+        membership_list: Collection[str],
+        excluded_rooms: Optional[List[str]] = None,
     ) -> List[RoomsForUser]:
         """Get all the rooms for this *local* user where the membership for this user
         matches one in the membership list.
@@ -372,6 +375,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             user_id: The user ID.
             membership_list: A list of synapse.api.constants.Membership
                 values which the user must be in.
+            excluded_rooms: A list of rooms to ignore.
 
         Returns:
             The RoomsForUser that the user matches the membership types.
@@ -386,12 +390,19 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             membership_list,
         )
 
-        # Now we filter out forgotten rooms
-        forgotten_rooms = await self.get_forgotten_rooms_for_user(user_id)
-        return [room for room in rooms if room.room_id not in forgotten_rooms]
+        # Now we filter out forgotten and excluded rooms
+        rooms_to_exclude: Set[str] = await self.get_forgotten_rooms_for_user(user_id)
+
+        if excluded_rooms is not None:
+            rooms_to_exclude.update(set(excluded_rooms))
+
+        return [room for room in rooms if room.room_id not in rooms_to_exclude]
 
     def _get_rooms_for_local_user_where_membership_is_txn(
-        self, txn, user_id: str, membership_list: List[str]
+        self,
+        txn,
+        user_id: str,
+        membership_list: List[str],
     ) -> List[RoomsForUser]:
         # Paranoia check.
         if not self.hs.is_mine_id(user_id):
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index 28460fd364..4a461a0abb 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -12,9 +12,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import collections.abc
 import logging
-from typing import TYPE_CHECKING, Collection, Iterable, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Collection, Dict, Iterable, Optional, Set, Tuple
+
+from frozendict import frozendict
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
@@ -29,7 +30,7 @@ from synapse.storage.database import (
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
 from synapse.storage.state import StateFilter
-from synapse.types import JsonDict, StateMap
+from synapse.types import JsonDict, JsonMapping, StateMap
 from synapse.util.caches import intern_string
 from synapse.util.caches.descriptors import cached, cachedList
 
@@ -132,7 +133,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         return room_version
 
-    async def get_room_predecessor(self, room_id: str) -> Optional[dict]:
+    async def get_room_predecessor(self, room_id: str) -> Optional[JsonMapping]:
         """Get the predecessor of an upgraded room if it exists.
         Otherwise return None.
 
@@ -158,9 +159,10 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         predecessor = create_event.content.get("predecessor", None)
 
         # Ensure the key is a dictionary
-        if not isinstance(predecessor, collections.abc.Mapping):
+        if not isinstance(predecessor, (dict, frozendict)):
             return None
 
+        # The keys must be strings since the data is JSON.
         return predecessor
 
     async def get_create_event_for_room(self, room_id: str) -> EventBase:
@@ -306,8 +308,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         list_name="event_ids",
         num_args=1,
     )
-    async def _get_state_group_for_events(self, event_ids: Collection[str]) -> JsonDict:
-        """Returns mapping event_id -> state_group"""
+    async def _get_state_group_for_events(
+        self, event_ids: Collection[str]
+    ) -> Dict[str, int]:
+        """Returns mapping event_id -> state_group.
+
+        Raises:
+             RuntimeError if the state is unknown at any of the given events
+        """
         rows = await self.db_pool.simple_select_many_batch(
             table="event_to_state_groups",
             column="event_id",
@@ -317,7 +325,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             desc="_get_state_group_for_events",
         )
 
-        return {row["event_id"]: row["state_group"] for row in rows}
+        res = {row["event_id"]: row["state_group"] for row in rows}
+        for e in event_ids:
+            if e not in res:
+                raise RuntimeError("No state group for unknown or outlier event %s" % e)
+        return res
 
     async def get_referenced_state_groups(
         self, state_groups: Iterable[int]
@@ -521,7 +533,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
         )
 
         for user_id in potentially_left_users - joined_users:
-            await self.mark_remote_user_device_list_as_unsubscribed(user_id)
+            await self.mark_remote_user_device_list_as_unsubscribed(user_id)  # type: ignore[attr-defined]
 
         return batch_size
 
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 39e1efe373..8e764790db 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -36,7 +36,7 @@ what sort order was used:
 """
 
 import logging
-from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Set, Tuple
 
 import attr
 from frozendict import frozendict
@@ -585,7 +585,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         return ret, key
 
     async def get_membership_changes_for_user(
-        self, user_id: str, from_key: RoomStreamToken, to_key: RoomStreamToken
+        self,
+        user_id: str,
+        from_key: RoomStreamToken,
+        to_key: RoomStreamToken,
+        excluded_rooms: Optional[List[str]] = None,
     ) -> List[EventBase]:
         """Fetch membership events for a given user.
 
@@ -610,23 +614,29 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             min_from_id = from_key.stream
             max_to_id = to_key.get_max_stream_pos()
 
+            args: List[Any] = [user_id, min_from_id, max_to_id]
+
+            ignore_room_clause = ""
+            if excluded_rooms is not None and len(excluded_rooms) > 0:
+                ignore_room_clause = "AND e.room_id NOT IN (%s)" % ",".join(
+                    "?" for _ in excluded_rooms
+                )
+                args = args + excluded_rooms
+
             sql = """
                 SELECT m.event_id, instance_name, topological_ordering, stream_ordering
                 FROM events AS e, room_memberships AS m
                 WHERE e.event_id = m.event_id
                     AND m.user_id = ?
                     AND e.stream_ordering > ? AND e.stream_ordering <= ?
+                    %s
                 ORDER BY e.stream_ordering ASC
-            """
-            txn.execute(
-                sql,
-                (
-                    user_id,
-                    min_from_id,
-                    max_to_id,
-                ),
+            """ % (
+                ignore_room_clause,
             )
 
+            txn.execute(sql, args)
+
             rows = [
                 _EventDictReturn(event_id, None, stream_ordering)
                 for event_id, instance_name, topological_ordering, stream_ordering in txn
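
A standalone illustration of the parameterised exclusion clause built above: with two excluded rooms it expands to `AND e.room_id NOT IN (?,?)` and the room IDs are appended to the argument list (the values here are hypothetical):

    from typing import Any, List, Optional, Tuple

    def build_room_exclusion_clause(
        excluded_rooms: Optional[List[str]],
    ) -> Tuple[str, List[Any]]:
        if not excluded_rooms:
            return "", []
        placeholders = ",".join("?" for _ in excluded_rooms)
        return "AND e.room_id NOT IN (%s)" % placeholders, list(excluded_rooms)

    clause, extra_args = build_room_exclusion_clause(
        ["!abc:example.org", "!def:example.org"]
    )
    # clause == "AND e.room_id NOT IN (?,?)"
    args = ["@user:example.org", 10, 20] + extra_args
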
diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py
deleted file mode 100644
index fba270150b..0000000000
--- a/synapse/storage/relations.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2019 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
-
-import attr
-
-from synapse.api.errors import SynapseError
-from synapse.types import JsonDict
-
-if TYPE_CHECKING:
-    from synapse.storage.databases.main import DataStore
-
-logger = logging.getLogger(__name__)
-
-
-@attr.s(slots=True, auto_attribs=True)
-class PaginationChunk:
-    """Returned by relation pagination APIs.
-
-    Attributes:
-        chunk: The rows returned by pagination
-        next_batch: Token to fetch next set of results with, if
-            None then there are no more results.
-        prev_batch: Token to fetch previous set of results with, if
-            None then there are no previous results.
-    """
-
-    chunk: List[JsonDict]
-    next_batch: Optional[Any] = None
-    prev_batch: Optional[Any] = None
-
-    async def to_dict(self, store: "DataStore") -> Dict[str, Any]:
-        d = {"chunk": self.chunk}
-
-        if self.next_batch:
-            d["next_batch"] = await self.next_batch.to_string(store)
-
-        if self.prev_batch:
-            d["prev_batch"] = await self.prev_batch.to_string(store)
-
-        return d
-
-
-@attr.s(frozen=True, slots=True, auto_attribs=True)
-class AggregationPaginationToken:
-    """Pagination token for relation aggregation pagination API.
-
-    As the results are order by count and then MAX(stream_ordering) of the
-    aggregation groups, we can just use them as our pagination token.
-
-    Attributes:
-        count: The count of relations in the boundary group.
-        stream: The MAX stream ordering in the boundary group.
-    """
-
-    count: int
-    stream: int
-
-    @staticmethod
-    def from_string(string: str) -> "AggregationPaginationToken":
-        try:
-            c, s = string.split("-")
-            return AggregationPaginationToken(int(c), int(s))
-        except ValueError:
-            raise SynapseError(400, "Invalid aggregation pagination token")
-
-    async def to_string(self, store: "DataStore") -> str:
-        return "%d-%d" % (self.count, self.stream)
-
-    def as_tuple(self) -> Tuple[Any, ...]:
-        return attr.astuple(self)
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 7b21c1b96d..151f2aa9bb 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-SCHEMA_VERSION = 68  # remember to update the list below when updating
+SCHEMA_VERSION = 69  # remember to update the list below when updating
 """Represents the expectations made by the codebase about the database schema
 
 This should be incremented whenever the codebase changes its requirements on the
@@ -58,6 +58,10 @@ Changes in SCHEMA_VERSION = 68:
     - event_reference_hashes is no longer read.
     - `events` has `state_key` and `rejection_reason` columns, which are populated for
       new events.
+
+Changes in SCHEMA_VERSION = 69:
+    - We now write to `device_lists_changes_in_room` table.
+    - Use sequence to generate future `application_services_txns.txn_id`s
 """
 
 
diff --git a/synapse/storage/schema/main/delta/68/06_msc3202_add_device_list_appservice_stream_type.sql b/synapse/storage/schema/main/delta/68/06_msc3202_add_device_list_appservice_stream_type.sql
new file mode 100644
index 0000000000..7590e34b94
--- /dev/null
+++ b/synapse/storage/schema/main/delta/68/06_msc3202_add_device_list_appservice_stream_type.sql
@@ -0,0 +1,23 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a column to track which device list change stream ID this application
+-- service has been caught up to.
+
+-- We explicitly don't set this field as "NOT NULL", as having NULL as a possible
+-- state is useful for determining if we've ever sent traffic for a stream type
+-- to an appservice. See https://github.com/matrix-org/synapse/issues/10836 for
+-- one way this can be used.
+ALTER TABLE application_services_state ADD COLUMN device_list_stream_id BIGINT;
\ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/69/01as_txn_seq.py b/synapse/storage/schema/main/delta/69/01as_txn_seq.py
new file mode 100644
index 0000000000..24bd4b391e
--- /dev/null
+++ b/synapse/storage/schema/main/delta/69/01as_txn_seq.py
@@ -0,0 +1,44 @@
+# Copyright 2022 Beeper
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Adds a postgres SEQUENCE for generating application service transaction IDs.
+"""
+
+from synapse.storage.engines import PostgresEngine
+
+
+def run_create(cur, database_engine, *args, **kwargs):
+    if isinstance(database_engine, PostgresEngine):
+        # If we already have some AS TXNs we want to start from the current
+        # maximum value. There are two potential places this is stored - the
+        # actual TXNs themselves *and* the AS state table. At time of migration
+        # it is possible the TXNs table is empty so we must include the AS state
+        # last_txn as a potential option, and pick the maximum.
+
+        cur.execute("SELECT COALESCE(max(txn_id), 0) FROM application_services_txns")
+        row = cur.fetchone()
+        txn_max = row[0]
+
+        cur.execute("SELECT COALESCE(max(last_txn), 0) FROM application_services_state")
+        row = cur.fetchone()
+        last_txn_max = row[0]
+
+        start_val = max(last_txn_max, txn_max) + 1
+
+        cur.execute(
+            "CREATE SEQUENCE application_services_txn_id_seq START WITH %s",
+            (start_val,),
+        )
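
Once this sequence exists, subsequent transaction IDs can be drawn from it with Postgres's standard `nextval`; a rough sketch of an allocation (cursor handling elided, not the exact Synapse call site):

    def allocate_as_txn_id(cur) -> int:
        # `nextval` atomically increments and returns the next value of the
        # sequence created by this migration.
        cur.execute("SELECT nextval('application_services_txn_id_seq')")
        return int(cur.fetchone()[0])
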
diff --git a/synapse/storage/schema/main/delta/69/01device_list_oubound_by_room.sql b/synapse/storage/schema/main/delta/69/01device_list_oubound_by_room.sql
new file mode 100644
index 0000000000..b5b1782b2a
--- /dev/null
+++ b/synapse/storage/schema/main/delta/69/01device_list_oubound_by_room.sql
@@ -0,0 +1,38 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE device_lists_changes_in_room (
+    user_id TEXT NOT NULL,
+    device_id TEXT NOT NULL,
+    room_id TEXT NOT NULL,
+
+    -- This initially matches `device_lists_stream.stream_id`. Note that we
+    -- delete older values from `device_lists_stream`, so we can't use a foreign
+    -- key constraint here.
+    --
+    -- The table will contain rows with the same `stream_id` but different
+    -- `room_id`, as for each device update we store a row per room the user is
+    -- joined to. Therefore `(stream_id, room_id)` gives a unique index.
+    stream_id BIGINT NOT NULL,
+
+    -- We have a background process which goes through this table and converts
+    -- entries into rows in `device_lists_outbound_pokes`. Once we have processed
+    -- a row, we mark it as such by setting `converted_to_destinations=TRUE`.
+    converted_to_destinations BOOLEAN NOT NULL,
+    opentracing_context TEXT
+);
+
+CREATE UNIQUE INDEX device_lists_changes_in_stream_id ON device_lists_changes_in_room(stream_id, room_id);
+CREATE INDEX device_lists_changes_in_stream_id_unconverted ON device_lists_changes_in_room(stream_id) WHERE NOT converted_to_destinations;
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 86f1a5373b..cda194e8c8 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -571,6 +571,10 @@ class StateGroupStorage:
 
         Returns:
             dict of state_group_id -> (dict of (type, state_key) -> event id)
+
+        Raises:
+            RuntimeError if we don't have a state group for one or more of the events
+               (ie they are outliers or unknown)
         """
         if not event_ids:
             return {}
@@ -659,6 +663,10 @@ class StateGroupStorage:
 
         Returns:
             A dict of (event_id) -> (type, state_key) -> [state_events]
+
+        Raises:
+            RuntimeError if we don't have a state group for one or more of the events
+               (ie they are outliers or unknown)
         """
         event_to_groups = await self.stores.main._get_state_group_for_events(event_ids)
 
@@ -696,6 +704,10 @@ class StateGroupStorage:
 
         Returns:
             A dict from event_id -> (type, state_key) -> event_id
+
+        Raises:
+            RuntimeError if we don't have a state group for one or more of the events
+                (ie they are outliers or unknown)
         """
         event_to_groups = await self.stores.main._get_state_group_for_events(event_ids)
 
@@ -723,6 +735,10 @@ class StateGroupStorage:
 
         Returns:
             A dict from (type, state_key) -> state_event
+
+        Raises:
+            RuntimeError if we don't have a state group for the event (ie it is an
+                outlier or is unknown)
         """
         state_map = await self.get_state_for_events(
             [event_id], state_filter or StateFilter.all()
@@ -741,6 +757,10 @@ class StateGroupStorage:
 
         Returns:
             A dict from (type, state_key) -> state_event_id
+
+        Raises:
+            RuntimeError if we don't have a state group for the event (ie it is an
+                outlier or is unknown)
         """
         state_map = await self.get_state_ids_for_events(
             [event_id], state_filter or StateFilter.all()
diff --git a/synapse/types.py b/synapse/types.py
index 5ce2a5b0a5..6bbefb6faa 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -25,6 +25,7 @@ from typing import (
     Match,
     MutableMapping,
     Optional,
+    Set,
     Tuple,
     Type,
     TypeVar,
@@ -421,22 +422,44 @@ class RoomStreamToken:
 
             s0    s1
             |     |
-        [0] V [1] V [2]
+        [0] ▼ [1] ▼ [2]
 
     Tokens can either be a point in the live event stream or a cursor going
     through historic events.
 
-    When traversing the live event stream events are ordered by when they
-    arrived at the homeserver.
+    When traversing the live event stream, events are ordered by
+    `stream_ordering` (when they arrived at the homeserver).
 
-    When traversing historic events the events are ordered by their depth in
-    the event graph "topological_ordering" and then by when they arrived at the
-    homeserver "stream_ordering".
+    When traversing historic events, events are first ordered by their `depth`
+    (`topological_ordering` in the event graph) and tie-broken by
+    `stream_ordering` (when the event arrived at the homeserver).
 
-    Live tokens start with an "s" followed by the "stream_ordering" id of the
-    event it comes after. Historic tokens start with a "t" followed by the
-    "topological_ordering" id of the event it comes after, followed by "-",
-    followed by the "stream_ordering" id of the event it comes after.
+    If you're looking for more info about what a token with all of the
+    underscores means, ex.
+    `s2633508_17_338_6732159_1082514_541479_274711_265584_1`, see the docstring
+    for `StreamToken` below.
+
+    ---
+
+    Live tokens start with an "s" followed by the `stream_ordering` of the event
+    that comes before the position of the token. Said another way:
+    `stream_ordering` uniquely identifies a persisted event. The live token
+    means "the position just after the event identified by `stream_ordering`".
+    An example token is:
+
+        s2633508
+
+    ---
+
+    Historic tokens start with a "t" followed by the `depth`
+    (`topological_ordering` in the event graph) of the event that comes before
+    the position of the token, followed by "-", followed by the
+    `stream_ordering` of the event that comes before the position of the token.
+    An example token is:
+
+        t426-2633508
+
+    ---
 
     There is also a third mode for live tokens where the token starts with "m",
     which is sometimes used when using sharded event persisters. In this case
@@ -463,6 +486,8 @@ class RoomStreamToken:
     Note: The `RoomStreamToken` cannot have both a topological part and an
     instance map.
 
+    ---
+
     For caching purposes, `RoomStreamToken`s and by extension, all their
     attributes, must be hashable.
     """
@@ -599,7 +624,57 @@ class RoomStreamToken:
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class StreamToken:
-    """A collection of positions within multiple streams.
+    """A collection of keys joined together by underscores in the following
+    order and which represent the position in their respective streams.
+
+    ex. `s2633508_17_338_6732159_1082514_541479_274711_265584_1`
+        1. `room_key`: `s2633508` which is a `RoomStreamToken`
+           - `RoomStreamToken`'s can also look like `t426-2633508` or `m56~2.58~3.59`
+           - See the docstring for `RoomStreamToken` for more details.
+        2. `presence_key`: `17`
+        3. `typing_key`: `338`
+        4. `receipt_key`: `6732159`
+        5. `account_data_key`: `1082514`
+        6. `push_rules_key`: `541479`
+        7. `to_device_key`: `274711`
+        8. `device_list_key`: `265584`
+        9. `groups_key`: `1`
+
+    You can see how many of these keys correspond to the various
+    fields in a "/sync" response:
+    ```json
+    {
+        "next_batch": "s12_4_0_1_1_1_1_4_1",
+        "presence": {
+            "events": []
+        },
+        "device_lists": {
+            "changed": []
+        },
+        "rooms": {
+            "join": {
+                "!QrZlfIDQLNLdZHqTnt:hs1": {
+                    "timeline": {
+                        "events": [],
+                        "prev_batch": "s10_4_0_1_1_1_1_4_1",
+                        "limited": false
+                    },
+                    "state": {
+                        "events": []
+                    },
+                    "account_data": {
+                        "events": []
+                    },
+                    "ephemeral": {
+                        "events": []
+                    }
+                }
+            }
+        }
+    }
+    ```
+
+    ---
 
     For caching purposes, `StreamToken`s and by extension, all their attributes,
     must be hashable.
@@ -748,6 +823,30 @@ class ReadReceipt:
     data: JsonDict
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class DeviceListUpdates:
+    """
+    An object containing a diff of information regarding other users' device lists, intended for
+    a recipient to carry out device list tracking.
+
+    Attributes:
+        changed: A set of users whose device lists have changed recently.
+        left: A set of users whose device lists the recipient no longer needs
+            to track, typically because they no longer share any end-to-end
+            encrypted rooms.
+    """
+
+    # We need to use a factory here: otherwise `set` would be evaluated once,
+    # at class definition time, rather than at each object instantiation, so
+    # every DeviceListUpdates instance would end up sharing the same sets.
+    # Also see: don't define mutable default arguments.
+    changed: Set[str] = attr.ib(factory=set)
+    left: Set[str] = attr.ib(factory=set)
+
+    def __bool__(self) -> bool:
+        return bool(self.changed or self.left)
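
As a small illustration of the serialised form documented in the `StreamToken` docstring above, a sync token can be split on underscores into its component keys (the first being the `RoomStreamToken`); this is purely expository and is not how Synapse parses tokens:

    STREAM_TOKEN_KEYS = (
        "room_key",
        "presence_key",
        "typing_key",
        "receipt_key",
        "account_data_key",
        "push_rules_key",
        "to_device_key",
        "device_list_key",
        "groups_key",
    )

    def split_stream_token(token: str) -> dict:
        parts = token.split("_")
        assert len(parts) == len(STREAM_TOKEN_KEYS)
        return dict(zip(STREAM_TOKEN_KEYS, parts))

    # e.g. {'room_key': 's2633508', 'presence_key': '17', ...}
    print(split_stream_token("s2633508_17_338_6732159_1082514_541479_274711_265584_1"))
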
+
+
 def get_verify_key_from_cross_signing_key(key_info):
     """Get the key ID and signedjson verify key from a cross-signing key dict
 
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
index 1cbc180eda..42f6abb5e1 100644
--- a/synapse/util/caches/__init__.py
+++ b/synapse/util/caches/__init__.py
@@ -17,7 +17,7 @@ import logging
 import typing
 from enum import Enum, auto
 from sys import intern
-from typing import Any, Callable, Dict, List, Optional, Sized
+from typing import Any, Callable, Dict, List, Optional, Sized, TypeVar
 
 import attr
 from prometheus_client.core import Gauge
@@ -195,8 +195,10 @@ KNOWN_KEYS = {
     )
 }
 
+T = TypeVar("T", Optional[str], str)
 
-def intern_string(string: Optional[str]) -> Optional[str]:
+
+def intern_string(string: T) -> T:
     """Takes a (potentially) unicode string and interns it if it's ascii"""
     if string is None:
         return None
diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py
index 6d4b0b7c5a..dace68666c 100644
--- a/synapse/util/patch_inline_callbacks.py
+++ b/synapse/util/patch_inline_callbacks.py
@@ -217,13 +217,16 @@ def _check_yield_points(
                 # We don't raise here as its perfectly valid for contexts to
                 # change in a function, as long as it sets the correct context
                 # on resolving (which is checked separately).
-                err = "%s changed context from %s to %s, happened between lines %d and %d in %s" % (
-                    frame.f_code.co_name,
-                    expected_context,
-                    current_context(),
-                    last_yield_line_no,
-                    frame.f_lineno,
-                    frame.f_code.co_filename,
+                err = (
+                    "%s changed context from %s to %s, happened between lines %d and %d in %s"
+                    % (
+                        frame.f_code.co_name,
+                        expected_context,
+                        current_context(),
+                        last_yield_line_no,
+                        frame.f_lineno,
+                        frame.f_code.co_filename,
+                    )
                 )
                 changes.append(err)
 
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 648d9a95a7..d81f2527d7 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -30,7 +30,7 @@ MIN_RETRY_INTERVAL = 10 * 60 * 1000
 RETRY_MULTIPLIER = 5
 
 # a cap on the backoff. (Essentially none)
-MAX_RETRY_INTERVAL = 2 ** 62
+MAX_RETRY_INTERVAL = 2**62
 
 
 class NotRetryingDestination(Exception):
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 49519eb8f5..250f073597 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -1,4 +1,5 @@
 # Copyright 2014 - 2016 OpenMarket Ltd
+# Copyright (C) The Matrix.org Foundation C.I.C. 2022
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,7 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import Dict, FrozenSet, List, Optional
+from typing import Collection, Dict, FrozenSet, List, Optional, Tuple
+
+from typing_extensions import Final
 
 from synapse.api.constants import EventTypes, HistoryVisibility, Membership
 from synapse.events import EventBase
@@ -40,6 +43,8 @@ MEMBERSHIP_PRIORITY = (
     Membership.BAN,
 )
 
+_HISTORY_VIS_KEY: Final[Tuple[str, str]] = (EventTypes.RoomHistoryVisibility, "")
+
 
 async def filter_events_for_client(
     storage: Storage,
@@ -74,7 +79,7 @@ async def filter_events_for_client(
     # to clients.
     events = [e for e in events if not e.internal_metadata.is_soft_failed()]
 
-    types = ((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id))
+    types = (_HISTORY_VIS_KEY, (EventTypes.Member, user_id))
 
     # we exclude outliers at this point, and then handle them separately later
     event_id_to_state = await storage.state.get_state_for_events(
@@ -157,7 +162,7 @@ async def filter_events_for_client(
         state = event_id_to_state[event.event_id]
 
         # get the room_visibility at the time of the event.
-        visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
+        visibility_event = state.get(_HISTORY_VIS_KEY, None)
         if visibility_event:
             visibility = visibility_event.content.get(
                 "history_visibility", HistoryVisibility.SHARED
@@ -293,67 +298,28 @@ async def filter_events_for_server(
             return True
         return False
 
-    def check_event_is_visible(event: EventBase, state: StateMap[EventBase]) -> bool:
-        history = state.get((EventTypes.RoomHistoryVisibility, ""), None)
-        if history:
-            visibility = history.content.get(
-                "history_visibility", HistoryVisibility.SHARED
-            )
-            if visibility in [HistoryVisibility.INVITED, HistoryVisibility.JOINED]:
-                # We now loop through all state events looking for
-                # membership states for the requesting server to determine
-                # if the server is either in the room or has been invited
-                # into the room.
-                for ev in state.values():
-                    if ev.type != EventTypes.Member:
-                        continue
-                    try:
-                        domain = get_domain_from_id(ev.state_key)
-                    except Exception:
-                        continue
-
-                    if domain != server_name:
-                        continue
-
-                    memtype = ev.membership
-                    if memtype == Membership.JOIN:
-                        return True
-                    elif memtype == Membership.INVITE:
-                        if visibility == HistoryVisibility.INVITED:
-                            return True
-                else:
-                    # server has no users in the room: redact
-                    return False
-
-        return True
-
-    # Lets check to see if all the events have a history visibility
-    # of "shared" or "world_readable". If that's the case then we don't
-    # need to check membership (as we know the server is in the room).
-    event_to_state_ids = await storage.state.get_state_ids_for_events(
-        frozenset(e.event_id for e in events),
-        state_filter=StateFilter.from_types(
-            types=((EventTypes.RoomHistoryVisibility, ""),)
-        ),
-    )
-
-    visibility_ids = set()
-    for sids in event_to_state_ids.values():
-        hist = sids.get((EventTypes.RoomHistoryVisibility, ""))
-        if hist:
-            visibility_ids.add(hist)
+    def check_event_is_visible(
+        visibility: str, memberships: StateMap[EventBase]
+    ) -> bool:
+        if visibility not in (HistoryVisibility.INVITED, HistoryVisibility.JOINED):
+            return True
 
-    # If we failed to find any history visibility events then the default
-    # is "shared" visibility.
-    if not visibility_ids:
-        all_open = True
-    else:
-        event_map = await storage.main.get_events(visibility_ids)
-        all_open = all(
-            e.content.get("history_visibility")
-            in (None, HistoryVisibility.SHARED, HistoryVisibility.WORLD_READABLE)
-            for e in event_map.values()
-        )
+        # `memberships` contains only membership events for users on the requesting
+        # server (see `_event_to_memberships`), so we just need to check whether any
+        # of them shows the server as joined, or as invited when the history
+        # visibility is "invited".
+        for ev in memberships.values():
+            assert get_domain_from_id(ev.state_key) == server_name
+
+            memtype = ev.membership
+            if memtype == Membership.JOIN:
+                return True
+            elif memtype == Membership.INVITE:
+                if visibility == HistoryVisibility.INVITED:
+                    return True
+
+        # server has no users in the room: redact
+        return False
 
     if not check_history_visibility_only:
         erased_senders = await storage.main.are_users_erased(e.sender for e in events)
@@ -362,34 +328,100 @@ async def filter_events_for_server(
         # to no users having been erased.
         erased_senders = {}
 
-    if all_open:
-        # all the history_visibility state affecting these events is open, so
-        # we don't need to filter by membership state. We *do* need to check
-        # for user erasure, though.
-        if erased_senders:
-            to_return = []
-            for e in events:
-                if not is_sender_erased(e, erased_senders):
-                    to_return.append(e)
-                elif redact:
-                    to_return.append(prune_event(e))
-
-            return to_return
-
-        # If there are no erased users then we can just return the given list
-        # of events without having to copy it.
-        return events
-
-    # Ok, so we're dealing with events that have non-trivial visibility
-    # rules, so we need to also get the memberships of the room.
-
-    # first, for each event we're wanting to return, get the event_ids
-    # of the history vis and membership state at those events.
+    # Let's check to see if all the events have a history visibility
+    # of "shared" or "world_readable". If that's the case then we don't
+    # need to check membership (as we know the server is in the room).
+    event_to_history_vis = await _event_to_history_vis(storage, events)
+
+    # for any with restricted vis, we also need the memberships
+    event_to_memberships = await _event_to_memberships(
+        storage,
+        [
+            e
+            for e in events
+            if event_to_history_vis[e.event_id]
+            not in (HistoryVisibility.SHARED, HistoryVisibility.WORLD_READABLE)
+        ],
+        server_name,
+    )
+
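+    # Decide, for each event, whether to return it unchanged, return a redacted
+    # copy, or drop it entirely, based on sender erasure and the visibility check.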
+    to_return = []
+    for e in events:
+        erased = is_sender_erased(e, erased_senders)
+        visible = check_event_is_visible(
+            event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {})
+        )
+        if visible and not erased:
+            to_return.append(e)
+        elif redact:
+            to_return.append(prune_event(e))
+
+    return to_return
+
+
+async def _event_to_history_vis(
+    storage: Storage, events: Collection[EventBase]
+) -> Dict[str, str]:
+    """Get the history visibility at each of the given events
+
+    Returns a map from event id to history_visibility setting
+    """
+
+    # outliers get special treatment here. We don't have the state at that point in the
+    # room (and attempting to look it up will raise an exception), so all we can really
+    # do is assume that the requesting server is allowed to see the event. That's
+    # equivalent to there not being a history_visibility event, so we just exclude
+    # any outliers from the query.
+    event_to_state_ids = await storage.state.get_state_ids_for_events(
+        frozenset(e.event_id for e in events if not e.internal_metadata.is_outlier()),
+        state_filter=StateFilter.from_types(types=(_HISTORY_VIS_KEY,)),
+    )
+
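+    # Collect the distinct history_visibility event IDs referenced by those state
+    # maps, so that the actual events can be fetched in a single batch.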
+    visibility_ids = {
+        vis_event_id
+        for vis_event_id in (
+            state_ids.get(_HISTORY_VIS_KEY) for state_ids in event_to_state_ids.values()
+        )
+        if vis_event_id
+    }
+    vis_events = await storage.main.get_events(visibility_ids)
+
+    result: Dict[str, str] = {}
+    for event in events:
+        vis = HistoryVisibility.SHARED
+        state_ids = event_to_state_ids.get(event.event_id)
+
+        # if we didn't find any state for this event, it's an outlier, and we assume
+        # it's open
+        visibility_id = None
+        if state_ids:
+            visibility_id = state_ids.get(_HISTORY_VIS_KEY)
+
+        if visibility_id:
+            vis_event = vis_events[visibility_id]
+            vis = vis_event.content.get("history_visibility", HistoryVisibility.SHARED)
+            assert isinstance(vis, str)
+
+        result[event.event_id] = vis
+    return result
+
+
+async def _event_to_memberships(
+    storage: Storage, events: Collection[EventBase], server_name: str
+) -> Dict[str, StateMap[EventBase]]:
+    """Get the remote membership list at each of the given events
+
+    Returns a map from event id to state map, which will contain only membership events
+    for the given server.
+    """
+
+    if not events:
+        return {}
+
+    # for each event, get the event_ids of the membership state at those events.
     event_to_state_ids = await storage.state.get_state_ids_for_events(
         frozenset(e.event_id for e in events),
-        state_filter=StateFilter.from_types(
-            types=((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, None))
-        ),
+        state_filter=StateFilter.from_types(types=((EventTypes.Member, None),)),
     )
 
     # We only want to pull out member events that correspond to the
@@ -405,10 +437,7 @@ async def filter_events_for_server(
         for key, event_id in key_to_eid.items()
     }
 
-    def include(typ, state_key):
-        if typ != EventTypes.Member:
-            return True
-
+    def include(state_key: str) -> bool:
         # we avoid using get_domain_from_id here for efficiency.
         idx = state_key.find(":")
         if idx == -1:
@@ -416,10 +445,14 @@ async def filter_events_for_server(
         return state_key[idx + 1 :] == server_name
 
     event_map = await storage.main.get_events(
-        [e_id for e_id, key in event_id_to_state_key.items() if include(key[0], key[1])]
+        [
+            e_id
+            for e_id, (_, state_key) in event_id_to_state_key.items()
+            if include(state_key)
+        ]
     )
 
-    event_to_state = {
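+    # Rebuild a per-event state map containing only the membership events we
+    # actually fetched, i.e. those for users on the requesting server.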
+    return {
         e_id: {
             key: event_map[inner_e_id]
             for key, inner_e_id in key_to_eid.items()
@@ -427,14 +460,3 @@ async def filter_events_for_server(
         }
         for e_id, key_to_eid in event_to_state_ids.items()
     }
-
-    to_return = []
-    for e in events:
-        erased = is_sender_erased(e, erased_senders)
-        visible = check_event_is_visible(e, event_to_state[e.event_id])
-        if visible and not erased:
-            to_return.append(e)
-        elif redact:
-            to_return.append(prune_event(e))
-
-    return to_return
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
index 1cbb059357..0b22afdc75 100644
--- a/tests/appservice/test_scheduler.py
+++ b/tests/appservice/test_scheduler.py
@@ -24,6 +24,7 @@ from synapse.appservice.scheduler import (
 )
 from synapse.logging.context import make_deferred_yieldable
 from synapse.server import HomeServer
+from synapse.types import DeviceListUpdates
 from synapse.util import Clock
 
 from tests import unittest
@@ -70,6 +71,7 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
             to_device_messages=[],  # txn made and saved
             one_time_key_counts={},
             unused_fallback_keys={},
+            device_list_summary=DeviceListUpdates(),
         )
         self.assertEqual(0, len(self.txnctrl.recoverers))  # no recoverer made
         txn.complete.assert_called_once_with(self.store)  # txn completed
@@ -96,6 +98,7 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
             to_device_messages=[],  # txn made and saved
             one_time_key_counts={},
             unused_fallback_keys={},
+            device_list_summary=DeviceListUpdates(),
         )
         self.assertEqual(0, txn.send.call_count)  # txn not sent though
         self.assertEqual(0, txn.complete.call_count)  # or completed
@@ -124,6 +127,7 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
             to_device_messages=[],
             one_time_key_counts={},
             unused_fallback_keys={},
+            device_list_summary=DeviceListUpdates(),
         )
         self.assertEqual(1, self.recoverer_fn.call_count)  # recoverer made
         self.assertEqual(1, self.recoverer.recover.call_count)  # and invoked
@@ -225,7 +229,9 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase):
         service = Mock(id=4)
         event = Mock()
         self.scheduler.enqueue_for_appservice(service, events=[event])
-        self.txn_ctrl.send.assert_called_once_with(service, [event], [], [], None, None)
+        self.txn_ctrl.send.assert_called_once_with(
+            service, [event], [], [], None, None, DeviceListUpdates()
+        )
 
     def test_send_single_event_with_queue(self):
         d = defer.Deferred()
@@ -240,12 +246,14 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase):
         # (call enqueue_for_appservice multiple times deliberately)
         self.scheduler.enqueue_for_appservice(service, events=[event2])
         self.scheduler.enqueue_for_appservice(service, events=[event3])
-        self.txn_ctrl.send.assert_called_with(service, [event], [], [], None, None)
+        self.txn_ctrl.send.assert_called_with(
+            service, [event], [], [], None, None, DeviceListUpdates()
+        )
         self.assertEqual(1, self.txn_ctrl.send.call_count)
         # Resolve the send event: expect the queued events to be sent
         d.callback(service)
         self.txn_ctrl.send.assert_called_with(
-            service, [event2, event3], [], [], None, None
+            service, [event2, event3], [], [], None, None, DeviceListUpdates()
         )
         self.assertEqual(2, self.txn_ctrl.send.call_count)
 
@@ -272,15 +280,21 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase):
         # send events for different ASes and make sure they are sent
         self.scheduler.enqueue_for_appservice(srv1, events=[srv_1_event])
         self.scheduler.enqueue_for_appservice(srv1, events=[srv_1_event2])
-        self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event], [], [], None, None)
+        self.txn_ctrl.send.assert_called_with(
+            srv1, [srv_1_event], [], [], None, None, DeviceListUpdates()
+        )
         self.scheduler.enqueue_for_appservice(srv2, events=[srv_2_event])
         self.scheduler.enqueue_for_appservice(srv2, events=[srv_2_event2])
-        self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event], [], [], None, None)
+        self.txn_ctrl.send.assert_called_with(
+            srv2, [srv_2_event], [], [], None, None, DeviceListUpdates()
+        )
 
         # make sure callbacks for a service only send queued events for THAT
         # service
         srv_2_defer.callback(srv2)
-        self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], [], [], None, None)
+        self.txn_ctrl.send.assert_called_with(
+            srv2, [srv_2_event2], [], [], None, None, DeviceListUpdates()
+        )
         self.assertEqual(3, self.txn_ctrl.send.call_count)
 
     def test_send_large_txns(self):
@@ -300,17 +314,17 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase):
 
         # Expect the first event to be sent immediately.
         self.txn_ctrl.send.assert_called_with(
-            service, [event_list[0]], [], [], None, None
+            service, [event_list[0]], [], [], None, None, DeviceListUpdates()
         )
         srv_1_defer.callback(service)
         # Then send the next 100 events
         self.txn_ctrl.send.assert_called_with(
-            service, event_list[1:101], [], [], None, None
+            service, event_list[1:101], [], [], None, None, DeviceListUpdates()
         )
         srv_2_defer.callback(service)
         # Then the final 99 events
         self.txn_ctrl.send.assert_called_with(
-            service, event_list[101:], [], [], None, None
+            service, event_list[101:], [], [], None, None, DeviceListUpdates()
         )
         self.assertEqual(3, self.txn_ctrl.send.call_count)
 
@@ -320,7 +334,7 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase):
         event_list = [Mock(name="event")]
         self.scheduler.enqueue_for_appservice(service, ephemeral=event_list)
         self.txn_ctrl.send.assert_called_once_with(
-            service, [], event_list, [], None, None
+            service, [], event_list, [], None, None, DeviceListUpdates()
         )
 
     def test_send_multiple_ephemeral_no_queue(self):
@@ -329,7 +343,7 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase):
         event_list = [Mock(name="event1"), Mock(name="event2"), Mock(name="event3")]
         self.scheduler.enqueue_for_appservice(service, ephemeral=event_list)
         self.txn_ctrl.send.assert_called_once_with(
-            service, [], event_list, [], None, None
+            service, [], event_list, [], None, None, DeviceListUpdates()
         )
 
     def test_send_single_ephemeral_with_queue(self):
@@ -345,13 +359,21 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase):
         # Send more events: expect send() to NOT be called multiple times.
         self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_2)
         self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_3)
-        self.txn_ctrl.send.assert_called_with(service, [], event_list_1, [], None, None)
+        self.txn_ctrl.send.assert_called_with(
+            service, [], event_list_1, [], None, None, DeviceListUpdates()
+        )
         self.assertEqual(1, self.txn_ctrl.send.call_count)
         # Resolve txn_ctrl.send
         d.callback(service)
         # Expect the queued events to be sent
         self.txn_ctrl.send.assert_called_with(
-            service, [], event_list_2 + event_list_3, [], None, None
+            service,
+            [],
+            event_list_2 + event_list_3,
+            [],
+            None,
+            None,
+            DeviceListUpdates(),
         )
         self.assertEqual(2, self.txn_ctrl.send.call_count)
 
@@ -365,8 +387,10 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase):
         event_list = first_chunk + second_chunk
         self.scheduler.enqueue_for_appservice(service, ephemeral=event_list)
         self.txn_ctrl.send.assert_called_once_with(
-            service, [], first_chunk, [], None, None
+            service, [], first_chunk, [], None, None, DeviceListUpdates()
         )
         d.callback(service)
-        self.txn_ctrl.send.assert_called_with(service, [], second_chunk, [], None, None)
+        self.txn_ctrl.send.assert_called_with(
+            service, [], second_chunk, [], None, None, DeviceListUpdates()
+        )
         self.assertEqual(2, self.txn_ctrl.send.call_count)
diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py
index 694020fbef..06e0545a4f 100644
--- a/tests/crypto/test_event_signing.py
+++ b/tests/crypto/test_event_signing.py
@@ -28,8 +28,8 @@ from tests import unittest
 SIGNING_KEY_SEED = decode_base64("YJDBA9Xnr2sVqXD9Vj7XVUnmFZcZrlw8Md7kMW+3XA1")
 
 KEY_ALG = "ed25519"
-KEY_VER = 1
-KEY_NAME = "%s:%d" % (KEY_ALG, KEY_VER)
+KEY_VER = "1"
+KEY_NAME = "%s:%s" % (KEY_ALG, KEY_VER)
 
 HOSTNAME = "domain"
 
@@ -39,7 +39,7 @@ class EventSigningTestCase(unittest.TestCase):
         # NB: `signedjson` expects `nacl.signing.SigningKey` instances which have been
         # monkeypatched to include new `alg` and `version` attributes. This is captured
         # by the `signedjson.types.SigningKey` protocol.
-        self.signing_key: signedjson.types.SigningKey = nacl.signing.SigningKey(
+        self.signing_key: signedjson.types.SigningKey = nacl.signing.SigningKey(  # type: ignore[assignment]
             SIGNING_KEY_SEED
         )
         self.signing_key.alg = KEY_ALG
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index e90592855a..a6e91956af 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -14,6 +14,7 @@
 from typing import Optional
 from unittest.mock import Mock
 
+from parameterized import parameterized_class
 from signedjson import key, sign
 from signedjson.types import BaseKey, SigningKey
 
@@ -154,6 +155,12 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
         )
 
 
+@parameterized_class(
+    [
+        {"enable_room_poke_code_path": False},
+        {"enable_room_poke_code_path": True},
+    ]
+)
 class FederationSenderDevicesTestCases(HomeserverTestCase):
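+    """Parameterized so that each test runs both with and without the new
+    "device list changes in room" code path (toggled via the
+    use_new_device_lists_changes_in_room config option in default_config below).
+    """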
     servlets = [
         admin.register_servlets,
@@ -168,17 +175,21 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
     def default_config(self):
         c = super().default_config()
         c["send_federation"] = True
+        c["use_new_device_lists_changes_in_room"] = self.enable_room_poke_code_path
         return c
 
     def prepare(self, reactor, clock, hs):
-        # stub out get_users_who_share_room_with_user so that it claims that
-        # `@user2:host2` is in the room
-        def get_users_who_share_room_with_user(user_id):
+        # stub out `get_rooms_for_user` and `get_users_in_room` so that the
+        # server thinks the user shares a room with `@user2:host2`
+        def get_rooms_for_user(user_id):
+            return defer.succeed({"!room:host1"})
+
+        hs.get_datastores().main.get_rooms_for_user = get_rooms_for_user
+
+        def get_users_in_room(room_id):
             return defer.succeed({"@user2:host2"})
 
-        hs.get_datastores().main.get_users_who_share_room_with_user = (
-            get_users_who_share_room_with_user
-        )
+        hs.get_datastores().main.get_users_in_room = get_users_in_room
 
         # whenever send_transaction is called, record the edu data
         self.edus = []
diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py
index 648a01618e..d21c11b716 100644
--- a/tests/federation/transport/test_knocking.py
+++ b/tests/federation/transport/test_knocking.py
@@ -23,7 +23,7 @@ from synapse.server import HomeServer
 from synapse.types import RoomAlias
 
 from tests.test_utils import event_injection
-from tests.unittest import FederatingHomeserverTestCase, TestCase, override_config
+from tests.unittest import FederatingHomeserverTestCase, TestCase
 
 
 class KnockingStrippedStateEventHelperMixin(TestCase):
@@ -221,7 +221,6 @@ class FederationKnockingTestCase(
 
         return super().prepare(reactor, clock, homeserver)
 
-    @override_config({"experimental_features": {"msc2403_enabled": True}})
     def test_room_state_returned_when_knocking(self):
         """
         Tests that specific, stripped state events from a room are returned after
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index cead9f90df..8c72cf6b30 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -15,6 +15,8 @@
 from typing import Dict, Iterable, List, Optional
 from unittest.mock import Mock
 
+from parameterized import parameterized
+
 from twisted.internet import defer
 from twisted.test.proto_helpers import MemoryReactor
 
@@ -471,6 +473,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
             to_device_messages,
             _otks,
             _fbks,
+            _device_list_summary,
         ) = self.send_mock.call_args[0]
 
         # Assert that this was the same to-device message that local_user sent
@@ -583,7 +586,15 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
         service_id_to_message_count: Dict[str, int] = {}
 
         for call in self.send_mock.call_args_list:
-            service, _events, _ephemeral, to_device_messages, _otks, _fbks = call[0]
+            (
+                service,
+                _events,
+                _ephemeral,
+                to_device_messages,
+                _otks,
+                _fbks,
+                _device_list_summary,
+            ) = call[0]
 
             # Check that this was made to an interested service
             self.assertIn(service, interested_appservices)
@@ -627,6 +638,114 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
         return appservice
 
 
+class ApplicationServicesHandlerDeviceListsTestCase(unittest.HomeserverTestCase):
+    """
+    Tests that the ApplicationServicesHandler sends device list updates to application
+    services correctly.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        # Allow us to modify cached feature flags mid-test
+        self.as_handler = hs.get_application_service_handler()
+
+        # Mock ApplicationServiceApi's put_json, so we can verify the raw JSON that
+        # will be sent over the wire
+        self.put_json = simple_async_mock()
+        hs.get_application_service_api().put_json = self.put_json  # type: ignore[assignment]
+
+        # Mock out application services, and allow defining our own in tests
+        self._services: List[ApplicationService] = []
+        self.hs.get_datastores().main.get_app_services = Mock(
+            return_value=self._services
+        )
+
+    # Test across a variety of configuration values
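+    # Tuples are (experimental_feature_enabled, as_supports_txn_extensions,
+    # as_should_receive_device_list_updates): updates are only expected when both
+    # the experimental feature and the AS's MSC3202 opt-in are enabled.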
+    @parameterized.expand(
+        [
+            (True, True, True),
+            (True, False, False),
+            (False, True, False),
+            (False, False, False),
+        ]
+    )
+    def test_application_service_receives_device_list_updates(
+        self,
+        experimental_feature_enabled: bool,
+        as_supports_txn_extensions: bool,
+        as_should_receive_device_list_updates: bool,
+    ):
+        """
+        Tests that an application service is notified of device list changes for a
+        user it is interested in, when that user's device list changes.
+
+        Arguments above are populated by parameterized.
+
+        Args:
+            experimental_feature_enabled: Whether the "msc3202_transaction_extensions"
+                experimental feature is enabled. It must be enabled for device list
+                updates to be sent to application services.
+            as_supports_txn_extensions: Whether the application service has explicitly
+                registered to receive information defined by MSC3202 - which includes
+                device list changes.
+            as_should_receive_device_list_updates: Whether we expect the AS to receive
+                the device list changes.
+        """
+        # Change whether the experimental feature is enabled or disabled before making
+        # device list changes
+        self.as_handler._msc3202_transaction_extensions_enabled = (
+            experimental_feature_enabled
+        )
+
+        # Create an appservice that is interested in "local_user"
+        appservice = ApplicationService(
+            token=random_string(10),
+            hostname="example.com",
+            id=random_string(10),
+            sender="@as:example.com",
+            rate_limited=False,
+            namespaces={
+                ApplicationService.NS_USERS: [
+                    {
+                        "regex": "@local_user:.+",
+                        "exclusive": False,
+                    }
+                ],
+            },
+            supports_ephemeral=True,
+            msc3202_transaction_extensions=as_supports_txn_extensions,
+            # Must be set for Synapse to try pushing data to the AS
+            hs_token="abcde",
+            url="some_url",
+        )
+
+        # Register the application service
+        self._services.append(appservice)
+
+        # Register a user on the homeserver
+        self.local_user = self.register_user("local_user", "password")
+        self.local_user_token = self.login("local_user", "password")
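+        # Registering and logging in creates devices for local_user, which should
+        # trigger a device list update to any interested application service.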
+
+        if as_should_receive_device_list_updates:
+            # Ensure that the resulting JSON uses the unstable prefix and contains the
+            # expected users
+            self.put_json.assert_called_once()
+            json_body = self.put_json.call_args[1]["json_body"]
+
+            # Our application service should have received a device list update with
+            # "local_user" in the "changed" list
+            device_list_dict = json_body.get("org.matrix.msc3202.device_lists", {})
+            self.assertEqual([], device_list_dict["left"])
+            self.assertEqual([self.local_user], device_list_dict["changed"])
+
+        else:
+            # No device list changes should have been sent out
+            self.put_json.assert_not_called()
+
+
 class ApplicationServicesHandlerOtkCountsTestCase(unittest.HomeserverTestCase):
     # Argument indices for pulling out arguments from a `send_mock`.
     ARG_OTK_COUNTS = 4
diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py
index 3a10791226..7586e472b5 100644
--- a/tests/handlers/test_deactivate_account.py
+++ b/tests/handlers/test_deactivate_account.py
@@ -44,21 +44,20 @@ class DeactivateAccountTestCase(HomeserverTestCase):
         Deactivates the account `self.user` using `self.token` and asserts
         that it returns a 200 success code.
         """
-        req = self.get_success(
-            self.make_request(
-                "POST",
-                "account/deactivate",
-                {
-                    "auth": {
-                        "type": "m.login.password",
-                        "user": self.user,
-                        "password": "pass",
-                    },
-                    "erase": True,
+        req = self.make_request(
+            "POST",
+            "account/deactivate",
+            {
+                "auth": {
+                    "type": "m.login.password",
+                    "user": self.user,
+                    "password": "pass",
                 },
-                access_token=self.token,
-            )
+                "erase": True,
+            },
+            access_token=self.token,
         )
+
         self.assertEqual(req.code, HTTPStatus.OK, req)
 
     def test_global_account_data_deleted_upon_deactivation(self) -> None:
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index ac21a28c43..8c74ed1fcf 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -463,8 +463,10 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
         res = e.value.code
         self.assertEqual(res, 400)
 
-        res = self.get_success(self.handler.query_local_devices({local_user: None}))
-        self.assertDictEqual(res, {local_user: {}})
+        query_res = self.get_success(
+            self.handler.query_local_devices({local_user: None})
+        )
+        self.assertDictEqual(query_res, {local_user: {}})
 
     def test_upload_signatures(self) -> None:
         """should check signatures that are uploaded"""
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 89078fc637..060ba5f517 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -20,17 +20,17 @@ from twisted.test.proto_helpers import MemoryReactor
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseError
 from synapse.api.room_versions import RoomVersions
-from synapse.events import EventBase
+from synapse.events import EventBase, make_event_from_dict
 from synapse.federation.federation_base import event_from_pdu_json
 from synapse.logging.context import LoggingContext, run_in_background
 from synapse.rest import admin
 from synapse.rest.client import login, room
 from synapse.server import HomeServer
-from synapse.types import create_requester
 from synapse.util import Clock
 from synapse.util.stringutils import random_string
 
 from tests import unittest
+from tests.test_utils import event_injection
 
 logger = logging.getLogger(__name__)
 
@@ -39,7 +39,7 @@ def generate_fake_event_id() -> str:
     return "$fake_" + random_string(43)
 
 
-class FederationTestCase(unittest.HomeserverTestCase):
+class FederationTestCase(unittest.FederatingHomeserverTestCase):
     servlets = [
         admin.register_servlets,
         login.register_servlets,
@@ -219,41 +219,77 @@ class FederationTestCase(unittest.HomeserverTestCase):
         # create the room
         user_id = self.register_user("kermit", "test")
         tok = self.login("kermit", "test")
-        requester = create_requester(user_id)
 
         room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+        room_version = self.get_success(self.store.get_room_version(room_id))
+
+        # we need a user on the remote server to be a member, so that we can send
+        # extremity-causing events.
+        self.get_success(
+            event_injection.inject_member_event(
+                self.hs, room_id, f"@user:{self.OTHER_SERVER_NAME}", "join"
+            )
+        )
 
-        ev1 = self.helper.send(room_id, "first message", tok=tok)
+        send_result = self.helper.send(room_id, "first message", tok=tok)
+        ev1 = self.get_success(
+            self.store.get_event(send_result["event_id"], allow_none=False)
+        )
+        current_state = self.get_success(
+            self.store.get_events_as_list(
+                (self.get_success(self.store.get_current_state_ids(room_id))).values()
+            )
+        )
 
         # Create "many" backward extremities. The magic number we're trying to
         # create more than is 5 which corresponds to the number of backward
         # extremities we slice off in `_maybe_backfill_inner`
+        federation_event_handler = self.hs.get_federation_event_handler()
         for _ in range(0, 8):
-            event_handler = self.hs.get_event_creation_handler()
-            event, context = self.get_success(
-                event_handler.create_event(
-                    requester,
+            event = make_event_from_dict(
+                self.add_hashes_and_signatures(
                     {
+                        "origin_server_ts": 1,
                         "type": "m.room.message",
                         "content": {
                             "msgtype": "m.text",
                             "body": "message connected to fake event",
                         },
                         "room_id": room_id,
-                        "sender": user_id,
+                        "sender": f"@user:{self.OTHER_SERVER_NAME}",
+                        "prev_events": [
+                            ev1.event_id,
+                            # We're creating a backward extremity each time thanks
+                            # to this fake event
+                            generate_fake_event_id(),
+                        ],
+                        # lazy: *everything* is an auth event
+                        "auth_events": [ev.event_id for ev in current_state],
+                        "depth": ev1.depth + 1,
                     },
-                    prev_event_ids=[
-                        ev1["event_id"],
-                        # We're creating an backward extremity each time thanks
-                        # to this fake event
-                        generate_fake_event_id(),
-                    ],
-                )
+                    room_version,
+                ),
+                room_version,
             )
+
+            # we poke this directly into _process_received_pdu, to avoid the
+            # federation handler wanting to backfill the fake event.
             self.get_success(
-                event_handler.handle_new_client_event(requester, event, context)
+                federation_event_handler._process_received_pdu(
+                    self.OTHER_SERVER_NAME, event, state=current_state
+                )
             )
 
+        # we should now have 8 backwards extremities.
+        backwards_extremities = self.get_success(
+            self.store.db_pool.simple_select_list(
+                "event_backward_extremities",
+                keyvalues={"room_id": room_id},
+                retcols=["event_id"],
+            )
+        )
+        self.assertEqual(len(backwards_extremities), 8)
+
         current_depth = 1
         limit = 100
         with LoggingContext("receive_pdu"):
@@ -339,7 +375,8 @@ class FederationTestCase(unittest.HomeserverTestCase):
         member_event.signatures = member_event_dict["signatures"]
 
         # Add the new member_event to the StateMap
-        prev_state_map[
+        updated_state_map = dict(prev_state_map)
+        updated_state_map[
             (member_event.type, member_event.state_key)
         ] = member_event.event_id
         auth_events.append(member_event)
@@ -363,7 +400,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
                 prev_event_ids=message_event_dict["prev_events"],
                 auth_event_ids=self._event_auth_handler.compute_auth_events(
                     builder,
-                    prev_state_map,
+                    updated_state_map,
                     for_verification=False,
                 ),
                 depth=message_event_dict["depth"],
@@ -496,8 +533,8 @@ class EventFromPduTestCase(TestCase):
     def test_invalid_numbers(self) -> None:
         """Invalid values for an integer should be rejected, all floats should be rejected."""
         for value in [
-            -(2 ** 53),
-            2 ** 53,
+            -(2**53),
+            2**53,
             1.0,
             float("inf"),
             float("-inf"),
@@ -524,7 +561,7 @@ class EventFromPduTestCase(TestCase):
             event_from_pdu_json(
                 {
                     "type": EventTypes.Message,
-                    "content": {"foo": [{"bar": 2 ** 56}]},
+                    "content": {"foo": [{"bar": 2**56}]},
                     "room_id": "!room:test",
                     "sender": "@user:test",
                     "depth": 1,
diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py
new file mode 100644
index 0000000000..489ba57736
--- /dev/null
+++ b/tests/handlers/test_federation_event.py
@@ -0,0 +1,225 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from unittest import mock
+
+from synapse.events import make_event_from_dict
+from synapse.events.snapshot import EventContext
+from synapse.federation.transport.client import StateRequestResponse
+from synapse.logging.context import LoggingContext
+from synapse.rest import admin
+from synapse.rest.client import login, room
+
+from tests import unittest
+from tests.test_utils import event_injection, make_awaitable
+
+
+class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
+    servlets = [
+        admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        # mock out the federation transport client
+        self.mock_federation_transport_client = mock.Mock(
+            spec=["get_room_state_ids", "get_room_state", "get_event"]
+        )
+        return super().setup_test_homeserver(
+            federation_transport_client=self.mock_federation_transport_client
+        )
+
+    def test_process_pulled_event_with_missing_state(self) -> None:
+        """Ensure that we correctly handle pulled events with lots of missing state
+
+        In this test, we pretend we are processing a "pulled" event (eg, via backfill
+        or get_missing_events). The pulled event has a prev_event we haven't previously
+        seen, so the server requests the state at that prev_event. There is a lot
+        of state we don't have, so we expect the server to make a /state request.
+
+        We check that the pulled event is correctly persisted, and that the state is
+        as we expect.
+        """
+        return self._test_process_pulled_event_with_missing_state(False)
+
+    def test_process_pulled_event_with_missing_state_where_prev_is_outlier(
+        self,
+    ) -> None:
+        """Ensure that we correctly handle pulled events with lots of missing state
+
+        A slight modification to test_process_pulled_event_with_missing_state. Again
+        we have a "pulled" event which refers to a prev_event with lots of state,
+        but in this case we already have the prev_event (as an outlier, obviously -
+        if it were a regular event, we wouldn't need to request the state).
+        """
+        return self._test_process_pulled_event_with_missing_state(True)
+
+    def _test_process_pulled_event_with_missing_state(
+        self, prev_exists_as_outlier: bool
+    ) -> None:
+        OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
+        main_store = self.hs.get_datastores().main
+        state_storage = self.hs.get_storage().state
+
+        # create the room
+        user_id = self.register_user("kermit", "test")
+        tok = self.login("kermit", "test")
+        room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+        room_version = self.get_success(main_store.get_room_version(room_id))
+
+        # allow the remote user to send state events
+        self.helper.send_state(
+            room_id,
+            "m.room.power_levels",
+            {"events_default": 0, "state_default": 0},
+            tok=tok,
+        )
+
+        # add the remote user to the room
+        member_event = self.get_success(
+            event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join")
+        )
+
+        initial_state_map = self.get_success(main_store.get_current_state_ids(room_id))
+
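+        # Auth events for the events our remote user will send: the room's create,
+        # power levels and join rules events, plus that user's own membership.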
+        auth_event_ids = [
+            initial_state_map[("m.room.create", "")],
+            initial_state_map[("m.room.power_levels", "")],
+            initial_state_map[("m.room.join_rules", "")],
+            member_event.event_id,
+        ]
+
+        # mock up a load of state events which we are missing
+        state_events = [
+            make_event_from_dict(
+                self.add_hashes_and_signatures(
+                    {
+                        "type": "test_state_type",
+                        "state_key": f"state_{i}",
+                        "room_id": room_id,
+                        "sender": OTHER_USER,
+                        "prev_events": [member_event.event_id],
+                        "auth_events": auth_event_ids,
+                        "origin_server_ts": 1,
+                        "depth": 10,
+                        "content": {"body": f"state_{i}"},
+                    }
+                ),
+                room_version,
+            )
+            for i in range(1, 10)
+        ]
+
+        # this is the state that we are going to claim is active at the prev_event.
+        state_at_prev_event = state_events + self.get_success(
+            main_store.get_events_as_list(initial_state_map.values())
+        )
+
+        # mock up a prev event.
+        # Depending on the test, we either persist this upfront (as an outlier),
+        # or let the server request it.
+        prev_event = make_event_from_dict(
+            self.add_hashes_and_signatures(
+                {
+                    "type": "test_regular_type",
+                    "room_id": room_id,
+                    "sender": OTHER_USER,
+                    "prev_events": [],
+                    "auth_events": auth_event_ids,
+                    "origin_server_ts": 1,
+                    "depth": 11,
+                    "content": {"body": "missing_prev"},
+                }
+            ),
+            room_version,
+        )
+        if prev_exists_as_outlier:
+            prev_event.internal_metadata.outlier = True
+            persistence = self.hs.get_storage().persistence
+            self.get_success(
+                persistence.persist_event(prev_event, EventContext.for_outlier())
+            )
+        else:
+
+            async def get_event(destination: str, event_id: str, timeout=None):
+                self.assertEqual(destination, self.OTHER_SERVER_NAME)
+                self.assertEqual(event_id, prev_event.event_id)
+                return {"pdus": [prev_event.get_pdu_json()]}
+
+            self.mock_federation_transport_client.get_event.side_effect = get_event
+
+        # mock up a regular event to pass into _process_pulled_event
+        pulled_event = make_event_from_dict(
+            self.add_hashes_and_signatures(
+                {
+                    "type": "test_regular_type",
+                    "room_id": room_id,
+                    "sender": OTHER_USER,
+                    "prev_events": [prev_event.event_id],
+                    "auth_events": auth_event_ids,
+                    "origin_server_ts": 1,
+                    "depth": 12,
+                    "content": {"body": "pulled"},
+                }
+            ),
+            room_version,
+        )
+
+        # we expect an outbound request to /state_ids, so stub that out
+        self.mock_federation_transport_client.get_room_state_ids.return_value = (
+            make_awaitable(
+                {
+                    "pdu_ids": [e.event_id for e in state_at_prev_event],
+                    "auth_chain_ids": [],
+                }
+            )
+        )
+
+        # we also expect an outbound request to /state
+        self.mock_federation_transport_client.get_room_state.return_value = (
+            make_awaitable(
+                StateRequestResponse(auth_events=[], state=state_at_prev_event)
+            )
+        )
+
+        # we have to bump the clock a bit, to keep the retry logic in
+        # FederationClient.get_pdu happy
+        self.reactor.advance(60000)
+
+        # Finally, the call under test: send the pulled event into _process_pulled_event
+        with LoggingContext("test"):
+            self.get_success(
+                self.hs.get_federation_event_handler()._process_pulled_event(
+                    self.OTHER_SERVER_NAME, pulled_event, backfilled=False
+                )
+            )
+
+        # check that the event is correctly persisted
+        persisted = self.get_success(main_store.get_event(pulled_event.event_id))
+        self.assertIsNotNone(persisted, "pulled event was not persisted at all")
+        self.assertFalse(
+            persisted.internal_metadata.is_outlier(), "pulled event was an outlier"
+        )
+
+        # check that the state at that event is as expected
+        state = self.get_success(
+            state_storage.get_state_ids_for_event(pulled_event.event_id)
+        )
+        expected_state = {
+            (e.type, e.state_key): e.event_id for e in state_at_prev_event
+        }
+        self.assertEqual(state, expected_state)
+
+        if prev_exists_as_outlier:
+            self.mock_federation_transport_client.get_event.assert_not_called()
diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py
index 014815db6e..9684120c70 100644
--- a/tests/handlers/test_oidc.py
+++ b/tests/handlers/test_oidc.py
@@ -354,10 +354,11 @@ class OidcHandlerTestCase(HomeserverTestCase):
         req = Mock(spec=["cookies"])
         req.cookies = []
 
-        url = self.get_success(
-            self.provider.handle_redirect_request(req, b"http://client/redirect")
+        url = urlparse(
+            self.get_success(
+                self.provider.handle_redirect_request(req, b"http://client/redirect")
+            )
         )
-        url = urlparse(url)
         auth_endpoint = urlparse(AUTHORIZATION_ENDPOINT)
 
         self.assertEqual(url.scheme, auth_endpoint.scheme)
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 1ec105c373..f88c725a42 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -59,7 +59,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
         self.bob = UserID.from_string("@4567:test")
         self.alice = UserID.from_string("@alice:remote")
 
-        self.get_success(self.register_user(self.frank.localpart, "frankpassword"))
+        self.register_user(self.frank.localpart, "frankpassword")
 
         self.handler = hs.get_profile_handler()
 
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index 3aedc0767b..865b8b7e47 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -158,9 +158,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
             )
 
         # Blow away caches (supported room versions can only change due to a restart).
-        self.get_success(
-            self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()
-        )
+        self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()
         self.store._get_event_cache.clear()
 
         # The rooms should be excluded from the sync response.
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 92012cd6f7..c6e501c7be 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -351,6 +351,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
             self.handler.handle_local_profile_change(regular_user_id, profile_info)
         )
         profile = self.get_success(self.store.get_user_in_directory(regular_user_id))
+        assert profile is not None
         self.assertTrue(profile["display_name"] == display_name)
 
     def test_handle_local_profile_change_with_deactivated_user(self) -> None:
@@ -369,6 +370,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
 
         # profile is in directory
         profile = self.get_success(self.store.get_user_in_directory(r_user_id))
+        assert profile is not None
         self.assertTrue(profile["display_name"] == display_name)
 
         # deactivate user
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index 10dd94b549..9fd5d59c55 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -87,11 +87,23 @@ class ModuleApiTestCase(HomeserverTestCase):
         self.assertEqual(displayname, "Bobberino")
 
     def test_can_register_admin_user(self):
-        user_id = self.get_success(
-            self.register_user(
-                "bob_module_admin", "1234", displayname="Bobberino Admin", admin=True
-            )
+        user_id = self.register_user(
+            "bob_module_admin", "1234", displayname="Bobberino Admin", admin=True
         )
+
+        found_user = self.get_success(self.module_api.get_userinfo_by_id(user_id))
+        self.assertEqual(found_user.user_id.to_string(), user_id)
+        self.assertIdentical(found_user.is_admin, True)
+
+    def test_can_set_admin(self):
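+        """Tests that an existing (non-admin) user can be promoted to admin via the
+        module API's `set_user_admin` method."""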
+        user_id = self.register_user(
+            "alice_wants_admin",
+            "1234",
+            displayname="Alice Powerhungry",
+            admin=False,
+        )
+
+        self.get_success(self.module_api.set_user_admin(user_id, True))
         found_user = self.get_success(self.module_api.get_userinfo_by_id(user_id))
         self.assertEqual(found_user.user_id.to_string(), user_id)
         self.assertIdentical(found_user.is_admin, True)
@@ -278,7 +290,7 @@ class ModuleApiTestCase(HomeserverTestCase):
         # Create a user and room to play with
         user_id = self.register_user("kermit", "monkey")
         tok = self.login("kermit", "monkey")
-        room_id = self.helper.create_room_as(user_id, tok=tok)
+        room_id = self.helper.create_room_as(user_id, tok=tok, is_public=False)
 
         # The room should not currently be in the public rooms directory
         is_in_public_rooms = self.get_success(
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index 9c5df266bd..a0589b6d6a 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -206,7 +206,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
         path: bytes = request.path  # type: ignore
         self.assertRegex(
             path,
-            br"^/_synapse/replication/get_repl_stream_updates/%s/[^/]+$"
+            rb"^/_synapse/replication/get_repl_stream_updates/%s/[^/]+$"
             % (stream_name.encode("ascii"),),
         )
 
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index 17dc42fd37..297a9e77f8 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -268,7 +268,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
 
         event_source = RoomEventSource(self.hs)
         event_source.store = self.slaved_store
-        current_token = self.get_success(event_source.get_current_key())
+        current_token = event_source.get_current_key()
 
         # gradually stream out the replication
         while repl_transport.buffer:
@@ -277,7 +277,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
             self.pump(0)
 
             prev_token = current_token
-            current_token = self.get_success(event_source.get_current_key())
+            current_token = event_source.get_current_key()
 
             # attempt to replicate the behaviour of the sync handler.
             #
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index 0d47dd0aff..e909e444ac 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -702,6 +702,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
         """
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
+        assert media_info is not None
         self.assertFalse(media_info["quarantined_by"])
 
         # quarantining
@@ -715,6 +716,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
         self.assertFalse(channel.json_body)
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
+        assert media_info is not None
         self.assertTrue(media_info["quarantined_by"])
 
         # remove from quarantine
@@ -728,6 +730,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
         self.assertFalse(channel.json_body)
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
+        assert media_info is not None
         self.assertFalse(media_info["quarantined_by"])
 
     def test_quarantine_protected_media(self) -> None:
@@ -740,6 +743,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
 
         # verify protection
         media_info = self.get_success(self.store.get_local_media(self.media_id))
+        assert media_info is not None
         self.assertTrue(media_info["safe_from_quarantine"])
 
         # quarantining
@@ -754,6 +758,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
 
         # verify that is not in quarantine
         media_info = self.get_success(self.store.get_local_media(self.media_id))
+        assert media_info is not None
         self.assertFalse(media_info["quarantined_by"])
 
 
@@ -830,6 +835,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
         """
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
+        assert media_info is not None
         self.assertFalse(media_info["safe_from_quarantine"])
 
         # protect
@@ -843,6 +849,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
         self.assertFalse(channel.json_body)
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
+        assert media_info is not None
         self.assertTrue(media_info["safe_from_quarantine"])
 
         # unprotect
@@ -856,6 +863,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
         self.assertFalse(channel.json_body)
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
+        assert media_info is not None
         self.assertFalse(media_info["safe_from_quarantine"])
 
 
diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py
index 2c855bff99..a53463c9ba 100644
--- a/tests/rest/admin/test_server_notice.py
+++ b/tests/rest/admin/test_server_notice.py
@@ -214,9 +214,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
         self.assertEqual(messages[0]["sender"], "@notices:test")
 
         # invalidate cache of server notices room_ids
-        self.get_success(
-            self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all()
-        )
+        self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all()
 
         # send second message
         channel = self.make_request(
@@ -291,9 +289,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
         # invalidate cache of server notices room_ids
         # if server tries to send to a cached room_id the user gets the message
         # in old room
-        self.get_success(
-            self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all()
-        )
+        self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all()
 
         # send second message
         channel = self.make_request(
@@ -380,9 +376,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
 
         # invalidate cache of server notices room_ids
         # if server tries to send to a cached room_id it gives an error
-        self.get_success(
-            self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all()
-        )
+        self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all()
 
         # send second message
         channel = self.make_request(
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index bef911d5df..0cdf1dec40 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -1590,10 +1590,9 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
         self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
 
-        pushers = self.get_success(
-            self.store.get_pushers_by({"user_name": "@bob:test"})
+        pushers = list(
+            self.get_success(self.store.get_pushers_by({"user_name": "@bob:test"}))
         )
-        pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
         self.assertEqual("@bob:test", pushers[0].user_name)
 
@@ -1632,10 +1631,9 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
         self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
 
-        pushers = self.get_success(
-            self.store.get_pushers_by({"user_name": "@bob:test"})
+        pushers = list(
+            self.get_success(self.store.get_pushers_by({"user_name": "@bob:test"}))
         )
-        pushers = list(pushers)
         self.assertEqual(len(pushers), 0)
 
     def test_set_password(self) -> None:
@@ -2144,6 +2142,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         # is in user directory
         profile = self.get_success(self.store.get_user_in_directory(self.other_user))
+        assert profile is not None
         self.assertTrue(profile["display_name"] == "User")
 
         # Deactivate user
@@ -2711,6 +2710,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
         user_tuple = self.get_success(
             self.store.get_user_by_access_token(other_user_token)
         )
+        assert user_tuple is not None
         token_id = user_tuple.token_id
 
         self.get_success(
@@ -3676,6 +3676,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
         # The user starts off as not shadow-banned.
         other_user_token = self.login("user", "pass")
         result = self.get_success(self.store.get_user_by_access_token(other_user_token))
+        assert result is not None
         self.assertFalse(result.shadow_banned)
 
         channel = self.make_request("POST", self.url, access_token=self.admin_user_tok)
@@ -3684,6 +3685,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
 
         # Ensure the user is shadow-banned (and the cache was cleared).
         result = self.get_success(self.store.get_user_by_access_token(other_user_token))
+        assert result is not None
         self.assertTrue(result.shadow_banned)
 
         # Un-shadow-ban the user.
@@ -3695,6 +3697,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
 
         # Ensure the user is no longer shadow-banned (and the cache was cleared).
         result = self.get_success(self.store.get_user_by_access_token(other_user_token))
+        assert result is not None
         self.assertFalse(result.shadow_banned)
 
 
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index 27946febff..e00b5c171c 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -89,6 +89,17 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
         self.store = hs.get_datastores().main
         self.submit_token_resource = PasswordResetSubmitTokenResource(hs)
 
+    def attempt_wrong_password_login(self, username: str, password: str) -> None:
+        """Attempts to log in as the user with the given password, asserting
+        that the attempt *fails*.
+        """
+        body = {"type": "m.login.password", "user": username, "password": password}
+
+        channel = self.make_request(
+            "POST", "/_matrix/client/r0/login", json.dumps(body).encode("utf8")
+        )
+        self.assertEqual(channel.code, 403, channel.result)
+
     def test_basic_password_reset(self) -> None:
         """Test basic password reset flow"""
         old_password = "monkey"
diff --git a/tests/rest/client/test_account_data.py b/tests/rest/client/test_account_data.py
new file mode 100644
index 0000000000..d5b0640e7a
--- /dev/null
+++ b/tests/rest/client/test_account_data.py
@@ -0,0 +1,75 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from unittest.mock import Mock
+
+from synapse.rest import admin
+from synapse.rest.client import account_data, login, room
+
+from tests import unittest
+from tests.test_utils import make_awaitable
+
+
+class AccountDataTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        account_data.register_servlets,
+    ]
+
+    def test_on_account_data_updated_callback(self) -> None:
+        """Tests that the on_account_data_updated module callback is called correctly when
+        a user's account data changes.
+        """
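+        # Register a mock callback (which resolves immediately) directly on the
+        # account data handler's list of registered module callbacks.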
+        mocked_callback = Mock(return_value=make_awaitable(None))
+        self.hs.get_account_data_handler()._on_account_data_updated_callbacks.append(
+            mocked_callback
+        )
+
+        user_id = self.register_user("user", "password")
+        tok = self.login("user", "password")
+        account_data_type = "org.matrix.foo"
+        account_data_content = {"bar": "baz"}
+
+        # Change the user's global account data.
+        channel = self.make_request(
+            "PUT",
+            f"/user/{user_id}/account_data/{account_data_type}",
+            account_data_content,
+            access_token=tok,
+        )
+
+        # Test that the callback is called with the user ID, the new account data, and
+        # None as the room ID.
+        self.assertEqual(channel.code, 200, channel.result)
+        mocked_callback.assert_called_once_with(
+            user_id, None, account_data_type, account_data_content
+        )
+
+        # Change the user's room-specific account data.
+        room_id = self.helper.create_room_as(user_id, tok=tok)
+        channel = self.make_request(
+            "PUT",
+            f"/user/{user_id}/rooms/{room_id}/account_data/{account_data_type}",
+            account_data_content,
+            access_token=tok,
+        )
+
+        # Test that the callback is called with the user ID, the room ID and the new
+        # account data.
+        self.assertEqual(channel.code, 200, channel.result)
+        self.assertEqual(mocked_callback.call_count, 2)
+        mocked_callback.assert_called_with(
+            user_id, room_id, account_data_type, account_data_content
+        )
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index fe97a0b3dd..419eef166a 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import itertools
 import urllib.parse
 from typing import Any, Callable, Dict, List, Optional, Tuple
 from unittest.mock import patch
@@ -145,16 +144,6 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase):
         self.assertEquals(200, channel.code, channel.json_body)
         return channel.json_body["unsigned"].get("m.relations", {})
 
-    def _get_aggregations(self) -> List[JsonDict]:
-        """Request /aggregations on the parent ID and includes the returned chunk."""
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        return channel.json_body["chunk"]
-
     def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict:
         """
         Find the parent event in a chunk of events and assert that it has the proper bundled aggregations.
@@ -264,43 +253,6 @@ class RelationsTestCase(BaseRelationsTestCase):
             expected_response_code=400,
         )
 
-    def test_aggregation(self) -> None:
-        """Test that annotations get correctly aggregated."""
-
-        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
-        self._send_relation(
-            RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token
-        )
-        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
-
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        self.assertEqual(
-            channel.json_body,
-            {
-                "chunk": [
-                    {"type": "m.reaction", "key": "a", "count": 2},
-                    {"type": "m.reaction", "key": "b", "count": 1},
-                ]
-            },
-        )
-
-    def test_aggregation_must_be_annotation(self) -> None:
-        """Test that aggregations must be annotations."""
-
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/aggregations"
-            f"/{self.parent_id}/{RelationTypes.REPLACE}?limit=1",
-            access_token=self.user_token,
-        )
-        self.assertEqual(400, channel.code, channel.json_body)
-
     def test_ignore_invalid_room(self) -> None:
         """Test that we ignore invalid relations over federation."""
         # Create another room and send a message in it.
@@ -394,15 +346,6 @@ class RelationsTestCase(BaseRelationsTestCase):
         self.assertEqual(200, channel.code, channel.json_body)
         self.assertEqual(channel.json_body["chunk"], [])
 
-        # And when fetching aggregations.
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{room2}/aggregations/{parent_id}",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        self.assertEqual(channel.json_body["chunk"], [])
-
         # And for bundled aggregations.
         channel = self.make_request(
             "GET",
@@ -717,15 +660,6 @@ class RelationsTestCase(BaseRelationsTestCase):
         self.assertEqual(200, channel.code, channel.json_body)
         self.assertNotIn("m.relations", channel.json_body["unsigned"])
 
-        # But unknown relations can be directly queried.
-        channel = self.make_request(
-            "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1",
-            access_token=self.user_token,
-        )
-        self.assertEqual(200, channel.code, channel.json_body)
-        self.assertEqual(channel.json_body["chunk"], [])
-
     def test_background_update(self) -> None:
         """Test the event_arbitrary_relations background update."""
         channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍")
@@ -941,131 +875,6 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
                 annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]]
             )
 
-    def test_aggregation_pagination_groups(self) -> None:
-        """Test that we can paginate annotation groups correctly."""
-
-        # We need to create ten separate users to send each reaction.
-        access_tokens = [self.user_token, self.user2_token]
-        idx = 0
-        while len(access_tokens) < 10:
-            user_id, token = self._create_user("test" + str(idx))
-            idx += 1
-
-            self.helper.join(self.room, user=user_id, tok=token)
-            access_tokens.append(token)
-
-        idx = 0
-        sent_groups = {"👍": 10, "a": 7, "b": 5, "c": 3, "d": 2, "e": 1}
-        for key in itertools.chain.from_iterable(
-            itertools.repeat(key, num) for key, num in sent_groups.items()
-        ):
-            self._send_relation(
-                RelationTypes.ANNOTATION,
-                "m.reaction",
-                key=key,
-                access_token=access_tokens[idx],
-            )
-
-            idx += 1
-            idx %= len(access_tokens)
-
-        prev_token: Optional[str] = None
-        found_groups: Dict[str, int] = {}
-        for _ in range(20):
-            from_token = ""
-            if prev_token:
-                from_token = "&from=" + prev_token
-
-            channel = self.make_request(
-                "GET",
-                f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1{from_token}",
-                access_token=self.user_token,
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-
-            self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
-
-            for groups in channel.json_body["chunk"]:
-                # We only expect reactions
-                self.assertEqual(groups["type"], "m.reaction", channel.json_body)
-
-                # We should only see each key once
-                self.assertNotIn(groups["key"], found_groups, channel.json_body)
-
-                found_groups[groups["key"]] = groups["count"]
-
-            next_batch = channel.json_body.get("next_batch")
-
-            self.assertNotEqual(prev_token, next_batch)
-            prev_token = next_batch
-
-            if not prev_token:
-                break
-
-        self.assertEqual(sent_groups, found_groups)
-
-    def test_aggregation_pagination_within_group(self) -> None:
-        """Test that we can paginate within an annotation group."""
-
-        # We need to create ten separate users to send each reaction.
-        access_tokens = [self.user_token, self.user2_token]
-        idx = 0
-        while len(access_tokens) < 10:
-            user_id, token = self._create_user("test" + str(idx))
-            idx += 1
-
-            self.helper.join(self.room, user=user_id, tok=token)
-            access_tokens.append(token)
-
-        idx = 0
-        expected_event_ids = []
-        for _ in range(10):
-            channel = self._send_relation(
-                RelationTypes.ANNOTATION,
-                "m.reaction",
-                key="👍",
-                access_token=access_tokens[idx],
-            )
-            expected_event_ids.append(channel.json_body["event_id"])
-
-            idx += 1
-
-        # Also send a different type of reaction so that we test we don't see it
-        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a")
-
-        prev_token = ""
-        found_event_ids: List[str] = []
-        encoded_key = urllib.parse.quote_plus("👍".encode())
-        for _ in range(20):
-            from_token = ""
-            if prev_token:
-                from_token = "&from=" + prev_token
-
-            channel = self.make_request(
-                "GET",
-                f"/_matrix/client/unstable/rooms/{self.room}"
-                f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}"
-                f"/m.reaction/{encoded_key}?limit=1{from_token}",
-                access_token=self.user_token,
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-
-            self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
-
-            found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
-
-            next_batch = channel.json_body.get("next_batch")
-
-            self.assertNotEqual(prev_token, next_batch)
-            prev_token = next_batch
-
-            if not prev_token:
-                break
-
-        # We paginated backwards, so reverse
-        found_event_ids.reverse()
-        self.assertEqual(found_event_ids, expected_event_ids)
-
 
 class BundledAggregationsTestCase(BaseRelationsTestCase):
     """
@@ -1453,10 +1262,6 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
             {"chunk": [{"type": "m.reaction", "key": "a", "count": 2}]},
         )
 
-        # Both relations appear in the aggregation.
-        chunk = self._get_aggregations()
-        self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 2}])
-
         # Redact one of the reactions.
         self._redact(to_redact_event_id)
 
@@ -1469,10 +1274,6 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
             {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]},
         )
 
-        # The unredacted aggregation should still exist.
-        chunk = self._get_aggregations()
-        self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 1}])
-
     def test_redact_relation_thread(self) -> None:
         """
         Test that thread replies are properly handled after the thread reply redacted.
@@ -1578,10 +1379,6 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
         self.assertEqual(len(event_ids), 1)
         self.assertIn(RelationTypes.ANNOTATION, relations)
 
-        # The aggregation should exist.
-        chunk = self._get_aggregations()
-        self.assertEqual(chunk, [{"type": "m.reaction", "key": "👍", "count": 1}])
-
         # Redact the original event.
         self._redact(self.parent_id)
 
@@ -1594,10 +1391,6 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
             {"chunk": [{"type": "m.reaction", "key": "👍", "count": 1}]},
         )
 
-        # There's nothing to aggregate.
-        chunk = self._get_aggregations()
-        self.assertEqual(chunk, [{"count": 1, "key": "👍", "type": "m.reaction"}])
-
     @unittest.override_config({"experimental_features": {"msc3440_enabled": True}})
     def test_redact_parent_thread(self) -> None:
         """
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index 3a9617d6da..6ff79b9e2e 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -982,7 +982,7 @@ class RoomJoinRatelimitTestCase(RoomBase):
         super().prepare(reactor, clock, hs)
         # profile changes expect that the user is actually registered
         user = UserID.from_string(self.user_id)
-        self.get_success(self.register_user(user.localpart, "supersecretpassword"))
+        self.register_user(user.localpart, "supersecretpassword")
 
     @unittest.override_config(
         {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}}
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index 4351013952..773c16a54c 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -341,7 +341,6 @@ class SyncKnockTestCase(
             hs, self.room_id, self.user_id
         )
 
-    @override_config({"experimental_features": {"msc2403_enabled": True}})
     def test_knock_room_state(self) -> None:
         """Tests that /sync returns state from a room after knocking on it."""
         # Knock on a room
@@ -497,6 +496,11 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase):
         receipts.register_servlets,
     ]
 
+    def default_config(self) -> JsonDict:
+        config = super().default_config()
+        config["experimental_features"] = {"msc2654_enabled": True}
+        return config
+
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.url = "/sync?since=%s"
         self.next_batch = "s0"
@@ -772,3 +776,65 @@ class DeviceListSyncTestCase(unittest.HomeserverTestCase):
         self.assertIn(
             self.user_id, device_list_changes, incremental_sync_channel.json_body
         )
+
+
+class ExcludeRoomTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        sync.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(
+        self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+    ) -> None:
+        self.user_id = self.register_user("user", "password")
+        self.tok = self.login("user", "password")
+
+        self.excluded_room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
+        self.included_room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
+
+        # We need to manually append the room ID, because we can't know the ID before
+        # creating the room, and we can't set the config after starting the homeserver.
+        self.hs.get_sync_handler().rooms_to_exclude.append(self.excluded_room_id)
+
+    def test_join_leave(self) -> None:
+        """Tests that rooms are correctly excluded from the 'join' and 'leave' sections of
+        sync responses.
+        """
+        channel = self.make_request("GET", "/sync", access_token=self.tok)
+        self.assertEqual(channel.code, 200, channel.result)
+
+        self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["join"])
+        self.assertIn(self.included_room_id, channel.json_body["rooms"]["join"])
+
+        self.helper.leave(self.excluded_room_id, self.user_id, tok=self.tok)
+        self.helper.leave(self.included_room_id, self.user_id, tok=self.tok)
+
+        channel = self.make_request(
+            "GET",
+            "/sync?since=" + channel.json_body["next_batch"],
+            access_token=self.tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+        self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["leave"])
+        self.assertIn(self.included_room_id, channel.json_body["rooms"]["leave"])
+
+    def test_invite(self) -> None:
+        """Tests that rooms are correctly excluded from the 'invite' section of sync
+        responses.
+        """
+        invitee = self.register_user("invitee", "password")
+        invitee_tok = self.login("invitee", "password")
+
+        self.helper.invite(self.excluded_room_id, self.user_id, invitee, tok=self.tok)
+        self.helper.invite(self.included_room_id, self.user_id, invitee, tok=self.tok)
+
+        channel = self.make_request("GET", "/sync", access_token=invitee_tok)
+        self.assertEqual(channel.code, 200, channel.result)
+
+        self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["invite"])
+        self.assertIn(self.included_room_id, channel.json_body["rooms"]["invite"])
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py
index e7de67e3a3..5eb0f243f7 100644
--- a/tests/rest/client/test_third_party_rules.py
+++ b/tests/rest/client/test_third_party_rules.py
@@ -896,3 +896,44 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
 
         # Check that the mock was called with the right room ID
         self.assertEqual(args[1], self.room_id)
+
+    def test_on_threepid_bind(self) -> None:
+        """Tests that the on_threepid_bind module callback is called correctly after
+        associating a 3PID to an account.
+        """
+        # Register a mocked callback.
+        threepid_bind_mock = Mock(return_value=make_awaitable(None))
+        third_party_rules = self.hs.get_third_party_event_rules()
+        third_party_rules._on_threepid_bind_callbacks.append(threepid_bind_mock)
+
+        # Register an admin user.
+        self.register_user("admin", "password", admin=True)
+        admin_tok = self.login("admin", "password")
+
+        # Also register a normal user we can modify.
+        user_id = self.register_user("user", "password")
+
+        # Add a 3PID to the user.
+        channel = self.make_request(
+            "PUT",
+            "/_synapse/admin/v2/users/%s" % user_id,
+            {
+                "threepids": [
+                    {
+                        "medium": "email",
+                        "address": "foo@example.com",
+                    },
+                ],
+            },
+            access_token=admin_tok,
+        )
+
+        # Check that the request succeeded.
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Check that the mock was called once.
+        threepid_bind_mock.assert_called_once()
+        args = threepid_bind_mock.call_args[0]
+
+        # Check that the mock was called with the right parameters
+        self.assertEqual(args, (user_id, "email", "foo@example.com"))
diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index 28663826fc..a0788b1bb0 100644
--- a/tests/rest/client/utils.py
+++ b/tests/rest/client/utils.py
@@ -88,7 +88,7 @@ class RestHelper:
     def create_room_as(
         self,
         room_creator: Optional[str] = None,
-        is_public: Optional[bool] = None,
+        is_public: Optional[bool] = True,
         room_version: Optional[str] = None,
         tok: Optional[str] = None,
         expect_code: int = HTTPStatus.OK,
@@ -101,9 +101,12 @@ class RestHelper:
         Args:
             room_creator: The user ID to create the room with.
             is_public: If True, the `visibility` parameter will be set to
-                "public". If False, it will be set to "private". If left
-                unspecified, the server will set it to an appropriate default
-                (which should be "private" as per the CS spec).
+                "public". If False, it will be set to "private".
+                If None, the `visibility` parameter will be omitted, in which
+                case the server is expected to make the room private according
+                to the CS API.
+                Defaults to public, since that is what most tests need and
+                room privacy is rarely a concern.
             room_version: The room version to create the room as. Defaults to Synapse's
                 default room version.
             tok: The access token to use in the request.
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index 978c252f84..ac0ac06b7e 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -76,7 +76,7 @@ class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase):
                 "verify_keys": {
                     key_id: {
                         "key": signedjson.key.encode_verify_key_base64(
-                            signing_key.verify_key
+                            signedjson.key.get_verify_key(signing_key)
                         )
                     }
                 },
@@ -175,7 +175,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
                     % (
                         self.hs_signing_key.version,
                     ): signedjson.key.encode_verify_key_base64(
-                        self.hs_signing_key.verify_key
+                        signedjson.key.get_verify_key(self.hs_signing_key)
                     )
                 },
             }
@@ -229,7 +229,9 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
         assert isinstance(keyres, FetchKeyResult)
         self.assertEqual(
             signedjson.key.encode_verify_key_base64(keyres.verify_key),
-            signedjson.key.encode_verify_key_base64(testkey.verify_key),
+            signedjson.key.encode_verify_key_base64(
+                signedjson.key.get_verify_key(testkey)
+            ),
         )
 
     def test_get_notary_key(self) -> None:
@@ -251,7 +253,9 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
         assert isinstance(keyres, FetchKeyResult)
         self.assertEqual(
             signedjson.key.encode_verify_key_base64(keyres.verify_key),
-            signedjson.key.encode_verify_key_base64(testkey.verify_key),
+            signedjson.key.encode_verify_key_base64(
+                signedjson.key.get_verify_key(testkey)
+            ),
         )
 
     def test_get_notary_keyserver_key(self) -> None:
@@ -268,5 +272,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
         assert isinstance(keyres, FetchKeyResult)
         self.assertEqual(
             signedjson.key.encode_verify_key_base64(keyres.verify_key),
-            signedjson.key.encode_verify_key_base64(self.hs_signing_key.verify_key),
+            signedjson.key.encode_verify_key_base64(
+                signedjson.key.get_verify_key(self.hs_signing_key)
+            ),
         )
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index 5148c39874..3b24d0ace6 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -17,7 +17,7 @@ import json
 import os
 import re
 from typing import Any, Dict, Optional, Sequence, Tuple, Type
-from urllib.parse import urlencode
+from urllib.parse import quote, urlencode
 
 from twisted.internet._resolver import HostResolution
 from twisted.internet.address import IPv4Address, IPv6Address
@@ -69,7 +69,6 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             "2001:800::/21",
         )
         config["url_preview_ip_range_whitelist"] = ("1.1.1.1",)
-        config["url_preview_url_blacklist"] = []
         config["url_preview_accept_language"] = [
             "en-UK",
             "en-US;q=0.9",
@@ -1123,3 +1122,43 @@ class URLPreviewTests(unittest.HomeserverTestCase):
                 os.path.exists(path),
                 f"{os.path.relpath(path, self.media_store_path)} was not deleted",
             )
+
+    @unittest.override_config({"url_preview_url_blacklist": [{"port": "*"}]})
+    def test_blacklist_port(self) -> None:
+        """Tests that blacklisting URLs with a port makes previewing such URLs
+        fail with a 403 error and doesn't impact other previews.
+        """
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+
+        bad_url = quote("http://matrix.org:8888/foo")
+        good_url = quote("http://matrix.org/foo")
+
+        channel = self.make_request(
+            "GET",
+            "preview_url?url=" + bad_url,
+            shorthand=False,
+            await_result=False,
+        )
+        self.pump()
+        self.assertEqual(channel.code, 403, channel.result)
+
+        channel = self.make_request(
+            "GET",
+            "preview_url?url=" + good_url,
+            shorthand=False,
+            await_result=False,
+        )
+        self.pump()
+
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: text/html\r\n\r\n"
+            % (len(self.end_content),)
+            + self.end_content
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
diff --git a/tests/server.py b/tests/server.py
index 6ce2a17bf4..aaa5ca3e74 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -22,7 +22,6 @@ import warnings
 from collections import deque
 from io import SEEK_END, BytesIO
 from typing import (
-    AnyStr,
     Callable,
     Dict,
     Iterable,
@@ -86,6 +85,9 @@ from tests.utils import (
 
 logger = logging.getLogger(__name__)
 
+# the type of thing that can be passed into `make_request` in the headers list
+CustomHeaderType = Tuple[Union[str, bytes], Union[str, bytes]]
+
 
 class TimedOutException(Exception):
     """
@@ -260,7 +262,7 @@ def make_request(
     federation_auth_origin: Optional[bytes] = None,
     content_is_form: bool = False,
     await_result: bool = True,
-    custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None,
+    custom_headers: Optional[Iterable[CustomHeaderType]] = None,
     client_ip: str = "127.0.0.1",
 ) -> FakeChannel:
     """
diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py
index 3ac4646969..74c6224eb6 100644
--- a/tests/storage/databases/main/test_lock.py
+++ b/tests/storage/databases/main/test_lock.py
@@ -28,7 +28,7 @@ class LockTestCase(unittest.HomeserverTestCase):
         """
         # First to acquire this lock, so it should complete
         lock = self.get_success(self.store.try_acquire_lock("name", "key"))
-        self.assertIsNotNone(lock)
+        assert lock is not None
 
         # Enter the context manager
         self.get_success(lock.__aenter__())
@@ -45,7 +45,7 @@ class LockTestCase(unittest.HomeserverTestCase):
 
         # We can now acquire the lock again.
         lock3 = self.get_success(self.store.try_acquire_lock("name", "key"))
-        self.assertIsNotNone(lock3)
+        assert lock3 is not None
         self.get_success(lock3.__aenter__())
         self.get_success(lock3.__aexit__(None, None, None))
 
@@ -53,7 +53,7 @@ class LockTestCase(unittest.HomeserverTestCase):
         """Test that we don't time out locks while they're still active"""
 
         lock = self.get_success(self.store.try_acquire_lock("name", "key"))
-        self.assertIsNotNone(lock)
+        assert lock is not None
 
         self.get_success(lock.__aenter__())
 
@@ -69,7 +69,7 @@ class LockTestCase(unittest.HomeserverTestCase):
         """Test that we time out locks if they're not updated for ages"""
 
         lock = self.get_success(self.store.try_acquire_lock("name", "key"))
-        self.assertIsNotNone(lock)
+        assert lock is not None
 
         self.get_success(lock.__aenter__())
 
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index ee599f4336..1bf93e79a7 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -31,6 +31,7 @@ from synapse.storage.databases.main.appservice import (
     ApplicationServiceStore,
     ApplicationServiceTransactionStore,
 )
+from synapse.types import DeviceListUpdates
 from synapse.util import Clock
 
 from tests import unittest
@@ -168,15 +169,6 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
             (as_id, txn_id, json.dumps([e.event_id for e in events])),
         )
 
-    def _set_last_txn(self, as_id, txn_id):
-        return self.db_pool.runOperation(
-            self.engine.convert_param_style(
-                "INSERT INTO application_services_state(as_id, last_txn, state) "
-                "VALUES(?,?,?)"
-            ),
-            (as_id, txn_id, ApplicationServiceState.UP.value),
-        )
-
     def test_get_appservice_state_none(
         self,
     ) -> None:
@@ -267,65 +259,15 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
         events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")])
         txn = self.get_success(
             defer.ensureDeferred(
-                self.store.create_appservice_txn(service, events, [], [], {}, {})
+                self.store.create_appservice_txn(
+                    service, events, [], [], {}, {}, DeviceListUpdates()
+                )
             )
         )
         self.assertEqual(txn.id, 1)
         self.assertEqual(txn.events, events)
         self.assertEqual(txn.service, service)
 
-    def test_create_appservice_txn_older_last_txn(
-        self,
-    ) -> None:
-        service = Mock(id=self.as_list[0]["id"])
-        events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")])
-        self.get_success(self._set_last_txn(service.id, 9643))  # AS is falling behind
-        self.get_success(self._insert_txn(service.id, 9644, events))
-        self.get_success(self._insert_txn(service.id, 9645, events))
-        txn = self.get_success(
-            self.store.create_appservice_txn(service, events, [], [], {}, {})
-        )
-        self.assertEqual(txn.id, 9646)
-        self.assertEqual(txn.events, events)
-        self.assertEqual(txn.service, service)
-
-    def test_create_appservice_txn_up_to_date_last_txn(
-        self,
-    ) -> None:
-        service = Mock(id=self.as_list[0]["id"])
-        events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")])
-        self.get_success(self._set_last_txn(service.id, 9643))
-        txn = self.get_success(
-            self.store.create_appservice_txn(service, events, [], [], {}, {})
-        )
-        self.assertEqual(txn.id, 9644)
-        self.assertEqual(txn.events, events)
-        self.assertEqual(txn.service, service)
-
-    def test_create_appservice_txn_up_fuzzing(
-        self,
-    ) -> None:
-        service = Mock(id=self.as_list[0]["id"])
-        events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")])
-        self.get_success(self._set_last_txn(service.id, 9643))
-
-        # dump in rows with higher IDs to make sure the queries aren't wrong.
-        self.get_success(self._set_last_txn(self.as_list[1]["id"], 119643))
-        self.get_success(self._set_last_txn(self.as_list[2]["id"], 9))
-        self.get_success(self._set_last_txn(self.as_list[3]["id"], 9643))
-        self.get_success(self._insert_txn(self.as_list[1]["id"], 119644, events))
-        self.get_success(self._insert_txn(self.as_list[1]["id"], 119645, events))
-        self.get_success(self._insert_txn(self.as_list[1]["id"], 119646, events))
-        self.get_success(self._insert_txn(self.as_list[2]["id"], 10, events))
-        self.get_success(self._insert_txn(self.as_list[3]["id"], 9643, events))
-
-        txn = self.get_success(
-            self.store.create_appservice_txn(service, events, [], [], {}, {})
-        )
-        self.assertEqual(txn.id, 9644)
-        self.assertEqual(txn.events, events)
-        self.assertEqual(txn.service, service)
-
     def test_complete_appservice_txn_first_txn(
         self,
     ) -> None:
@@ -359,13 +301,13 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
         )
         self.assertEqual(0, len(res))
 
-    def test_complete_appservice_txn_existing_in_state_table(
+    def test_complete_appservice_txn_updates_last_txn_state(
         self,
     ) -> None:
         service = Mock(id=self.as_list[0]["id"])
         events = [Mock(event_id="e1"), Mock(event_id="e2")]
         txn_id = 5
-        self.get_success(self._set_last_txn(service.id, 4))
+        self._set_state(self.as_list[0]["id"], ApplicationServiceState.UP)
         self.get_success(self._insert_txn(service.id, txn_id, events))
         self.get_success(
             self.store.complete_appservice_txn(txn_id=txn_id, service=service)
@@ -416,6 +358,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
         self.get_success(self._insert_txn(service.id, 12, other_events))
 
         txn = self.get_success(self.store.get_oldest_unsent_txn(service))
+        assert txn is not None
         self.assertEqual(service, txn.service)
         self.assertEqual(10, txn.id)
         self.assertEqual(events, txn.events)
@@ -476,12 +419,12 @@ class ApplicationServiceStoreTypeStreamIds(unittest.HomeserverTestCase):
         value = self.get_success(
             self.store.get_type_stream_id_for_appservice(self.service, "read_receipt")
         )
-        self.assertEqual(value, 0)
+        self.assertEqual(value, 1)
 
         value = self.get_success(
             self.store.get_type_stream_id_for_appservice(self.service, "presence")
         )
-        self.assertEqual(value, 0)
+        self.assertEqual(value, 1)
 
     def test_get_type_stream_id_for_appservice_invalid_type(self) -> None:
         self.get_failure(
diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py
index ce89c96912..b998ad42d9 100644
--- a/tests/storage/test_cleanup_extrems.py
+++ b/tests/storage/test_cleanup_extrems.py
@@ -68,6 +68,22 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
 
         self.wait_for_background_updates()
 
+    def add_extremity(self, room_id: str, event_id: str) -> None:
+        """
+        Add the given event as an extremity to the room.
+        """
+        self.get_success(
+            self.hs.get_datastores().main.db_pool.simple_insert(
+                table="event_forward_extremities",
+                values={"room_id": room_id, "event_id": event_id},
+                desc="test_add_extremity",
+            )
+        )
+
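+        # Invalidate the cached forward extremities for the room, since we have
+        # inserted the new extremity directly into the database.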
+        self.hs.get_datastores().main.get_latest_event_ids_in_room.invalidate(
+            (room_id,)
+        )
+
     def test_soft_failed_extremities_handled_correctly(self):
         """Test that extremities are correctly calculated in the presence of
         soft failed events.
@@ -250,7 +266,9 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
         self.user = UserID.from_string(self.register_user("user1", "password"))
         self.token1 = self.login("user1", "password")
         self.requester = create_requester(self.user)
-        info, _ = self.get_success(self.room_creator.create_room(self.requester, {}))
+        info, _ = self.get_success(
+            self.room_creator.create_room(self.requester, {"visibility": "public"})
+        )
         self.room_id = info["room_id"]
         self.event_creator = homeserver.get_event_creation_handler()
         homeserver.config.consent.user_consent_version = self.CONSENT_VERSION
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index 21ffc5a909..d1227dd4ac 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -96,7 +96,9 @@ class DeviceStoreTestCase(HomeserverTestCase):
 
         # Add two device updates with sequential `stream_id`s
         self.get_success(
-            self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
+            self.store.add_device_change_to_streams(
+                "user_id", device_ids, ["somehost"], ["!some:room"]
+            )
         )
 
         # Get all device updates ever meant for this remote
@@ -122,7 +124,9 @@ class DeviceStoreTestCase(HomeserverTestCase):
             "device_id5",
         ]
         self.get_success(
-            self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
+            self.store.add_device_change_to_streams(
+                "user_id", device_ids, ["somehost"], ["!some:room"]
+            )
         )
 
         # Get device updates meant for this remote
@@ -144,7 +148,9 @@ class DeviceStoreTestCase(HomeserverTestCase):
         # Add some more device updates to ensure it still resumes properly
         device_ids = ["device_id6", "device_id7"]
         self.get_success(
-            self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
+            self.store.add_device_change_to_streams(
+                "user_id", device_ids, ["somehost"], ["!some:room"]
+            )
         )
 
         # Get the next batch of device updates
@@ -220,7 +226,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
 
         self.get_success(
             self.store.add_device_change_to_streams(
-                "@user_id:test", device_ids, ["somehost"]
+                "@user_id:test", device_ids, ["somehost"], ["!some:room"]
             )
         )
 
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index 395396340b..2d8d1f860f 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -157,10 +157,10 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(id_gen.get_positions(), {"master": 7})
         self.assertEqual(id_gen.get_current_token_for_writer("master"), 7)
 
-        ctx1 = self.get_success(id_gen.get_next())
-        ctx2 = self.get_success(id_gen.get_next())
-        ctx3 = self.get_success(id_gen.get_next())
-        ctx4 = self.get_success(id_gen.get_next())
+        ctx1 = id_gen.get_next()
+        ctx2 = id_gen.get_next()
+        ctx3 = id_gen.get_next()
+        ctx4 = id_gen.get_next()
 
         s1 = self.get_success(ctx1.__aenter__())
         s2 = self.get_success(ctx2.__aenter__())
@@ -362,8 +362,8 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(id_gen.get_current_token_for_writer("master"), 7)
 
         # Persist two rows at once
-        ctx1 = self.get_success(id_gen.get_next())
-        ctx2 = self.get_success(id_gen.get_next())
+        ctx1 = id_gen.get_next()
+        ctx2 = id_gen.get_next()
 
         s1 = self.get_success(ctx1.__aenter__())
         s2 = self.get_success(ctx2.__aenter__())
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index 03e9cc7d4a..d8d17ef379 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -119,11 +119,9 @@ class RedactionTestCase(unittest.HomeserverTestCase):
         return event
 
     def test_redact(self):
-        self.get_success(
-            self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
-        )
+        self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
 
-        msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t"))
+        msg_event = self.inject_message(self.room1, self.u_alice, "t")
 
         # Check event has not been redacted:
         event = self.get_success(self.store.get_event(msg_event.event_id))
@@ -141,9 +139,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
 
         # Redact event
         reason = "Because I said so"
-        self.get_success(
-            self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason)
-        )
+        self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason)
 
         event = self.get_success(self.store.get_event(msg_event.event_id))
 
@@ -170,14 +166,10 @@ class RedactionTestCase(unittest.HomeserverTestCase):
         )
 
     def test_redact_join(self):
-        self.get_success(
-            self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
-        )
+        self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
 
-        msg_event = self.get_success(
-            self.inject_room_member(
-                self.room1, self.u_bob, Membership.JOIN, extra_content={"blue": "red"}
-            )
+        msg_event = self.inject_room_member(
+            self.room1, self.u_bob, Membership.JOIN, extra_content={"blue": "red"}
         )
 
         event = self.get_success(self.store.get_event(msg_event.event_id))
@@ -195,9 +187,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
 
         # Redact event
         reason = "Because I said so"
-        self.get_success(
-            self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason)
-        )
+        self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason)
 
         # Check redaction
 
@@ -311,11 +301,9 @@ class RedactionTestCase(unittest.HomeserverTestCase):
     def test_redact_censor(self):
         """Test that a redacted event gets censored in the DB after a month"""
 
-        self.get_success(
-            self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
-        )
+        self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
 
-        msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t"))
+        msg_event = self.inject_message(self.room1, self.u_alice, "t")
 
         # Check event has not been redacted:
         event = self.get_success(self.store.get_event(msg_event.event_id))
@@ -333,9 +321,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
 
         # Redact event
         reason = "Because I said so"
-        self.get_success(
-            self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason)
-        )
+        self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason)
 
         event = self.get_success(self.store.get_event(msg_event.event_id))
 
@@ -381,25 +367,19 @@ class RedactionTestCase(unittest.HomeserverTestCase):
     def test_redact_redaction(self):
         """Tests that we can redact a redaction and can fetch it again."""
 
-        self.get_success(
-            self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
-        )
+        self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
 
-        msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t"))
+        msg_event = self.inject_message(self.room1, self.u_alice, "t")
 
-        first_redact_event = self.get_success(
-            self.inject_redaction(
-                self.room1, msg_event.event_id, self.u_alice, "Redacting message"
-            )
+        first_redact_event = self.inject_redaction(
+            self.room1, msg_event.event_id, self.u_alice, "Redacting message"
         )
 
-        self.get_success(
-            self.inject_redaction(
-                self.room1,
-                first_redact_event.event_id,
-                self.u_alice,
-                "Redacting redaction",
-            )
+        self.inject_redaction(
+            self.room1,
+            first_redact_event.event_id,
+            self.u_alice,
+            "Redacting redaction",
         )
 
         # Now lets jump to the future where we have censored the redaction event
@@ -414,9 +394,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
     def test_store_redacted_redaction(self):
         """Tests that we can store a redacted redaction."""
 
-        self.get_success(
-            self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
-        )
+        self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
 
         builder = self.event_builder_factory.for_room_version(
             RoomVersions.V1,
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index eaa0d7d749..52e41cdab4 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -110,9 +110,7 @@ class PaginationTestCase(HomeserverTestCase):
     def _filter_messages(self, filter: JsonDict) -> List[EventBase]:
         """Make a request to /messages with a filter, returns the chunk of events."""
 
-        from_token = self.get_success(
-            self.hs.get_event_sources().get_current_token_for_pagination()
-        )
+        from_token = self.hs.get_event_sources().get_current_token_for_pagination()
 
         events, next_key = self.get_success(
             self.hs.get_datastores().main.paginate_room_events(
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index 532e3fe9cd..d0230f9ebb 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -17,6 +17,7 @@ from unittest.mock import patch
 
 from synapse.api.room_versions import RoomVersions
 from synapse.events import EventBase, make_event_from_dict
+from synapse.events.snapshot import EventContext
 from synapse.types import JsonDict, create_requester
 from synapse.visibility import filter_events_for_client, filter_events_for_server
 
@@ -47,17 +48,15 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
         #
 
         # before we do that, we persist some other events to act as state.
-        self.get_success(self._inject_visibility("@admin:hs", "joined"))
+        self._inject_visibility("@admin:hs", "joined")
         for i in range(0, 10):
-            self.get_success(self._inject_room_member("@resident%i:hs" % i))
+            self._inject_room_member("@resident%i:hs" % i)
 
         events_to_filter = []
 
         for i in range(0, 10):
             user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
-            evt = self.get_success(
-                self._inject_room_member(user, extra_content={"a": "b"})
-            )
+            evt = self._inject_room_member(user, extra_content={"a": "b"})
             events_to_filter.append(evt)
 
         filtered = self.get_success(
@@ -73,24 +72,57 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
             self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
             self.assertEqual(filtered[i].content["a"], "b")
 
+    def test_filter_outlier(self) -> None:
+        # outlier events must be returned, for the good of the collective federation
+        self._inject_room_member("@resident:remote_hs")
+        self._inject_visibility("@resident:remote_hs", "joined")
+
+        outlier = self._inject_outlier()
+        self.assertEqual(
+            self.get_success(
+                filter_events_for_server(self.storage, "remote_hs", [outlier])
+            ),
+            [outlier],
+        )
+
+        # it should also work when there are other events in the list
+        evt = self._inject_message("@unerased:local_hs")
+
+        filtered = self.get_success(
+            filter_events_for_server(self.storage, "remote_hs", [outlier, evt])
+        )
+        self.assertEqual(len(filtered), 2, f"expected 2 results, got: {filtered}")
+        self.assertEqual(filtered[0], outlier)
+        self.assertEqual(filtered[1].event_id, evt.event_id)
+        self.assertEqual(filtered[1].content, evt.content)
+
+        # ... but other servers should only be able to see the outlier (the other
+        # event should be redacted)
+        filtered = self.get_success(
+            filter_events_for_server(self.storage, "other_server", [outlier, evt])
+        )
+        self.assertEqual(filtered[0], outlier)
+        self.assertEqual(filtered[1].event_id, evt.event_id)
+        self.assertNotIn("body", filtered[1].content)
+
     def test_erased_user(self) -> None:
         # 4 message events, from erased and unerased users, with a membership
         # change in the middle of them.
         events_to_filter = []
 
-        evt = self.get_success(self._inject_message("@unerased:local_hs"))
+        evt = self._inject_message("@unerased:local_hs")
         events_to_filter.append(evt)
 
-        evt = self.get_success(self._inject_message("@erased:local_hs"))
+        evt = self._inject_message("@erased:local_hs")
         events_to_filter.append(evt)
 
-        evt = self.get_success(self._inject_room_member("@joiner:remote_hs"))
+        evt = self._inject_room_member("@joiner:remote_hs")
         events_to_filter.append(evt)
 
-        evt = self.get_success(self._inject_message("@unerased:local_hs"))
+        evt = self._inject_message("@unerased:local_hs")
         events_to_filter.append(evt)
 
-        evt = self.get_success(self._inject_message("@erased:local_hs"))
+        evt = self._inject_message("@erased:local_hs")
         events_to_filter.append(evt)
 
         # the erasey user gets erased
@@ -187,6 +219,25 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
         self.get_success(self.storage.persistence.persist_event(event, context))
         return event
 
+    def _inject_outlier(self) -> EventBase:
+        builder = self.event_builder_factory.for_room_version(
+            RoomVersions.V1,
+            {
+                "type": "m.room.member",
+                "sender": "@test:user",
+                "state_key": "@test:user",
+                "room_id": TEST_ROOM_ID,
+                "content": {"membership": "join"},
+            },
+        )
+
+        event = self.get_success(builder.build(prev_event_ids=[], auth_event_ids=[]))
+        event.internal_metadata.outlier = True
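+        # Persist the event with the dedicated outlier context, i.e. without any
+        # associated room state.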
+        self.get_success(
+            self.storage.persistence.persist_event(event, EventContext.for_outlier())
+        )
+        return event
+
 
 class FilterEventsForClientTestCase(unittest.FederatingHomeserverTestCase):
     def test_out_of_band_invite_rejection(self):
diff --git a/tests/unittest.py b/tests/unittest.py
index 326895f4c9..9afa68c164 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -16,17 +16,17 @@
 import gc
 import hashlib
 import hmac
-import inspect
 import json
 import logging
 import secrets
 import time
 from typing import (
     Any,
-    AnyStr,
+    Awaitable,
     Callable,
     ClassVar,
     Dict,
+    Generic,
     Iterable,
     List,
     Optional,
@@ -40,6 +40,7 @@ from unittest.mock import Mock, patch
 import canonicaljson
 import signedjson.key
 import unpaddedbase64
+from typing_extensions import Protocol
 
 from twisted.internet.defer import Deferred, ensureDeferred
 from twisted.python.failure import Failure
@@ -50,7 +51,7 @@ from twisted.web.resource import Resource
 from twisted.web.server import Request
 
 from synapse import events
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.server import DEFAULT_ROOM_VERSION
@@ -71,7 +72,13 @@ from synapse.types import JsonDict, UserID, create_requester
 from synapse.util import Clock
 from synapse.util.httpresourcetree import create_resource_tree
 
-from tests.server import FakeChannel, get_clock, make_request, setup_test_homeserver
+from tests.server import (
+    CustomHeaderType,
+    FakeChannel,
+    get_clock,
+    make_request,
+    setup_test_homeserver,
+)
 from tests.test_utils import event_injection, setup_awaitable_errors
 from tests.test_utils.logging_setup import setup_logging
 from tests.utils import default_config, setupdb
@@ -79,6 +86,17 @@ from tests.utils import default_config, setupdb
 setupdb()
 setup_logging()
 
+TV = TypeVar("TV")
+_ExcType = TypeVar("_ExcType", bound=BaseException, covariant=True)
+
+
+class _TypedFailure(Generic[_ExcType], Protocol):
+    """Extension to twisted.Failure, where the 'value' has a certain type."""
+
+    @property
+    def value(self) -> _ExcType:
+        ...
+
 
 def around(target):
     """A CLOS-style 'around' modifier, which wraps the original method of the
@@ -277,6 +295,7 @@ class HomeserverTestCase(TestCase):
 
         if hasattr(self, "user_id"):
             if self.hijack_auth:
+                assert self.helper.auth_user_id is not None
 
                 # We need a valid token ID to satisfy foreign key constraints.
                 token_id = self.get_success(
@@ -289,6 +308,7 @@ class HomeserverTestCase(TestCase):
                 )
 
                 async def get_user_by_access_token(token=None, allow_guest=False):
+                    assert self.helper.auth_user_id is not None
                     return {
                         "user": UserID.from_string(self.helper.auth_user_id),
                         "token_id": token_id,
@@ -296,6 +316,7 @@ class HomeserverTestCase(TestCase):
                     }
 
                 async def get_user_by_req(request, allow_guest=False, rights="access"):
+                    assert self.helper.auth_user_id is not None
                     return create_requester(
                         UserID.from_string(self.helper.auth_user_id),
                         token_id,
@@ -312,7 +333,7 @@ class HomeserverTestCase(TestCase):
                 )
 
         if self.needs_threadpool:
-            self.reactor.threadpool = ThreadPool()
+            self.reactor.threadpool = ThreadPool()  # type: ignore[assignment]
             self.addCleanup(self.reactor.threadpool.stop)
             self.reactor.threadpool.start()
 
@@ -427,7 +448,7 @@ class HomeserverTestCase(TestCase):
         federation_auth_origin: Optional[bytes] = None,
         content_is_form: bool = False,
         await_result: bool = True,
-        custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None,
+        custom_headers: Optional[Iterable[CustomHeaderType]] = None,
         client_ip: str = "127.0.0.1",
     ) -> FakeChannel:
         """
@@ -512,40 +533,36 @@ class HomeserverTestCase(TestCase):
 
         return hs
 
-    def pump(self, by=0.0):
+    def pump(self, by: float = 0.0) -> None:
         """
         Pump the reactor enough that Deferreds will fire.
         """
         self.reactor.pump([by] * 100)
 
-    def get_success(self, d, by=0.0):
-        if inspect.isawaitable(d):
-            d = ensureDeferred(d)
-        if not isinstance(d, Deferred):
-            return d
+    def get_success(
+        self,
+        d: Awaitable[TV],
+        by: float = 0.0,
+    ) -> TV:
+        deferred: Deferred[TV] = ensureDeferred(d)  # type: ignore[arg-type]
         self.pump(by=by)
-        return self.successResultOf(d)
+        return self.successResultOf(deferred)
 
-    def get_failure(self, d, exc):
+    def get_failure(
+        self, d: Awaitable[Any], exc: Type[_ExcType]
+    ) -> _TypedFailure[_ExcType]:
         """
         Run a Deferred and get a Failure from it. The failure must be of the type `exc`.
         """
-        if inspect.isawaitable(d):
-            d = ensureDeferred(d)
-        if not isinstance(d, Deferred):
-            return d
+        deferred: Deferred[Any] = ensureDeferred(d)  # type: ignore[arg-type]
         self.pump()
-        return self.failureResultOf(d, exc)
+        return self.failureResultOf(deferred, exc)
 
-    def get_success_or_raise(self, d, by=0.0):
+    def get_success_or_raise(self, d: Awaitable[TV], by: float = 0.0) -> TV:
         """Drive deferred to completion and return result or raise exception
         on failure.
         """
-
-        if inspect.isawaitable(d):
-            deferred = ensureDeferred(d)
-        if not isinstance(deferred, Deferred):
-            return d
+        deferred: Deferred[TV] = ensureDeferred(d)  # type: ignore[arg-type]
 
         results: list = []
         deferred.addBoth(results.append)
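To show what the tightened helper signatures buy, here is a sketch of a test body in a HomeserverTestCase subclass; the two helper coroutines and the test name are invented for the example, and `SynapseError` is assumed to be imported from synapse.api.errors:

    async def _returns_three(self) -> int:
        return 3

    async def _always_fails(self) -> None:
        raise SynapseError(400, "nope")

    def test_helper_typing(self) -> None:
        # get_success accepts any Awaitable[TV] and returns TV, so `three`
        # is typed as int with no cast needed.
        three = self.get_success(self._returns_three())
        self.assertEqual(three, 3)

        # get_failure returns _TypedFailure[SynapseError]: `.value` is known
        # to be a SynapseError, so `.code` type-checks.
        failure = self.get_failure(self._always_fails(), SynapseError)
        self.assertEqual(failure.value.code, 400)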
@@ -653,11 +670,11 @@ class HomeserverTestCase(TestCase):
 
     def login(
         self,
-        username,
-        password,
-        device_id=None,
-        custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None,
-    ):
+        username: str,
+        password: str,
+        device_id: Optional[str] = None,
+        custom_headers: Optional[Iterable[CustomHeaderType]] = None,
+    ) -> str:
         """
         Log in a user, and get an access token. Requires the Login API be
         registered.
@@ -679,18 +696,22 @@ class HomeserverTestCase(TestCase):
         return access_token
 
     def create_and_send_event(
-        self, room_id, user, soft_failed=False, prev_event_ids=None
-    ):
+        self,
+        room_id: str,
+        user: UserID,
+        soft_failed: bool = False,
+        prev_event_ids: Optional[List[str]] = None,
+    ) -> str:
         """
         Create and send an event.
 
         Args:
-            soft_failed (bool): Whether to create a soft failed event or not
-            prev_event_ids (list[str]|None): Explicitly set the prev events,
+            soft_failed: Whether to create a soft-failed event
+            prev_event_ids: Explicitly set the prev events,
                 or if None just use the default
 
         Returns:
-            str: The new event's ID.
+            The new event's ID.
         """
         event_creator = self.hs.get_event_creation_handler()
         requester = create_requester(user)
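For illustration, a call under the new annotations; `self.room_id` and the Matrix ID below are placeholders rather than values from this diff:

        sender = UserID.from_string("@alice:test")
        event_id = self.create_and_send_event(self.room_id, sender, soft_failed=True)
        # The helper now returns the new event's ID as a plain str.
        self.assertTrue(event_id.startswith("$"))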
@@ -717,34 +738,7 @@ class HomeserverTestCase(TestCase):
 
         return event.event_id
 
-    def add_extremity(self, room_id, event_id):
-        """
-        Add the given event as an extremity to the room.
-        """
-        self.get_success(
-            self.hs.get_datastores().main.db_pool.simple_insert(
-                table="event_forward_extremities",
-                values={"room_id": room_id, "event_id": event_id},
-                desc="test_add_extremity",
-            )
-        )
-
-        self.hs.get_datastores().main.get_latest_event_ids_in_room.invalidate(
-            (room_id,)
-        )
-
-    def attempt_wrong_password_login(self, username, password):
-        """Attempts to login as the user with the given password, asserting
-        that the attempt *fails*.
-        """
-        body = {"type": "m.login.password", "user": username, "password": password}
-
-        channel = self.make_request(
-            "POST", "/_matrix/client/r0/login", json.dumps(body).encode("utf8")
-        )
-        self.assertEqual(channel.code, 403, channel.result)
-
-    def inject_room_member(self, room: str, user: str, membership: Membership) -> None:
+    def inject_room_member(self, room: str, user: str, membership: str) -> None:
         """
         Inject a membership event into a room.
 
@@ -804,7 +798,7 @@ class FederatingHomeserverTestCase(HomeserverTestCase):
         path: str,
         content: Optional[JsonDict] = None,
         await_result: bool = True,
-        custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None,
+        custom_headers: Optional[Iterable[CustomHeaderType]] = None,
         client_ip: str = "127.0.0.1",
     ) -> FakeChannel:
         """Make an inbound signed federation request to this server
@@ -837,7 +831,7 @@ class FederatingHomeserverTestCase(HomeserverTestCase):
             self.site,
             method=method,
             path=path,
-            content=content,
+            content=content or "",
             shorthand=False,
             await_result=await_result,
             custom_headers=custom_headers,
@@ -916,9 +910,6 @@ def override_config(extra_config):
     return decorator
 
 
-TV = TypeVar("TV")
-
-
 def skip_unless(condition: bool, reason: str) -> Callable[[TV], TV]:
     """A test decorator which will skip the decorated test unless a condition is set
 
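Several signatures above now take custom_headers as Optional[Iterable[CustomHeaderType]]. A small usage sketch, assuming `CustomHeaderType` in tests.server is a (name, value) pair accepting str or bytes and that the register/login servlets are enabled on the test case; the user-agent value is illustrative:

    def test_login_sends_custom_header(self) -> None:
        self.register_user("alice", "password")
        # Header names and values may be supplied as str or bytes.
        token = self.login(
            "alice",
            "password",
            custom_headers=[(b"User-Agent", b"SynapseTestClient/0.0")],
        )
        self.assertTrue(token)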
diff --git a/tox.ini b/tox.ini
index 3ffd2c3e97..69476b5869 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = packaging, py37, py38, py39, py310, check_codestyle, check_isort
+envlist = py37, py38, py39, py310, check_codestyle, check_isort
 
 # we require tox>=2.3.2 for the fix to https://github.com/tox-dev/tox/issues/208
 minversion = 2.3.2
@@ -138,14 +138,6 @@ setenv =
 commands =
     python -m synmark {posargs:}
 
-[testenv:packaging]
-skip_install = true
-usedevelop = false
-deps =
-    check-manifest
-commands =
-    check-manifest
-
 [testenv:check_codestyle]
 extras = lint
 commands =