summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--.git-blame-ignore-revs12
-rw-r--r--Cargo.lock12
-rw-r--r--changelog.d/15503.feature1
-rw-r--r--changelog.d/16051.misc1
-rw-r--r--changelog.d/16532.misc1
-rw-r--r--changelog.d/16564.misc1
-rw-r--r--changelog.d/16583.misc1
-rw-r--r--changelog.d/16584.misc1
-rw-r--r--changelog.d/16585.misc1
-rw-r--r--changelog.d/16586.misc1
-rw-r--r--changelog.d/16588.misc1
-rw-r--r--changelog.d/16589.misc1
-rw-r--r--changelog.d/16590.misc1
-rw-r--r--changelog.d/16596.misc1
-rw-r--r--changelog.d/16605.misc1
-rw-r--r--changelog.d/16609.bugfix1
-rw-r--r--changelog.d/16611.misc1
-rw-r--r--changelog.d/16612.misc1
-rw-r--r--changelog.d/16613.feature1
-rw-r--r--changelog.d/16615.misc1
-rw-r--r--changelog.d/16616.feature1
-rw-r--r--changelog.d/16617.bugfix1
-rw-r--r--changelog.d/16618.misc1
-rw-r--r--changelog.d/16628.doc1
-rw-r--r--changelog.d/16631.doc1
-rw-r--r--changelog.d/16634.misc1
-rw-r--r--changelog.d/16637.misc1
-rw-r--r--changelog.d/16638.misc1
-rw-r--r--changelog.d/16639.bugfix1
-rw-r--r--changelog.d/16640.misc1
-rw-r--r--changelog.d/16643.misc1
-rw-r--r--changelog.d/16649.misc1
-rw-r--r--changelog.d/16654.doc1
-rw-r--r--changelog.d/16656.misc1
-rw-r--r--debian/changelog2
-rw-r--r--docs/admin_api/rooms.md3
-rw-r--r--docs/admin_api/user_admin_api.md56
-rw-r--r--docs/changelogs/CHANGES-pre-1.0.md1614
-rw-r--r--docs/postgres.md2
-rw-r--r--docs/usage/configuration/config_documentation.md43
-rw-r--r--mypy.ini4
-rw-r--r--poetry.lock135
-rw-r--r--pyproject.toml13
-rwxr-xr-xsynapse/_scripts/synapse_port_db.py3
-rw-r--r--synapse/api/errors.py2
-rw-r--r--synapse/app/generic_worker.py4
-rw-r--r--synapse/config/ratelimiting.py7
-rw-r--r--synapse/config/repository.py6
-rw-r--r--synapse/federation/federation_server.py2
-rw-r--r--synapse/federation/sender/__init__.py4
-rw-r--r--synapse/handlers/admin.py2
-rw-r--r--synapse/handlers/device.py10
-rw-r--r--synapse/handlers/e2e_keys.py20
-rw-r--r--synapse/handlers/federation_event.py20
-rw-r--r--synapse/handlers/presence.py2
-rw-r--r--synapse/handlers/profile.py15
-rw-r--r--synapse/handlers/room.py6
-rw-r--r--synapse/handlers/room_list.py43
-rw-r--r--synapse/handlers/room_member.py3
-rw-r--r--synapse/handlers/room_summary.py26
-rw-r--r--synapse/handlers/sso.py2
-rw-r--r--synapse/handlers/sync.py4
-rw-r--r--synapse/handlers/user_directory.py8
-rw-r--r--synapse/http/matrixfederationclient.py14
-rw-r--r--synapse/logging/opentracing.py7
-rw-r--r--synapse/media/_base.py6
-rw-r--r--synapse/media/media_repository.py284
-rw-r--r--synapse/media/url_previewer.py11
-rw-r--r--synapse/metrics/_reactor_metrics.py130
-rw-r--r--synapse/module_api/__init__.py3
-rw-r--r--synapse/module_api/callbacks/third_party_event_rules_callbacks.py3
-rw-r--r--synapse/push/bulk_push_rule_evaluator.py56
-rw-r--r--synapse/replication/tcp/handler.py72
-rw-r--r--synapse/replication/tcp/redis.py2
-rw-r--r--synapse/replication/tcp/resource.py17
-rw-r--r--synapse/replication/tcp/streams/_base.py18
-rw-r--r--synapse/rest/admin/__init__.py2
-rw-r--r--synapse/rest/admin/media.py6
-rw-r--r--synapse/rest/admin/registration_tokens.py13
-rw-r--r--synapse/rest/admin/rooms.py19
-rw-r--r--synapse/rest/admin/users.py50
-rw-r--r--synapse/rest/client/account.py19
-rw-r--r--synapse/rest/client/directory.py2
-rw-r--r--synapse/rest/client/keys.py16
-rw-r--r--synapse/rest/media/create_resource.py83
-rw-r--r--synapse/rest/media/download_resource.py22
-rw-r--r--synapse/rest/media/media_repository_resource.py8
-rw-r--r--synapse/rest/media/thumbnail_resource.py81
-rw-r--r--synapse/rest/media/upload_resource.py75
-rw-r--r--synapse/storage/background_updates.py32
-rw-r--r--synapse/storage/controllers/persist_events.py252
-rw-r--r--synapse/storage/database.py77
-rw-r--r--synapse/storage/databases/__init__.py2
-rw-r--r--synapse/storage/databases/main/__init__.py52
-rw-r--r--synapse/storage/databases/main/account_data.py24
-rw-r--r--synapse/storage/databases/main/cache.py75
-rw-r--r--synapse/storage/databases/main/deviceinbox.py106
-rw-r--r--synapse/storage/databases/main/devices.py100
-rw-r--r--synapse/storage/databases/main/e2e_room_keys.py31
-rw-r--r--synapse/storage/databases/main/end_to_end_keys.py114
-rw-r--r--synapse/storage/databases/main/event_federation.py24
-rw-r--r--synapse/storage/databases/main/events.py415
-rw-r--r--synapse/storage/databases/main/events_bg_updates.py19
-rw-r--r--synapse/storage/databases/main/events_worker.py8
-rw-r--r--synapse/storage/databases/main/keys.py17
-rw-r--r--synapse/storage/databases/main/media_repository.py249
-rw-r--r--synapse/storage/databases/main/monthly_active_users.py2
-rw-r--r--synapse/storage/databases/main/presence.py9
-rw-r--r--synapse/storage/databases/main/profile.py28
-rw-r--r--synapse/storage/databases/main/purge_events.py33
-rw-r--r--synapse/storage/databases/main/push_rule.py94
-rw-r--r--synapse/storage/databases/main/receipts.py4
-rw-r--r--synapse/storage/databases/main/registration.py149
-rw-r--r--synapse/storage/databases/main/room.py129
-rw-r--r--synapse/storage/databases/main/roommember.py19
-rw-r--r--synapse/storage/databases/main/search.py12
-rw-r--r--synapse/storage/databases/main/stream.py30
-rw-r--r--synapse/storage/databases/main/task_scheduler.py48
-rw-r--r--synapse/storage/databases/main/transactions.py20
-rw-r--r--synapse/storage/databases/main/ui_auth.py31
-rw-r--r--synapse/storage/databases/main/user_directory.py27
-rw-r--r--synapse/storage/databases/state/bg_updates.py4
-rw-r--r--synapse/storage/engines/postgres.py3
-rw-r--r--synapse/storage/schema/__init__.py3
-rw-r--r--synapse/storage/schema/main/delta/54/delete_forward_extremities.sql2
-rw-r--r--synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql3
-rw-r--r--synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql3
-rw-r--r--synapse/storage/schema/main/delta/83/05_cross_signing_key_update_grant.sql15
-rw-r--r--synapse/storage/util/id_generators.py56
-rw-r--r--synapse/util/__init__.py2
-rw-r--r--synapse/util/async_helpers.py14
-rw-r--r--synapse/util/check_dependencies.py3
-rw-r--r--synapse/util/iterutils.py51
-rw-r--r--synapse/util/task_scheduler.py4
-rw-r--r--sytest-blacklist2
-rw-r--r--tests/federation/test_federation_sender.py2
-rw-r--r--tests/handlers/test_e2e_keys.py47
-rw-r--r--tests/handlers/test_federation.py6
-rw-r--r--tests/handlers/test_register.py22
-rw-r--r--tests/handlers/test_stats.py4
-rw-r--r--tests/handlers/test_user_directory.py4
-rw-r--r--tests/http/__init__.py2
-rw-r--r--tests/http/test_matrixfederationclient.py3
-rw-r--r--tests/media/test_media_storage.py6
-rw-r--r--tests/push/test_bulk_push_rule_evaluator.py2
-rw-r--r--tests/replication/tcp/streams/test_to_device.py2
-rw-r--r--tests/replication/tcp/streams/test_typing.py8
-rw-r--r--tests/rest/admin/test_media.py16
-rw-r--r--tests/rest/admin/test_user.py60
-rw-r--r--tests/rest/client/test_account.py8
-rw-r--r--tests/rest/client/test_events.py2
-rw-r--r--tests/rest/client/test_keys.py188
-rw-r--r--tests/rest/client/test_profile.py6
-rw-r--r--tests/rest/client/test_register.py12
-rw-r--r--tests/rest/client/test_rooms.py3
-rw-r--r--tests/rest/client/test_sync.py2
-rw-r--r--tests/rest/media/test_media_retention.py20
-rw-r--r--tests/server.py23
-rw-r--r--tests/storage/databases/main/test_cache.py117
-rw-r--r--tests/storage/databases/main/test_end_to_end_keys.py121
-rw-r--r--tests/storage/databases/main/test_lock.py18
-rw-r--r--tests/storage/test_base.py635
-rw-r--r--tests/storage/test_database.py3
-rw-r--r--tests/storage/test_event_federation.py2
-rw-r--r--tests/storage/test_main.py4
-rw-r--r--tests/storage/test_room.py24
-rw-r--r--tests/storage/test_room_search.py2
-rw-r--r--tests/util/test_check_dependencies.py10
-rw-r--r--tests/util/test_itertools.py76
-rw-r--r--tests/utils.py2
170 files changed, 4796 insertions, 2199 deletions
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index 839b895c82..4c7b0335e6 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -8,21 +8,21 @@
 # If ignoring a pull request that was not squash merged, only the merge
 # commit needs to be put here. Child commits will be resolved from it.
 
-# Run black (#3679).
+# Run black (https://github.com/matrix-org/synapse/pull/3679).
 8b3d9b6b199abb87246f982d5db356f1966db925
 
-# Black reformatting (#5482).
+# Black reformatting (https://github.com/matrix-org/synapse/pull/5482).
 32e7c9e7f20b57dd081023ac42d6931a8da9b3a3
 
-# Target Python 3.5 with black (#8664).
+# Target Python 3.5 with black (https://github.com/matrix-org/synapse/pull/8664).
 aff1eb7c671b0a3813407321d2702ec46c71fa56
 
-# Update black to 20.8b1 (#9381).
+# Update black to 20.8b1 (https://github.com/matrix-org/synapse/pull/9381).
 0a00b7ff14890987f09112a2ae696c61001e6cf1
 
-# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
+# Convert tests/rest/admin/test_room.py to unix file endings (https://github.com/matrix-org/synapse/pull/7953).
 c4268e3da64f1abb5b31deaeb5769adb6510c0a7
 
-# Update black to 23.1.0 (#15103)
+# Update black to 23.1.0 (https://github.com/matrix-org/synapse/pull/15103)
 9bb2eac71962970d02842bca441f4bcdbbf93a11
 
diff --git a/Cargo.lock b/Cargo.lock
index 3f7e66909b..1b3dade625 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -332,18 +332,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
 name = "serde"
-version = "1.0.190"
+version = "1.0.192"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91d3c334ca1ee894a2c6f6ad698fe8c435b76d504b13d436f0685d648d6d96f7"
+checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.190"
+version = "1.0.192"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3"
+checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -352,9 +352,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.107"
+version = "1.0.108"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65"
+checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b"
 dependencies = [
  "itoa",
  "ryu",
diff --git a/changelog.d/15503.feature b/changelog.d/15503.feature
new file mode 100644
index 0000000000..b6ca97a2cf
--- /dev/null
+++ b/changelog.d/15503.feature
@@ -0,0 +1 @@
+Add support for asynchronous uploads as defined by [MSC2246](https://github.com/matrix-org/matrix-spec-proposals/pull/2246). Contributed by @sumnerevans at @beeper.
diff --git a/changelog.d/16051.misc b/changelog.d/16051.misc
new file mode 100644
index 0000000000..1420d2eb3f
--- /dev/null
+++ b/changelog.d/16051.misc
@@ -0,0 +1 @@
+Remove whole table locks on push rule modifications. Contributed by Nick @ Beeper (@fizzadar).
diff --git a/changelog.d/16532.misc b/changelog.d/16532.misc
new file mode 100644
index 0000000000..437e00210b
--- /dev/null
+++ b/changelog.d/16532.misc
@@ -0,0 +1 @@
+Support reactor tick timings on more types of event loops.
diff --git a/changelog.d/16564.misc b/changelog.d/16564.misc
new file mode 100644
index 0000000000..93ceaeafc9
--- /dev/null
+++ b/changelog.d/16564.misc
@@ -0,0 +1 @@
+Improve type hints.
diff --git a/changelog.d/16583.misc b/changelog.d/16583.misc
new file mode 100644
index 0000000000..df5b27b112
--- /dev/null
+++ b/changelog.d/16583.misc
@@ -0,0 +1 @@
+Avoid executing no-op queries.
diff --git a/changelog.d/16584.misc b/changelog.d/16584.misc
new file mode 100644
index 0000000000..beec8f2301
--- /dev/null
+++ b/changelog.d/16584.misc
@@ -0,0 +1 @@
+Simplify persistence code to be per-room.
diff --git a/changelog.d/16585.misc b/changelog.d/16585.misc
new file mode 100644
index 0000000000..01f3ecc843
--- /dev/null
+++ b/changelog.d/16585.misc
@@ -0,0 +1 @@
+Use standard SQL helpers in persistence code.
\ No newline at end of file
diff --git a/changelog.d/16586.misc b/changelog.d/16586.misc
new file mode 100644
index 0000000000..f02c4a2060
--- /dev/null
+++ b/changelog.d/16586.misc
@@ -0,0 +1 @@
+Avoid updating the stream cache unnecessarily.
diff --git a/changelog.d/16588.misc b/changelog.d/16588.misc
new file mode 100644
index 0000000000..c12b6cfc28
--- /dev/null
+++ b/changelog.d/16588.misc
@@ -0,0 +1 @@
+Bump twisted from 23.8.0 to 23.10.0.
diff --git a/changelog.d/16589.misc b/changelog.d/16589.misc
new file mode 100644
index 0000000000..6e69368bbf
--- /dev/null
+++ b/changelog.d/16589.misc
@@ -0,0 +1 @@
+Improve performance when using opentracing.
diff --git a/changelog.d/16590.misc b/changelog.d/16590.misc
new file mode 100644
index 0000000000..6db04b0c98
--- /dev/null
+++ b/changelog.d/16590.misc
@@ -0,0 +1 @@
+Run push rule evaluator setup in parallel.
diff --git a/changelog.d/16596.misc b/changelog.d/16596.misc
new file mode 100644
index 0000000000..fa457b12e5
--- /dev/null
+++ b/changelog.d/16596.misc
@@ -0,0 +1 @@
+Improve tests of the SQL generator.
diff --git a/changelog.d/16605.misc b/changelog.d/16605.misc
new file mode 100644
index 0000000000..2db7da5692
--- /dev/null
+++ b/changelog.d/16605.misc
@@ -0,0 +1 @@
+Bump setuptools-rust from 1.8.0 to 1.8.1.
diff --git a/changelog.d/16609.bugfix b/changelog.d/16609.bugfix
new file mode 100644
index 0000000000..a52d395cd3
--- /dev/null
+++ b/changelog.d/16609.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where some queries updated the same row twice. Introduced in Synapse 1.57.0.
diff --git a/changelog.d/16611.misc b/changelog.d/16611.misc
new file mode 100644
index 0000000000..93ceaeafc9
--- /dev/null
+++ b/changelog.d/16611.misc
@@ -0,0 +1 @@
+Improve type hints.
diff --git a/changelog.d/16612.misc b/changelog.d/16612.misc
new file mode 100644
index 0000000000..93ceaeafc9
--- /dev/null
+++ b/changelog.d/16612.misc
@@ -0,0 +1 @@
+Improve type hints.
diff --git a/changelog.d/16613.feature b/changelog.d/16613.feature
new file mode 100644
index 0000000000..419c56fb83
--- /dev/null
+++ b/changelog.d/16613.feature
@@ -0,0 +1 @@
+Improve the performance of some operations in multi-worker deployments.
diff --git a/changelog.d/16615.misc b/changelog.d/16615.misc
new file mode 100644
index 0000000000..37ab711dc6
--- /dev/null
+++ b/changelog.d/16615.misc
@@ -0,0 +1 @@
+Use more generic database methods.
diff --git a/changelog.d/16616.feature b/changelog.d/16616.feature
new file mode 100644
index 0000000000..419c56fb83
--- /dev/null
+++ b/changelog.d/16616.feature
@@ -0,0 +1 @@
+Improve the performance of some operations in multi-worker deployments.
diff --git a/changelog.d/16617.bugfix b/changelog.d/16617.bugfix
new file mode 100644
index 0000000000..7e66799a6c
--- /dev/null
+++ b/changelog.d/16617.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where Synapse would not unbind third-party identifiers for Application Service users when deactivated and would not emit a compliant response.
\ No newline at end of file
diff --git a/changelog.d/16618.misc b/changelog.d/16618.misc
new file mode 100644
index 0000000000..c026e6b995
--- /dev/null
+++ b/changelog.d/16618.misc
@@ -0,0 +1 @@
+Use `dbname` instead of the deprecated `database` connection parameter for psycopg2.
diff --git a/changelog.d/16628.doc b/changelog.d/16628.doc
new file mode 100644
index 0000000000..4dd1e4874e
--- /dev/null
+++ b/changelog.d/16628.doc
@@ -0,0 +1 @@
+Note that the option [`outbound_federation_restricted_to`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#outbound_federation_restricted_to) was added in Synapse 1.89.0, and fix a nearby formatting error.
diff --git a/changelog.d/16631.doc b/changelog.d/16631.doc
new file mode 100644
index 0000000000..1128a080ea
--- /dev/null
+++ b/changelog.d/16631.doc
@@ -0,0 +1 @@
+Update parameter information for the `/timestamp_to_event` admin API.
diff --git a/changelog.d/16634.misc b/changelog.d/16634.misc
new file mode 100644
index 0000000000..f81cf39691
--- /dev/null
+++ b/changelog.d/16634.misc
@@ -0,0 +1 @@
+Add an internal [Admin API endpoint](https://matrix-org.github.io/synapse/v1.97/usage/configuration/config_documentation.html#allow-replacing-master-cross-signing-key-without-user-interactive-auth) to temporarily grant the ability to update an existing cross-signing key without UIA.
diff --git a/changelog.d/16637.misc b/changelog.d/16637.misc
new file mode 100644
index 0000000000..f5068ac291
--- /dev/null
+++ b/changelog.d/16637.misc
@@ -0,0 +1 @@
+Improve references to GitHub issues.
diff --git a/changelog.d/16638.misc b/changelog.d/16638.misc
new file mode 100644
index 0000000000..f5068ac291
--- /dev/null
+++ b/changelog.d/16638.misc
@@ -0,0 +1 @@
+Improve references to GitHub issues.
diff --git a/changelog.d/16639.bugfix b/changelog.d/16639.bugfix
new file mode 100644
index 0000000000..3feff89af6
--- /dev/null
+++ b/changelog.d/16639.bugfix
@@ -0,0 +1 @@
+Fix sending out of order `POSITION` over replication, causing additional database load.
diff --git a/changelog.d/16640.misc b/changelog.d/16640.misc
new file mode 100644
index 0000000000..3b1cc2185d
--- /dev/null
+++ b/changelog.d/16640.misc
@@ -0,0 +1 @@
+More efficiently handle no-op `POSITION` over replication.
diff --git a/changelog.d/16643.misc b/changelog.d/16643.misc
new file mode 100644
index 0000000000..cc0cf0901f
--- /dev/null
+++ b/changelog.d/16643.misc
@@ -0,0 +1 @@
+Speed up deleting of device messages when deleting a device.
diff --git a/changelog.d/16649.misc b/changelog.d/16649.misc
new file mode 100644
index 0000000000..cebd6aaee5
--- /dev/null
+++ b/changelog.d/16649.misc
@@ -0,0 +1 @@
+Speed up persisting large number of outliers.
diff --git a/changelog.d/16654.doc b/changelog.d/16654.doc
new file mode 100644
index 0000000000..61019e0367
--- /dev/null
+++ b/changelog.d/16654.doc
@@ -0,0 +1 @@
+Provide an example for a common encrypted media response from the admin user media API and mention possible null values.
diff --git a/changelog.d/16656.misc b/changelog.d/16656.misc
new file mode 100644
index 0000000000..6763685b9d
--- /dev/null
+++ b/changelog.d/16656.misc
@@ -0,0 +1 @@
+Reduce max concurrency of background tasks, reducing potential max DB load.
diff --git a/debian/changelog b/debian/changelog
index 25d9f15cdb..1d9a135d98 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1649,7 +1649,7 @@ matrix-synapse-py3 (0.99.3.1) stable; urgency=medium
 matrix-synapse-py3 (0.99.3) stable; urgency=medium
 
   [ Richard van der Hoff ]
-  * Fix warning during preconfiguration. (Fixes: #4819)
+  * Fix warning during preconfiguration. (Fixes: https://github.com/matrix-org/synapse/issues/4819)
 
   [ Synapse Packaging team ]
   * New synapse release 0.99.3.
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index 90b06045a8..ad011e5c36 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -536,7 +536,8 @@ The following query parameters are available:
 
 **Response**
 
-* `event_id` - converted from timestamp
+* `event_id` - The event ID closest to the given timestamp.
+* `origin_server_ts` - The timestamp of the event in milliseconds since the Unix epoch.
 
 # Block Room API
 The Block Room admin API allows server admins to block and unblock rooms,
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index b91848dd27..e8e492d095 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -618,6 +618,16 @@ A response body like the following is returned:
       "quarantined_by": null,
       "safe_from_quarantine": false,
       "upload_name": "test2.png"
+    },
+    {
+      "created_ts": 300400,
+      "last_access_ts": 300700,
+      "media_id": "BzYNLRUgGHphBkdKGbzXwbjX",
+      "media_length": 1337,
+      "media_type": "application/octet-stream",
+      "quarantined_by": null,
+      "safe_from_quarantine": false,
+      "upload_name": null
     }
   ],
   "next_token": 3,
@@ -679,16 +689,17 @@ The following fields are returned in the JSON response body:
 - `media` - An array of objects, each containing information about a media.
   Media objects contain the following fields:
   - `created_ts` - integer - Timestamp when the content was uploaded in ms.
-  - `last_access_ts` - integer - Timestamp when the content was last accessed in ms.
+  - `last_access_ts` - integer or null - Timestamp when the content was last accessed in ms.
+     Null if the media has not been accessed yet.
   - `media_id` - string - The id used to refer to the media. Details about the format
     are documented under
     [media repository](../media_repository.md).
   - `media_length` - integer - Length of the media in bytes.
   - `media_type` - string - The MIME-type of the media.
-  - `quarantined_by` - string - The user ID that initiated the quarantine request
-    for this media.
+  - `quarantined_by` - string or null - The user ID that initiated the quarantine request
+    for this media. Null if not quarantined.
   - `safe_from_quarantine` - bool - Status if this media is safe from quarantining.
-  - `upload_name` - string - The name the media was uploaded with.
+  - `upload_name` - string or null - The name the media was uploaded with. Null if not provided during upload.
 - `next_token`: integer - Indication for pagination. See above.
 - `total` - integer - Total number of media.
 
@@ -773,6 +784,43 @@ Note: The token will expire if the *admin* user calls `/logout/all` from any
 of their devices, but the token will *not* expire if the target user does the
 same.
 
+## Allow replacing master cross-signing key without User-Interactive Auth
+
+This endpoint is not intended for server administrator usage;
+we describe it here for completeness.
+
+This API temporarily permits a user to replace their master cross-signing key
+without going through
+[user-interactive authentication](https://spec.matrix.org/v1.8/client-server-api/#user-interactive-authentication-api) (UIA).
+This is useful when Synapse has delegated its authentication to the
+[Matrix Authentication Service](https://github.com/matrix-org/matrix-authentication-service/);
+as Synapse cannot perform UIA in these circumstances.
+
+The API is
+
+```http request
+POST /_synapse/admin/v1/users/<user_id>/_allow_cross_signing_replacement_without_uia
+{}
+```
+
+If the user does not exist, or does exist but has no master cross-signing key,
+this will return with status code `404 Not Found`.
+
+Otherwise, a response body like the following is returned, with status `200 OK`:
+
+```json
+{
+    "updatable_without_uia_before_ms": 1234567890
+}
+```
+
+The response body is a JSON object with a single field:
+
+- `updatable_without_uia_before_ms`: integer. The timestamp in milliseconds
+  before which the user is permitted to replace their cross-signing key without
+  going through UIA.
+
+_Added in Synapse 1.97.0._
 
 ## User devices
 
diff --git a/docs/changelogs/CHANGES-pre-1.0.md b/docs/changelogs/CHANGES-pre-1.0.md
index a08f867b67..5ebc4009c7 100644
--- a/docs/changelogs/CHANGES-pre-1.0.md
+++ b/docs/changelogs/CHANGES-pre-1.0.md
@@ -97,7 +97,7 @@ Bugfixes
 - start.sh: Fix the --no-rate-limit option for messages and make it bypass rate limit on registration and login too. ([\#4981](https://github.com/matrix-org/synapse/issues/4981))
 - Transfer related groups on room upgrade. ([\#4990](https://github.com/matrix-org/synapse/issues/4990))
 - Prevent the ability to kick users from a room they aren't in. ([\#4999](https://github.com/matrix-org/synapse/issues/4999))
-- Fix issue #4596 so synapse_port_db script works with --curses option on Python 3. Contributed by Anders Jensen-Waud <anders@jensenwaud.com>. ([\#5003](https://github.com/matrix-org/synapse/issues/5003))
+- Fix issue [\#4596](https://github.com/matrix-org/synapse/issues/4596) so synapse_port_db script works with --curses option on Python 3. Contributed by Anders Jensen-Waud <anders@jensenwaud.com>. ([\#5003](https://github.com/matrix-org/synapse/issues/5003))
 - Clients timing out/disappearing while downloading from the media repository will now no longer log a spurious "Producer was not unregistered" message. ([\#5009](https://github.com/matrix-org/synapse/issues/5009))
 - Fix "cannot import name execute_batch" error with postgres. ([\#5032](https://github.com/matrix-org/synapse/issues/5032))
 - Fix disappearing exceptions in manhole. ([\#5035](https://github.com/matrix-org/synapse/issues/5035))
@@ -111,7 +111,7 @@ Bugfixes
 Internal Changes
 ----------------
 
-- Add test to verify threepid auth check added in #4435. ([\#4474](https://github.com/matrix-org/synapse/issues/4474))
+- Add test to verify threepid auth check added in [\#4435](https://github.com/matrix-org/synapse/issues/4435). ([\#4474](https://github.com/matrix-org/synapse/issues/4474))
 - Fix/improve some docstrings in the replication code. ([\#4949](https://github.com/matrix-org/synapse/issues/4949))
 - Split synapse.replication.tcp.streams into smaller files. ([\#4953](https://github.com/matrix-org/synapse/issues/4953))
 - Refactor replication row generation/parsing. ([\#4954](https://github.com/matrix-org/synapse/issues/4954))
@@ -186,7 +186,7 @@ Features
 - Add support for /keys/query and /keys/changes REST endpoints to client_reader worker. ([\#4796](https://github.com/matrix-org/synapse/issues/4796))
 - Add checks to incoming events over federation for events evading auth (aka "soft fail"). ([\#4814](https://github.com/matrix-org/synapse/issues/4814))
 - Add configurable rate limiting to the /login endpoint. ([\#4821](https://github.com/matrix-org/synapse/issues/4821), [\#4865](https://github.com/matrix-org/synapse/issues/4865))
-- Remove trailing slashes from certain outbound federation requests. Retry if receiving a 404. Context: #3622. ([\#4840](https://github.com/matrix-org/synapse/issues/4840))
+- Remove trailing slashes from certain outbound federation requests. Retry if receiving a 404. Context: [\#3622](https://github.com/matrix-org/synapse/issues/3622). ([\#4840](https://github.com/matrix-org/synapse/issues/4840))
 - Allow passing --daemonize flags to workers in the same way as with master. ([\#4853](https://github.com/matrix-org/synapse/issues/4853))
 - Batch up outgoing read-receipts to reduce federation traffic. ([\#4890](https://github.com/matrix-org/synapse/issues/4890), [\#4927](https://github.com/matrix-org/synapse/issues/4927))
 - Add option to disable searching the user directory. ([\#4895](https://github.com/matrix-org/synapse/issues/4895))
@@ -231,12 +231,12 @@ Internal Changes
 - Add some debug about processing read receipts. ([\#4798](https://github.com/matrix-org/synapse/issues/4798))
 - Clean up some replication code. ([\#4799](https://github.com/matrix-org/synapse/issues/4799))
 - Add some docstrings. ([\#4815](https://github.com/matrix-org/synapse/issues/4815))
-- Add debug logger to try and track down #4422. ([\#4816](https://github.com/matrix-org/synapse/issues/4816))
+- Add debug logger to try and track down [\#4422](https://github.com/matrix-org/synapse/issues/4422). ([\#4816](https://github.com/matrix-org/synapse/issues/4816))
 - Make shutdown API send explanation message to room after users have been forced joined. ([\#4817](https://github.com/matrix-org/synapse/issues/4817))
 - Update example_log_config.yaml. ([\#4820](https://github.com/matrix-org/synapse/issues/4820))
 - Document the `generate` option for the docker image. ([\#4824](https://github.com/matrix-org/synapse/issues/4824))
 - Fix check-newsfragment for debian-only changes. ([\#4825](https://github.com/matrix-org/synapse/issues/4825))
-- Add some debug logging for device list updates to help with #4828. ([\#4828](https://github.com/matrix-org/synapse/issues/4828))
+- Add some debug logging for device list updates to help with [\#4828](https://github.com/matrix-org/synapse/issues/4828). ([\#4828](https://github.com/matrix-org/synapse/issues/4828))
 - Improve federation documentation, specifically .well-known support. Many thanks to @vaab. ([\#4832](https://github.com/matrix-org/synapse/issues/4832))
 - Disable captcha registration by default in unit tests. ([\#4839](https://github.com/matrix-org/synapse/issues/4839))
 - Add stuff back to the .gitignore. ([\#4843](https://github.com/matrix-org/synapse/issues/4843))
@@ -895,7 +895,7 @@ Bugfixes
 - Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted. ([\#3804](https://github.com/matrix-org/synapse/issues/3804))
 - Fix existing room tags not coming down sync when joining a room ([\#3810](https://github.com/matrix-org/synapse/issues/3810))
 - Fix jwt import check ([\#3824](https://github.com/matrix-org/synapse/issues/3824))
-- fix VOIP crashes under Python 3 (#3821) ([\#3835](https://github.com/matrix-org/synapse/issues/3835))
+- fix VOIP crashes under Python 3 (issue [\#3821](https://github.com/matrix-org/synapse/issues/3821)). ([\#3835](https://github.com/matrix-org/synapse/issues/3835))
 - Fix manhole so that it works with latest openssh clients ([\#3841](https://github.com/matrix-org/synapse/issues/3841))
 - Fix outbound requests occasionally wedging, which can result in federation breaking between servers. ([\#3845](https://github.com/matrix-org/synapse/issues/3845))
 - Show heroes if room name/canonical alias has been deleted ([\#3851](https://github.com/matrix-org/synapse/issues/3851))
@@ -1123,7 +1123,7 @@ Bugfixes
 - Catch failures saving metrics captured by Measure, and instead log the faulty metrics information for further analysis. ([\#3548](https://github.com/matrix-org/synapse/issues/3548))
 - Unicode passwords are now normalised before hashing, preventing the instance where two different devices or browsers might send a different UTF-8 sequence for the password. ([\#3569](https://github.com/matrix-org/synapse/issues/3569))
 - Fix potential stack overflow and deadlock under heavy load ([\#3570](https://github.com/matrix-org/synapse/issues/3570))
-- Respond with M_NOT_FOUND when profiles are not found locally or over federation. Fixes #3585 ([\#3585](https://github.com/matrix-org/synapse/issues/3585))
+- Respond with M_NOT_FOUND when profiles are not found locally or over federation. Fixes [\#3585](https://github.com/matrix-org/synapse/issues/3585). ([\#3585](https://github.com/matrix-org/synapse/issues/3585))
 - Fix failure to persist events over federation under load ([\#3601](https://github.com/matrix-org/synapse/issues/3601))
 - Fix updating of cached remote profiles ([\#3605](https://github.com/matrix-org/synapse/issues/3605))
 - Fix 'tuple index out of range' error ([\#3607](https://github.com/matrix-org/synapse/issues/3607))
@@ -1272,7 +1272,7 @@ Misc
 Changes in synapse v0.31.2 (2018-06-14)
 =======================================
 
-SECURITY UPDATE: Prevent unauthorised users from setting state events in a room when there is no `m.room.power_levels` event in force in the room. (PR #3397)
+SECURITY UPDATE: Prevent unauthorised users from setting state events in a room when there is no `m.room.power_levels` event in force in the room. ([\#3397](https://github.com/matrix-org/synapse/issues/3397))
 
 Discussion around the Matrix Spec change proposal for this change can be followed at <https://github.com/matrix-org/matrix-doc/issues/1304>.
 
@@ -1285,7 +1285,7 @@ We are not aware of it being actively exploited but please upgrade asap.
 
 Bug Fixes:
 
--   Fix event filtering in `get_missing_events` handler (PR #3371)
+-   Fix event filtering in `get_missing_events` handler. ([\#3371](https://github.com/matrix-org/synapse/issues/3371))
 
 Changes in synapse v0.31.0 (2018-06-06)
 =======================================
@@ -1294,7 +1294,7 @@ Most notable change from v0.30.0 is to switch to the python prometheus library t
 
 Bug Fixes:
 
--   Fix metric documentation tables (PR #3341)
+-   Fix metric documentation tables. ([\#3341](https://github.com/matrix-org/synapse/issues/3341))
 -   Fix LaterGauge error handling (694968f)
 -   Fix replication metrics (b7e7fd2)
 
@@ -1303,41 +1303,41 @@ Changes in synapse v0.31.0-rc1 (2018-06-04)
 
 Features:
 
--   Switch to the Python Prometheus library (PR #3256, #3274)
--   Let users leave the server notice room after joining (PR #3287)
+-   Switch to the Python Prometheus library. ([\#3256](https://github.com/matrix-org/synapse/issues/3256), [\#3274](https://github.com/matrix-org/synapse/issues/3274))
+-   Let users leave the server notice room after joining. ([\#3287](https://github.com/matrix-org/synapse/issues/3287))
 
 Changes:
 
--   daily user type phone home stats (PR #3264)
--   Use `iter*` methods for `_filter_events_for_server` (PR #3267)
--   Docs on consent bits (PR #3268)
--   Remove users from user directory on deactivate (PR #3277)
--   Avoid sending consent notice to guest users (PR #3288)
--   disable CPUMetrics if no /proc/self/stat (PR #3299)
--   Consistently use six's iteritems and wrap lazy keys/values in list() if they're not meant to be lazy (PR #3307)
--   Add private IPv6 addresses to example config for url preview blacklist (PR #3317) Thanks to @thegcat!
--   Reduce stuck read-receipts: ignore depth when updating (PR #3318)
--   Put python's logs into Trial when running unit tests (PR #3319)
+-   daily user type phone home stats. ([\#3264](https://github.com/matrix-org/synapse/issues/3264))
+-   Use `iter*` methods for `_filter_events_for_server`. ([\#3267](https://github.com/matrix-org/synapse/issues/3267))
+-   Docs on consent bits. ([\#3268](https://github.com/matrix-org/synapse/issues/3268))
+-   Remove users from user directory on deactivate. ([\#3277](https://github.com/matrix-org/synapse/issues/3277))
+-   Avoid sending consent notice to guest users. ([\#3288](https://github.com/matrix-org/synapse/issues/3288))
+-   disable CPUMetrics if no /proc/self/stat. ([\#3299](https://github.com/matrix-org/synapse/issues/3299))
+-   Consistently use six's iteritems and wrap lazy keys/values in list() if they're not meant to be lazy. ([\#3307](https://github.com/matrix-org/synapse/issues/3307))
+-   Add private IPv6 addresses to example config for url preview blacklist. Thanks to @thegcat! ([\#3317](https://github.com/matrix-org/synapse/issues/3317))
+-   Reduce stuck read-receipts: ignore depth when updating. ([\#3318](https://github.com/matrix-org/synapse/issues/3318))
+-   Put python's logs into Trial when running unit tests. ([\#3319](https://github.com/matrix-org/synapse/issues/3319))
 
 Changes, python 3 migration:
 
--   Replace some more comparisons with six (PR #3243) Thanks to @NotAFile!
--   replace some iteritems with six (PR #3244) Thanks to @NotAFile!
--   Add `batch_iter` to utils (PR #3245) Thanks to @NotAFile!
--   use repr, not str (PR #3246) Thanks to @NotAFile!
--   Misc Python3 fixes (PR #3247) Thanks to @NotAFile!
--   Py3 `storage/_base.py` (PR #3278) Thanks to @NotAFile!
--   more six iteritems (PR #3279) Thanks to @NotAFile!
--   More Misc. py3 fixes (PR #3280) Thanks to @NotAFile!
--   remaining isintance fixes (PR #3281) Thanks to @NotAFile!
--   py3-ize state.py (PR #3283) Thanks to @NotAFile!
--   extend tox testing for py3 to avoid regressions (PR #3302) Thanks to @krombel!
--   use memoryview in py3 (PR #3303) Thanks to @NotAFile!
+-   Replace some more comparisons with six. Thanks to @NotAFile! ([\#3243](https://github.com/matrix-org/synapse/issues/3243))
+-   replace some iteritems with six. Thanks to @NotAFile! ([\#3244](https://github.com/matrix-org/synapse/issues/3244))
+-   Add `batch_iter` to utils. Thanks to @NotAFile! ([\#3245](https://github.com/matrix-org/synapse/issues/3245))
+-   use repr, not str. Thanks to @NotAFile! ([\#3246](https://github.com/matrix-org/synapse/issues/3246))
+-   Misc Python3 fixes. Thanks to @NotAFile! ([\#3247](https://github.com/matrix-org/synapse/issues/3247))
+-   Py3 `storage/_base.py`. Thanks to @NotAFile! ([\#3278](https://github.com/matrix-org/synapse/issues/3278))
+-   more six iteritems. Thanks to @NotAFile! ([\#3279](https://github.com/matrix-org/synapse/issues/3279))
+-   More Misc. py3 fixes. Thanks to @NotAFile! ([\#3280](https://github.com/matrix-org/synapse/issues/3280))
+-   remaining isintance fixes. Thanks to @NotAFile! ([\#3281](https://github.com/matrix-org/synapse/issues/3281))
+-   py3-ize state.py. Thanks to @NotAFile! ([\#3283](https://github.com/matrix-org/synapse/issues/3283))
+-   extend tox testing for py3 to avoid regressions. Thanks to @krombel! ([\#3302](https://github.com/matrix-org/synapse/issues/3302))
+-   use memoryview in py3. Thanks to @NotAFile! ([\#3303](https://github.com/matrix-org/synapse/issues/3303))
 
 Bugs:
 
--   Fix federation backfill bugs (PR #3261)
--   federation: fix LaterGauge usage (PR #3328) Thanks to @intelfx!
+-   Fix federation backfill bugs. ([\#3261](https://github.com/matrix-org/synapse/issues/3261))
+-   federation: fix LaterGauge usage. Thanks to @intelfx! ([\#3328](https://github.com/matrix-org/synapse/issues/3328))
 
 Changes in synapse v0.30.0 (2018-05-24)
 =======================================
@@ -1350,50 +1350,50 @@ This feature is specific to Synapse, but uses standard Matrix communication mech
 
 Further Server Notices/Consent Tracking Support:
 
--   Allow overriding the `server_notices` user's avatar (PR #3273)
--   Use the localpart in the consent uri (PR #3272)
--   Support for putting `%(consent_uri)s` in messages (PR #3271)
--   Block attempts to send server notices to remote users (PR #3270)
--   Docs on consent bits (PR #3268)
+-   Allow overriding the `server_notices` user's avatar. ([\#3273](https://github.com/matrix-org/synapse/issues/3273))
+-   Use the localpart in the consent uri. ([\#3272](https://github.com/matrix-org/synapse/issues/3272))
+-   Support for putting `%(consent_uri)s` in messages. ([\#3271](https://github.com/matrix-org/synapse/issues/3271))
+-   Block attempts to send server notices to remote users. ([\#3270](https://github.com/matrix-org/synapse/issues/3270))
+-   Docs on consent bits. ([\#3268](https://github.com/matrix-org/synapse/issues/3268))
 
 Changes in synapse v0.30.0-rc1 (2018-05-23)
 ===========================================
 
 Server Notices/Consent Tracking Support:
 
--   ConsentResource to gather policy consent from users (PR #3213)
--   Move RoomCreationHandler out of synapse.handlers.Handlers (PR #3225)
--   Infrastructure for a server notices room (PR #3232)
--   Send users a server notice about consent (PR #3236)
--   Reject attempts to send event before privacy consent is given (PR #3257)
--   Add a `has_consented` template var to consent forms (PR #3262)
--   Fix dependency on jinja2 (PR #3263)
+-   ConsentResource to gather policy consent from users. ([\#3213](https://github.com/matrix-org/synapse/issues/3213))
+-   Move RoomCreationHandler out of synapse.handlers.Handlers. ([\#3225](https://github.com/matrix-org/synapse/issues/3225))
+-   Infrastructure for a server notices room. ([\#3232](https://github.com/matrix-org/synapse/issues/3232))
+-   Send users a server notice about consent. ([\#3236](https://github.com/matrix-org/synapse/issues/3236))
+-   Reject attempts to send event before privacy consent is given. ([\#3257](https://github.com/matrix-org/synapse/issues/3257))
+-   Add a `has_consented` template var to consent forms. ([\#3262](https://github.com/matrix-org/synapse/issues/3262))
+-   Fix dependency on jinja2. ([\#3263](https://github.com/matrix-org/synapse/issues/3263))
 
 Features:
 
--   Cohort analytics (PR #3163, #3241, #3251)
--   Add lxml to docker image for web previews (PR #3239) Thanks to @ptman!
--   Add in flight request metrics (PR #3252)
+-   Cohort analytics. ([\#3163](https://github.com/matrix-org/synapse/issues/3163), [\#3241](https://github.com/matrix-org/synapse/issues/3241), [\#3251](https://github.com/matrix-org/synapse/issues/3251))
+-   Add lxml to docker image for web previews. Thanks to @ptman! ([\#3239](https://github.com/matrix-org/synapse/issues/3239))
+-   Add in flight request metrics. ([\#3252](https://github.com/matrix-org/synapse/issues/3252))
 
 Changes:
 
--   Remove unused `update_external_syncs` (PR #3233)
--   Use stream rather depth ordering for push actions (PR #3212)
--   Make `purge_history` operate on tokens (PR #3221)
--   Don't support limitless pagination (PR #3265)
+-   Remove unused `update_external_syncs`. ([\#3233](https://github.com/matrix-org/synapse/issues/3233))
+-   Use stream rather depth ordering for push actions. ([\#3212](https://github.com/matrix-org/synapse/issues/3212))
+-   Make `purge_history` operate on tokens. ([\#3221](https://github.com/matrix-org/synapse/issues/3221))
+-   Don't support limitless pagination. ([\#3265](https://github.com/matrix-org/synapse/issues/3265))
 
 Bug Fixes:
 
--   Fix logcontext resource usage tracking (PR #3258)
--   Fix error in handling receipts (PR #3235)
--   Stop the transaction cache caching failures (PR #3255)
+-   Fix logcontext resource usage tracking. ([\#3258](https://github.com/matrix-org/synapse/issues/3258))
+-   Fix error in handling receipts. ([\#3235](https://github.com/matrix-org/synapse/issues/3235))
+-   Stop the transaction cache caching failures. ([\#3255](https://github.com/matrix-org/synapse/issues/3255))
 
 Changes in synapse v0.29.1 (2018-05-17)
 =======================================
 
 Changes:
 
--   Update docker documentation (PR #3222)
+-   Update docker documentation. ([\#3222](https://github.com/matrix-org/synapse/issues/3222))
 
 Changes in synapse v0.29.0 (2018-05-16)
 =======================================
@@ -1407,7 +1407,7 @@ Notable changes, a docker file for running Synapse (Thanks to @kaiyou!) and a cl
 
 Potentially breaking change:
 
--   Make Client-Server API return 401 for invalid token (PR #3161).
+-   Make Client-Server API return 401 for invalid token. ([\#3161](https://github.com/matrix-org/synapse/issues/3161))
 
     This changes the Client-server spec to return a 401 error code instead of 403 when the access token is unrecognised. This is the behaviour required by the specification, but some clients may be relying on the old, incorrect behaviour.
 
@@ -1415,64 +1415,64 @@ Potentially breaking change:
 
 Features:
 
--   Add a Dockerfile for synapse (PR #2846) Thanks to @kaiyou!
+-   Add a Dockerfile for synapse. Thanks to @kaiyou! ([\#2846](https://github.com/matrix-org/synapse/issues/2846))
 
 Changes - General:
 
--   nuke-room-from-db.sh: added postgresql option and help (PR #2337) Thanks to @rubo77!
--   Part user from rooms on account deactivate (PR #3201)
--   Make "unexpected logging context" into warnings (PR #3007)
--   Set Server header in SynapseRequest (PR #3208)
--   remove duplicates from groups tables (PR #3129)
--   Improve exception handling for background processes (PR #3138)
--   Add missing consumeErrors to improve exception handling (PR #3139)
--   reraise exceptions more carefully (PR #3142)
--   Remove redundant call to `preserve_fn` (PR #3143)
--   Trap exceptions thrown within `run_in_background` (PR #3144)
+-   nuke-room-from-db.sh: added postgresql option and help. Thanks to @rubo77! ([\#2337](https://github.com/matrix-org/synapse/issues/2337))
+-   Part user from rooms on account deactivate. ([\#3201](https://github.com/matrix-org/synapse/issues/3201))
+-   Make "unexpected logging context" into warnings. ([\#3007](https://github.com/matrix-org/synapse/issues/3007))
+-   Set Server header in SynapseRequest. ([\#3208](https://github.com/matrix-org/synapse/issues/3208))
+-   remove duplicates from groups tables. ([\#3129](https://github.com/matrix-org/synapse/issues/3129))
+-   Improve exception handling for background processes. ([\#3138](https://github.com/matrix-org/synapse/issues/3138))
+-   Add missing consumeErrors to improve exception handling. ([\#3139](https://github.com/matrix-org/synapse/issues/3139))
+-   reraise exceptions more carefully. ([\#3142](https://github.com/matrix-org/synapse/issues/3142))
+-   Remove redundant call to `preserve_fn`. ([\#3143](https://github.com/matrix-org/synapse/issues/3143))
+-   Trap exceptions thrown within `run_in_background`. ([\#3144](https://github.com/matrix-org/synapse/issues/3144))
 
 Changes - Refactors:
 
--   Refactor /context to reuse pagination storage functions (PR #3193)
--   Refactor recent events func to use pagination func (PR #3195)
--   Refactor pagination DB API to return concrete type (PR #3196)
--   Refactor `get_recent_events_for_room` return type (PR #3198)
--   Refactor sync APIs to reuse pagination API (PR #3199)
--   Remove unused code path from member change DB func (PR #3200)
--   Refactor request handling wrappers (PR #3203)
--   `transaction_id`, destination defined twice (PR #3209) Thanks to @damir-manapov!
--   Refactor event storage to prepare for changes in state calculations (PR #3141)
--   Set Server header in SynapseRequest (PR #3208)
--   Use deferred.addTimeout instead of `time_bound_deferred` (PR #3127, #3178)
--   Use `run_in_background` in preference to `preserve_fn` (PR #3140)
+-   Refactor /context to reuse pagination storage functions. ([\#3193](https://github.com/matrix-org/synapse/issues/3193))
+-   Refactor recent events func to use pagination func. ([\#3195](https://github.com/matrix-org/synapse/issues/3195))
+-   Refactor pagination DB API to return concrete type. ([\#3196](https://github.com/matrix-org/synapse/issues/3196))
+-   Refactor `get_recent_events_for_room` return type. ([\#3198](https://github.com/matrix-org/synapse/issues/3198))
+-   Refactor sync APIs to reuse pagination API. ([\#3199](https://github.com/matrix-org/synapse/issues/3199))
+-   Remove unused code path from member change DB func. ([\#3200](https://github.com/matrix-org/synapse/issues/3200))
+-   Refactor request handling wrappers. ([\#3203](https://github.com/matrix-org/synapse/issues/3203))
+-   `transaction_id`, destination defined twice. Thanks to @damir-manapov! ([\#3209](https://github.com/matrix-org/synapse/issues/3209))
+-   Refactor event storage to prepare for changes in state calculations. ([\#3141](https://github.com/matrix-org/synapse/issues/3141))
+-   Set Server header in SynapseRequest. ([\#3208](https://github.com/matrix-org/synapse/issues/3208))
+-   Use deferred.addTimeout instead of `time_bound_deferred`. ([\#3127](https://github.com/matrix-org/synapse/issues/3127), [\#3178](https://github.com/matrix-org/synapse/issues/3178))
+-   Use `run_in_background` in preference to `preserve_fn`. ([\#3140](https://github.com/matrix-org/synapse/issues/3140))
 
 Changes - Python 3 migration:
 
--   Construct HMAC as bytes on py3 (PR #3156) Thanks to @NotAFile!
--   run config tests on py3 (PR #3159) Thanks to @NotAFile!
--   Open certificate files as bytes (PR #3084) Thanks to @NotAFile!
--   Open config file in non-bytes mode (PR #3085) Thanks to @NotAFile!
--   Make event properties raise AttributeError instead (PR #3102) Thanks to @NotAFile!
--   Use six.moves.urlparse (PR #3108) Thanks to @NotAFile!
--   Add py3 tests to tox with folders that work (PR #3145) Thanks to @NotAFile!
--   Don't yield in list comprehensions (PR #3150) Thanks to @NotAFile!
--   Move more xrange to six (PR #3151) Thanks to @NotAFile!
--   make imports local (PR #3152) Thanks to @NotAFile!
--   move httplib import to six (PR #3153) Thanks to @NotAFile!
--   Replace stringIO imports with six (PR #3154, #3168) Thanks to @NotAFile!
--   more bytes strings (PR #3155) Thanks to @NotAFile!
+-   Construct HMAC as bytes on py3. Thanks to @NotAFile! ([\#3156](https://github.com/matrix-org/synapse/issues/3156))
+-   run config tests on py3. Thanks to @NotAFile! ([\#3159](https://github.com/matrix-org/synapse/issues/3159))
+-   Open certificate files as bytes. Thanks to @NotAFile! ([\#3084](https://github.com/matrix-org/synapse/issues/3084))
+-   Open config file in non-bytes mode. Thanks to @NotAFile! ([\#3085](https://github.com/matrix-org/synapse/issues/3085))
+-   Make event properties raise AttributeError instead. Thanks to @NotAFile! ([\#3102](https://github.com/matrix-org/synapse/issues/3102))
+-   Use six.moves.urlparse. Thanks to @NotAFile! ([\#3108](https://github.com/matrix-org/synapse/issues/3108))
+-   Add py3 tests to tox with folders that work. Thanks to @NotAFile! ([\#3145](https://github.com/matrix-org/synapse/issues/3145))
+-   Don't yield in list comprehensions. Thanks to @NotAFile! ([\#3150](https://github.com/matrix-org/synapse/issues/3150))
+-   Move more xrange to six. Thanks to @NotAFile! ([\#3151](https://github.com/matrix-org/synapse/issues/3151))
+-   make imports local. Thanks to @NotAFile! ([\#3152](https://github.com/matrix-org/synapse/issues/3152))
+-   move httplib import to six. Thanks to @NotAFile! ([\#3153](https://github.com/matrix-org/synapse/issues/3153))
+-   Replace stringIO imports with six. Thanks to @NotAFile! ([\#3154](https://github.com/matrix-org/synapse/issues/3154), [\#3168](https://github.com/matrix-org/synapse/issues/3168))
+-   more bytes strings. Thanks to @NotAFile! ([\#3155](https://github.com/matrix-org/synapse/issues/3155))
 
 Bug Fixes:
 
--   synapse fails to start under Twisted >= 18.4 (PR #3157)
--   Fix a class of logcontext leaks (PR #3170)
--   Fix a couple of logcontext leaks in unit tests (PR #3172)
--   Fix logcontext leak in media repo (PR #3174)
--   Escape label values in prometheus metrics (PR #3175, #3186)
--   Fix "Unhandled Error" logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot!
--   Fix logcontext leaks in rate limiter (PR #3183)
--   notifications: Convert `next_token` to string according to the spec (PR #3190) Thanks to @mujx!
--   nuke-room-from-db.sh: fix deletion from search table (PR #3194) Thanks to @rubo77!
--   add guard for None on `purge_history` api (PR #3160) Thanks to @krombel!
+-   synapse fails to start under Twisted >= 18.4. ([\#3157](https://github.com/matrix-org/synapse/issues/3157))
+-   Fix a class of logcontext leaks. ([\#3170](https://github.com/matrix-org/synapse/issues/3170))
+-   Fix a couple of logcontext leaks in unit tests. ([\#3172](https://github.com/matrix-org/synapse/issues/3172))
+-   Fix logcontext leak in media repo. ([\#3174](https://github.com/matrix-org/synapse/issues/3174))
+-   Escape label values in prometheus metrics. ([\#3175](https://github.com/matrix-org/synapse/issues/3175), [\#3186](https://github.com/matrix-org/synapse/issues/3186))
+-   Fix "Unhandled Error" logs with Twisted 18.4. Thanks to @Half-Shot! ([\#3182](https://github.com/matrix-org/synapse/issues/3182))
+-   Fix logcontext leaks in rate limiter. ([\#3183](https://github.com/matrix-org/synapse/issues/3183))
+-   notifications: Convert `next_token` to string according to the spec. Thanks to @mujx! ([\#3190](https://github.com/matrix-org/synapse/issues/3190))
+-   nuke-room-from-db.sh: fix deletion from search table. Thanks to @rubo77! ([\#3194](https://github.com/matrix-org/synapse/issues/3194))
+-   add guard for None on `purge_history` api. Thanks to @krombel! ([\#3160](https://github.com/matrix-org/synapse/issues/3160))
 
 Changes in synapse v0.28.1 (2018-05-01)
 =======================================
@@ -1492,8 +1492,8 @@ Changes in synapse v0.28.0 (2018-04-26)
 
 Bug Fixes:
 
--   Fix quarantine media admin API and search reindex (PR #3130)
--   Fix media admin APIs (PR #3134)
+-   Fix quarantine media admin API and search reindex. ([\#3130](https://github.com/matrix-org/synapse/issues/3130))
+-   Fix media admin APIs. ([\#3134](https://github.com/matrix-org/synapse/issues/3134))
 
 Changes in synapse v0.28.0-rc1 (2018-04-24)
 ===========================================
@@ -1504,49 +1504,49 @@ Minor performance improvement to federation sending and bug fixes.
 
 Features:
 
--   Add metrics for event processing lag (PR #3090)
--   Add metrics for ResponseCache (PR #3092)
+-   Add metrics for event processing lag. ([\#3090](https://github.com/matrix-org/synapse/issues/3090))
+-   Add metrics for ResponseCache. ([\#3092](https://github.com/matrix-org/synapse/issues/3092))
 
 Changes:
 
--   Synapse on PyPy (PR #2760) Thanks to @Valodim!
--   move handling of `auto_join_rooms` to RegisterHandler (PR #2996) Thanks to @krombel!
--   Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh!
--   Document the behaviour of ResponseCache (PR #3059)
--   Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile!
--   update prometheus dashboard to use new metric names (PR #3069) Thanks to @krombel!
--   use python3-compatible prints (PR #3074) Thanks to @NotAFile!
--   Send federation events concurrently (PR #3078)
--   Limit concurrent event sends for a room (PR #3079)
--   Improve R30 stat definition (PR #3086)
--   Send events to ASes concurrently (PR #3088)
--   Refactor ResponseCache usage (PR #3093)
--   Clarify that SRV may not point to a CNAME (PR #3100) Thanks to @silkeh!
--   Use str(e) instead of e.message (PR #3103) Thanks to @NotAFile!
--   Use six.itervalues in some places (PR #3106) Thanks to @NotAFile!
--   Refactor `store.have_events` (PR #3117)
+-   Synapse on PyPy. Thanks to @Valodim! ([\#2760](https://github.com/matrix-org/synapse/issues/2760))
+-   move handling of `auto_join_rooms` to RegisterHandler. Thanks to @krombel! ([\#2996](https://github.com/matrix-org/synapse/issues/2996))
+-   Improve handling of SRV records for federation connections. Thanks to @silkeh! ([\#3016](https://github.com/matrix-org/synapse/issues/3016))
+-   Document the behaviour of ResponseCache. ([\#3059](https://github.com/matrix-org/synapse/issues/3059))
+-   Preparation for py3. Thanks to @NotAFile! ([\#3061](https://github.com/matrix-org/synapse/issues/3061), [\#3073](https://github.com/matrix-org/synapse/issues/3073), [\#3074](https://github.com/matrix-org/synapse/issues/3074), [\#3075](https://github.com/matrix-org/synapse/issues/3075), [\#3103](https://github.com/matrix-org/synapse/issues/3103), [\#3104](https://github.com/matrix-org/synapse/issues/3104), [\#3106](https://github.com/matrix-org/synapse/issues/3106), [\#3107](https://github.com/matrix-org/synapse/issues/3107), [\#3109](https://github.com/matrix-org/synapse/issues/3109), [\#3110](https://github.com/matrix-org/synapse/issues/3110))
+-   update prometheus dashboard to use new metric names. Thanks to @krombel! ([\#3069](https://github.com/matrix-org/synapse/issues/3069))
+-   use python3-compatible prints. Thanks to @NotAFile! ([\#3074](https://github.com/matrix-org/synapse/issues/3074))
+-   Send federation events concurrently. ([\#3078](https://github.com/matrix-org/synapse/issues/3078))
+-   Limit concurrent event sends for a room. ([\#3079](https://github.com/matrix-org/synapse/issues/3079))
+-   Improve R30 stat definition. ([\#3086](https://github.com/matrix-org/synapse/issues/3086))
+-   Send events to ASes concurrently. ([\#3088](https://github.com/matrix-org/synapse/issues/3088))
+-   Refactor ResponseCache usage. ([\#3093](https://github.com/matrix-org/synapse/issues/3093))
+-   Clarify that SRV may not point to a CNAME. Thanks to @silkeh! ([\#3100](https://github.com/matrix-org/synapse/issues/3100))
+-   Use str(e) instead of e.message. Thanks to @NotAFile! ([\#3103](https://github.com/matrix-org/synapse/issues/3103))
+-   Use six.itervalues in some places. Thanks to @NotAFile! ([\#3106](https://github.com/matrix-org/synapse/issues/3106))
+-   Refactor `store.have_events`. ([\#3117](https://github.com/matrix-org/synapse/issues/3117))
 
 Bug Fixes:
 
--   Return 401 for invalid `access_token` on logout (PR #2938) Thanks to @dklug!
--   Return a 404 rather than a 500 on rejoining empty rooms (PR #3080)
--   fix `federation_domain_whitelist` (PR #3099)
--   Avoid creating events with huge numbers of `prev_events` (PR #3113)
--   Reject events which have lots of `prev_events` (PR #3118)
+-   Return 401 for invalid `access_token` on logout. Thanks to @dklug! ([\#2938](https://github.com/matrix-org/synapse/issues/2938))
+-   Return a 404 rather than a 500 on rejoining empty rooms. ([\#3080](https://github.com/matrix-org/synapse/issues/3080))
+-   fix `federation_domain_whitelist`. ([\#3099](https://github.com/matrix-org/synapse/issues/3099))
+-   Avoid creating events with huge numbers of `prev_events`. ([\#3113](https://github.com/matrix-org/synapse/issues/3113))
+-   Reject events which have lots of `prev_events`. ([\#3118](https://github.com/matrix-org/synapse/issues/3118))
 
 Changes in synapse v0.27.4 (2018-04-13)
 =======================================
 
 Changes:
 
--   Update canonicaljson dependency (\#3095)
+-   Update canonicaljson dependency. ([\#3095](https://github.com/matrix-org/synapse/issues/3095))
 
 Changes in synapse v0.27.3 (2018-04-11)
 =======================================
 
 Bug fixes:
 
--   URL quote path segments over federation (\#3082)
+-   URL quote path segments over federation. ([\#3082](https://github.com/matrix-org/synapse/issues/3082))
 
 Changes in synapse v0.27.3-rc2 (2018-04-09)
 ===========================================
@@ -1566,43 +1566,43 @@ Counts the number of native 30 day retained users, defined as:
 
 Features:
 
--   Add joinability for groups (PR #3045)
--   Implement group join API (PR #3046)
--   Add counter metrics for calculating state delta (PR #3033)
--   R30 stats (PR #3041)
--   Measure time it takes to calculate state group ID (PR #3043)
--   Add basic performance statistics to phone home (PR #3044)
--   Add response size metrics (PR #3071)
--   phone home cache size configurations (PR #3063)
+-   Add joinability for groups. ([\#3045](https://github.com/matrix-org/synapse/issues/3045))
+-   Implement group join API. ([\#3046](https://github.com/matrix-org/synapse/issues/3046))
+-   Add counter metrics for calculating state delta. ([\#3033](https://github.com/matrix-org/synapse/issues/3033))
+-   R30 stats. ([\#3041](https://github.com/matrix-org/synapse/issues/3041))
+-   Measure time it takes to calculate state group ID. ([\#3043](https://github.com/matrix-org/synapse/issues/3043))
+-   Add basic performance statistics to phone home. ([\#3044](https://github.com/matrix-org/synapse/issues/3044))
+-   Add response size metrics. ([\#3071](https://github.com/matrix-org/synapse/issues/3071))
+-   phone home cache size configurations. ([\#3063](https://github.com/matrix-org/synapse/issues/3063))
 
 Changes:
 
--   Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live!
--   Replace old style error catching with `as` keyword (PR #3000) Thanks to @NotAFile!
--   Use `.iter*` to avoid copies in StateHandler (PR #3006)
--   Linearize calls to `_generate_user_id` (PR #3029)
--   Remove last usage of ujson (PR #3030)
--   Use simplejson throughout (PR #3048)
--   Use static JSONEncoders (PR #3049)
--   Remove uses of events.content (PR #3060)
--   Improve database cache performance (PR #3068)
+-   Add a blurb explaining the main synapse worker. Thanks to @turt2live! ([\#2886](https://github.com/matrix-org/synapse/issues/2886))
+-   Replace old style error catching with `as` keyword. Thanks to @NotAFile! ([\#3000](https://github.com/matrix-org/synapse/issues/3000))
+-   Use `.iter*` to avoid copies in StateHandler. ([\#3006](https://github.com/matrix-org/synapse/issues/3006))
+-   Linearize calls to `_generate_user_id`. ([\#3029](https://github.com/matrix-org/synapse/issues/3029))
+-   Remove last usage of ujson. ([\#3030](https://github.com/matrix-org/synapse/issues/3030))
+-   Use simplejson throughout. ([\#3048](https://github.com/matrix-org/synapse/issues/3048))
+-   Use static JSONEncoders. ([\#3049](https://github.com/matrix-org/synapse/issues/3049))
+-   Remove uses of events.content. ([\#3060](https://github.com/matrix-org/synapse/issues/3060))
+-   Improve database cache performance. ([\#3068](https://github.com/matrix-org/synapse/issues/3068))
 
 Bug fixes:
 
--   Add `room_id` to the response of rooms/{roomId}/join (PR #2986) Thanks to @jplatte!
--   Fix replication after switch to simplejson (PR #3015)
--   404 correctly on missing paths via NoResource (PR #3022)
--   Fix error when claiming e2e keys from offline servers (PR #3034)
--   fix `tests/storage/test_user_directory.py` (PR #3042)
--   use `PUT` instead of `POST` for federating `groups`/`m.join_policy` (PR #3070) Thanks to @krombel!
--   postgres port script: fix `state_groups_pkey` error (PR #3072)
+-   Add `room_id` to the response of rooms/{roomId}/join. Thanks to @jplatte! ([\#2986](https://github.com/matrix-org/synapse/issues/2986))
+-   Fix replication after switch to simplejson. ([\#3015](https://github.com/matrix-org/synapse/issues/3015))
+-   404 correctly on missing paths via NoResource. ([\#3022](https://github.com/matrix-org/synapse/issues/3022))
+-   Fix error when claiming e2e keys from offline servers. ([\#3034](https://github.com/matrix-org/synapse/issues/3034))
+-   fix `tests/storage/test_user_directory.py`. ([\#3042](https://github.com/matrix-org/synapse/issues/3042))
+-   use `PUT` instead of `POST` for federating `groups`/`m.join_policy`. Thanks to @krombel! ([\#3070](https://github.com/matrix-org/synapse/issues/3070))
+-   postgres port script: fix `state_groups_pkey` error. ([\#3072](https://github.com/matrix-org/synapse/issues/3072))
 
 Changes in synapse v0.27.2 (2018-03-26)
 =======================================
 
 Bug fixes:
 
--   Fix bug which broke TCP replication between workers (PR #3015)
+-   Fix bug which broke TCP replication between workers. ([\#3015](https://github.com/matrix-org/synapse/issues/3015))
 
 Changes in synapse v0.27.1 (2018-03-26)
 =======================================
@@ -1621,14 +1621,14 @@ Pulls in v0.26.1
 
 Bug fixes:
 
--   Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache (PR #3005)
+-   Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache. ([\#3005](https://github.com/matrix-org/synapse/issues/3005))
 
 Changes in synapse v0.26.1 (2018-03-15)
 =======================================
 
 Bug fixes:
 
--   Fix bug where an invalid event caused server to stop functioning correctly, due to parsing and serializing bugs in ujson library (PR #3008)
+-   Fix bug where an invalid event caused server to stop functioning correctly, due to parsing and serializing bugs in ujson library. ([\#3008](https://github.com/matrix-org/synapse/issues/3008))
 
 Changes in synapse v0.27.0-rc1 (2018-03-14)
 ===========================================
@@ -1639,41 +1639,41 @@ This release also begins the process of renaming a number of the metrics reporte
 
 Features:
 
--   Add ability for ASes to override message send time (PR #2754)
--   Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767)
--   Add purge API features, see [docs/admin_api/purge_history_api.rst](docs/admin_api/purge_history_api.rst) for full details (PR #2858, #2867, #2882, #2946, #2962, #2943)
--   Add support for whitelisting 3PIDs that users can register. (PR #2813)
--   Add `/room/{id}/event/{id}` API (PR #2766)
--   Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live!
--   Add `federation_domain_whitelist` option (PR #2820, #2821)
+-   Add ability for ASes to override message send time. ([\#2754](https://github.com/matrix-org/synapse/issues/2754))
+-   Add support for custom storage providers for media repository. ([\#2867](https://github.com/matrix-org/synapse/issues/2867), [\#2777](https://github.com/matrix-org/synapse/issues/2777), [\#2783](https://github.com/matrix-org/synapse/issues/2783), [\#2789](https://github.com/matrix-org/synapse/issues/2789), [\#2791](https://github.com/matrix-org/synapse/issues/2791), [\#2804](https://github.com/matrix-org/synapse/issues/2804), [\#2812](https://github.com/matrix-org/synapse/issues/2812), [\#2814](https://github.com/matrix-org/synapse/issues/2814), [\#2857](https://github.com/matrix-org/synapse/issues/2857), [\#2868](https://github.com/matrix-org/synapse/issues/2868), [\#2767](https://github.com/matrix-org/synapse/issues/2767))
+-   Add purge API features, see [docs/admin_api/purge_history_api.rst](docs/admin_api/purge_history_api.rst) for full details. ([\#2858](https://github.com/matrix-org/synapse/issues/2858), [\#2867](https://github.com/matrix-org/synapse/issues/2867), [\#2882](https://github.com/matrix-org/synapse/issues/2882), [\#2946](https://github.com/matrix-org/synapse/issues/2946), [\#2962](https://github.com/matrix-org/synapse/issues/2962), [\#2943](https://github.com/matrix-org/synapse/issues/2943))
+-   Add support for whitelisting 3PIDs that users can register. ([\#2813](https://github.com/matrix-org/synapse/issues/2813))
+-   Add `/room/{id}/event/{id}` API. ([\#2766](https://github.com/matrix-org/synapse/issues/2766))
+-   Add an admin API to get all the media in a room. Thanks to @turt2live! ([\#2818](https://github.com/matrix-org/synapse/issues/2818))
+-   Add `federation_domain_whitelist` option. ([\#2820](https://github.com/matrix-org/synapse/issues/2820), [\#2821](https://github.com/matrix-org/synapse/issues/2821))
 
 Changes:
 
--   Continue to factor out processing from main process and into worker processes. See updated [docs/workers.rst](docs/workers.rst) (PR #2892 - \#2904, #2913, #2920 - \#2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - \#2984, #2987 - \#2989, #2991 - \#2993, #2995, #2784)
--   Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849)
--   Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh!
--   No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
--   Remove `verbosity`/`log_file` from generated config (PR #2755)
--   Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838)
--   When using synctl with workers, Don't start the main synapse automatically (PR #2774)
--   Minor performance improvements (PR #2773, #2792)
--   Use a connection pool for non-federation outbound connections (PR #2817)
--   Make it possible to run unit tests against postgres (PR #2829)
--   Update pynacl dependency to 1.2.1 or higher (PR #2888) Thanks to @bachp!
--   Remove ability for AS users to call /events and /sync (PR #2948)
--   Use bcrypt.checkpw (PR #2949) Thanks to @krombel!
+-   Continue to factor out processing from main process and into worker processes. See updated [docs/workers.rst](docs/workers.rst) ([\#2892](https://github.com/matrix-org/synapse/issues/2892), [\#2893](https://github.com/matrix-org/synapse/issues/2893), [\#2894](https://github.com/matrix-org/synapse/issues/2894), [\#2896](https://github.com/matrix-org/synapse/issues/2896), [\#2897](https://github.com/matrix-org/synapse/issues/2897), [\#2898](https://github.com/matrix-org/synapse/issues/2898), [\#2899](https://github.com/matrix-org/synapse/issues/2899), [\#2900](https://github.com/matrix-org/synapse/issues/2900), [\#2901](https://github.com/matrix-org/synapse/issues/2901), [\#2902](https://github.com/matrix-org/synapse/issues/2902), [\#2903](https://github.com/matrix-org/synapse/issues/2903), [\#2904](https://github.com/matrix-org/synapse/issues/2904), [\#2913](https://github.com/matrix-org/synapse/issues/2913), [\#2920](https://github.com/matrix-org/synapse/issues/2920), [\#2921](https://github.com/matrix-org/synapse/issues/2921), [\#2922](https://github.com/matrix-org/synapse/issues/2922), [\#2923](https://github.com/matrix-org/synapse/issues/2923), [\#2924](https://github.com/matrix-org/synapse/issues/2924), [\#2925](https://github.com/matrix-org/synapse/issues/2925), [\#2926](https://github.com/matrix-org/synapse/issues/2926), [\#2947](https://github.com/matrix-org/synapse/issues/2947), [\#2847](https://github.com/matrix-org/synapse/issues/2847), [\#2854](https://github.com/matrix-org/synapse/issues/2854), [\#2872](https://github.com/matrix-org/synapse/issues/2872), [\#2873](https://github.com/matrix-org/synapse/issues/2873), [\#2874](https://github.com/matrix-org/synapse/issues/2874), [\#2928](https://github.com/matrix-org/synapse/issues/2928), [\#2929](https://github.com/matrix-org/synapse/issues/2929), [\#2934](https://github.com/matrix-org/synapse/issues/2934), [\#2856](https://github.com/matrix-org/synapse/issues/2856), 
[\#2976](https://github.com/matrix-org/synapse/issues/2976), [\#2977](https://github.com/matrix-org/synapse/issues/2977), [\#2978](https://github.com/matrix-org/synapse/issues/2978), [\#2979](https://github.com/matrix-org/synapse/issues/2979), [\#2980](https://github.com/matrix-org/synapse/issues/2980), [\#2981](https://github.com/matrix-org/synapse/issues/2981), [\#2982](https://github.com/matrix-org/synapse/issues/2982), [\#2983](https://github.com/matrix-org/synapse/issues/2983), [\#2984](https://github.com/matrix-org/synapse/issues/2984), [\#2987](https://github.com/matrix-org/synapse/issues/2987), [\#2988](https://github.com/matrix-org/synapse/issues/2988), [\#2989](https://github.com/matrix-org/synapse/issues/2989), [\#2991](https://github.com/matrix-org/synapse/issues/2991), [\#2992](https://github.com/matrix-org/synapse/issues/2992), [\#2993](https://github.com/matrix-org/synapse/issues/2993), [\#2995](https://github.com/matrix-org/synapse/issues/2995), [\#2784](https://github.com/matrix-org/synapse/issues/2784))
+-   Ensure state cache is used when persisting events. ([\#2864](https://github.com/matrix-org/synapse/issues/2864), [\#2871](https://github.com/matrix-org/synapse/issues/2871), [\#2802](https://github.com/matrix-org/synapse/issues/2802), [\#2835](https://github.com/matrix-org/synapse/issues/2835), [\#2836](https://github.com/matrix-org/synapse/issues/2836), [\#2841](https://github.com/matrix-org/synapse/issues/2841), [\#2842](https://github.com/matrix-org/synapse/issues/2842), [\#2849](https://github.com/matrix-org/synapse/issues/2849))
+-   Change the default config to bind on both IPv4 and IPv6 on all platforms. Thanks to @silkeh! ([\#2435](https://github.com/matrix-org/synapse/issues/2435))
+-   No longer require a specific version of saml2. Thanks to @okurz! ([\#2695](https://github.com/matrix-org/synapse/issues/2695))
+-   Remove `verbosity`/`log_file` from generated config. ([\#2755](https://github.com/matrix-org/synapse/issues/2755))
+-   Add and improve metrics and logging. ([\#2770](https://github.com/matrix-org/synapse/issues/2770), [\#2778](https://github.com/matrix-org/synapse/issues/2778), [\#2785](https://github.com/matrix-org/synapse/issues/2785), [\#2786](https://github.com/matrix-org/synapse/issues/2786), [\#2787](https://github.com/matrix-org/synapse/issues/2787), [\#2793](https://github.com/matrix-org/synapse/issues/2793), [\#2794](https://github.com/matrix-org/synapse/issues/2794), [\#2795](https://github.com/matrix-org/synapse/issues/2795), [\#2809](https://github.com/matrix-org/synapse/issues/2809), [\#2810](https://github.com/matrix-org/synapse/issues/2810), [\#2833](https://github.com/matrix-org/synapse/issues/2833), [\#2834](https://github.com/matrix-org/synapse/issues/2834), [\#2844](https://github.com/matrix-org/synapse/issues/2844), [\#2965](https://github.com/matrix-org/synapse/issues/2965), [\#2927](https://github.com/matrix-org/synapse/issues/2927), [\#2975](https://github.com/matrix-org/synapse/issues/2975), [\#2790](https://github.com/matrix-org/synapse/issues/2790), [\#2796](https://github.com/matrix-org/synapse/issues/2796), [\#2838](https://github.com/matrix-org/synapse/issues/2838))
+-   When using synctl with workers, don't start the main synapse automatically. ([\#2774](https://github.com/matrix-org/synapse/issues/2774))
+-   Minor performance improvements. ([\#2773](https://github.com/matrix-org/synapse/issues/2773), [\#2792](https://github.com/matrix-org/synapse/issues/2792))
+-   Use a connection pool for non-federation outbound connections. ([\#2817](https://github.com/matrix-org/synapse/issues/2817))
+-   Make it possible to run unit tests against postgres. ([\#2829](https://github.com/matrix-org/synapse/issues/2829))
+-   Update pynacl dependency to 1.2.1 or higher. Thanks to @bachp! ([\#2888](https://github.com/matrix-org/synapse/issues/2888))
+-   Remove ability for AS users to call /events and /sync. ([\#2948](https://github.com/matrix-org/synapse/issues/2948))
+-   Use bcrypt.checkpw. Thanks to @krombel! ([\#2949](https://github.com/matrix-org/synapse/issues/2949))
 
 Bug fixes:
 
--   Fix broken `ldap_config` config option (PR #2683) Thanks to @seckrv!
--   Fix error message when user is not allowed to unban (PR #2761) Thanks to @turt2live!
--   Fix publicised groups GET API (singular) over federation (PR #2772)
--   Fix user directory when using `user_directory_search_all_users` config option (PR #2803, #2831)
--   Fix error on `/publicRooms` when no rooms exist (PR #2827)
--   Fix bug in `quarantine_media` (PR #2837)
--   Fix `url_previews` when no `Content-Type` is returned from URL (PR #2845)
--   Fix rare race in sync API when joining room (PR #2944)
--   Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848)
+-   Fix broken `ldap_config` config option. Thanks to @seckrv! ([\#2683](https://github.com/matrix-org/synapse/issues/2683))
+-   Fix error message when user is not allowed to unban. Thanks to @turt2live! ([\#2761](https://github.com/matrix-org/synapse/issues/2761))
+-   Fix publicised groups GET API (singular) over federation. ([\#2772](https://github.com/matrix-org/synapse/issues/2772))
+-   Fix user directory when using `user_directory_search_all_users` config option. ([\#2803](https://github.com/matrix-org/synapse/issues/2803), [\#2831](https://github.com/matrix-org/synapse/issues/2831))
+-   Fix error on `/publicRooms` when no rooms exist. ([\#2827](https://github.com/matrix-org/synapse/issues/2827))
+-   Fix bug in `quarantine_media`. ([\#2837](https://github.com/matrix-org/synapse/issues/2837))
+-   Fix `url_previews` when no `Content-Type` is returned from URL. ([\#2845](https://github.com/matrix-org/synapse/issues/2845))
+-   Fix rare race in sync API when joining room. ([\#2944](https://github.com/matrix-org/synapse/issues/2944))
+-   Fix slow event search, switch back from GIST to GIN indexes. ([\#2769](https://github.com/matrix-org/synapse/issues/2769), [\#2848](https://github.com/matrix-org/synapse/issues/2848))
 
 Changes in synapse v0.26.0 (2018-01-05)
 =======================================
@@ -1685,93 +1685,93 @@ Changes in synapse v0.26.0-rc1 (2017-12-13)
 
 Features:
 
--   Add ability for ASes to publicise groups for their users (PR #2686)
--   Add all local users to the `user_directory` and optionally search them (PR #2723)
--   Add support for custom login types for validating users (PR #2729)
+-   Add ability for ASes to publicise groups for their users. ([\#2686](https://github.com/matrix-org/synapse/issues/2686))
+-   Add all local users to the `user_directory` and optionally search them. ([\#2723](https://github.com/matrix-org/synapse/issues/2723))
+-   Add support for custom login types for validating users. ([\#2729](https://github.com/matrix-org/synapse/issues/2729))
 
 Changes:
 
--   Update example Prometheus config to new format (PR #2648) Thanks to @krombel!
--   Rename `redact_content` option to `include_content` in Push API (PR #2650)
--   Declare support for r0.3.0 (PR #2677)
--   Improve upserts (PR #2684, #2688, #2689, #2713)
--   Improve documentation of workers (PR #2700)
--   Improve tracebacks on exceptions (PR #2705)
--   Allow guest access to group APIs for reading (PR #2715)
--   Support for posting content in `federation_client` script (PR #2716)
--   Delete devices and pushers on logouts etc (PR #2722)
+-   Update example Prometheus config to new format. Thanks to @krombel! ([\#2648](https://github.com/matrix-org/synapse/issues/2648))
+-   Rename `redact_content` option to `include_content` in Push API. ([\#2650](https://github.com/matrix-org/synapse/issues/2650))
+-   Declare support for r0.3.0. ([\#2677](https://github.com/matrix-org/synapse/issues/2677))
+-   Improve upserts. ([\#2684](https://github.com/matrix-org/synapse/issues/2684), [\#2688](https://github.com/matrix-org/synapse/issues/2688), [\#2689](https://github.com/matrix-org/synapse/issues/2689), [\#2713](https://github.com/matrix-org/synapse/issues/2713))
+-   Improve documentation of workers. ([\#2700](https://github.com/matrix-org/synapse/issues/2700))
+-   Improve tracebacks on exceptions. ([\#2705](https://github.com/matrix-org/synapse/issues/2705))
+-   Allow guest access to group APIs for reading. ([\#2715](https://github.com/matrix-org/synapse/issues/2715))
+-   Support for posting content in `federation_client` script. ([\#2716](https://github.com/matrix-org/synapse/issues/2716))
+-   Delete devices and pushers on logouts etc. ([\#2722](https://github.com/matrix-org/synapse/issues/2722))
 
 Bug fixes:
 
--   Fix database port script (PR #2673)
--   Fix internal server error on login with `ldap_auth_provider` (PR #2678) Thanks to @jkolo!
--   Fix error on sqlite 3.7 (PR #2697)
--   Fix `OPTIONS` on `preview_url` (PR #2707)
--   Fix error handling on dns lookup (PR #2711)
--   Fix wrong avatars when inviting multiple users when creating room (PR #2717)
--   Fix 500 when joining matrix-dev (PR #2719)
+-   Fix database port script. ([\#2673](https://github.com/matrix-org/synapse/issues/2673))
+-   Fix internal server error on login with `ldap_auth_provider`. Thanks to @jkolo! ([\#2678](https://github.com/matrix-org/synapse/issues/2678))
+-   Fix error on sqlite 3.7. ([\#2697](https://github.com/matrix-org/synapse/issues/2697))
+-   Fix `OPTIONS` on `preview_url`. ([\#2707](https://github.com/matrix-org/synapse/issues/2707))
+-   Fix error handling on dns lookup. ([\#2711](https://github.com/matrix-org/synapse/issues/2711))
+-   Fix wrong avatars when inviting multiple users when creating room. ([\#2717](https://github.com/matrix-org/synapse/issues/2717))
+-   Fix 500 when joining matrix-dev. ([\#2719](https://github.com/matrix-org/synapse/issues/2719))
 
 Changes in synapse v0.25.1 (2017-11-17)
 =======================================
 
 Bug fixes:
 
--   Fix login with LDAP and other password provider modules (PR #2678). Thanks to @jkolo!
+-   Fix login with LDAP and other password provider modules. Thanks to @jkolo! ([\#2678](https://github.com/matrix-org/synapse/issues/2678))
 
 Changes in synapse v0.25.0 (2017-11-15)
 =======================================
 
 Bug fixes:
 
--   Fix port script (PR #2673)
+-   Fix port script. ([\#2673](https://github.com/matrix-org/synapse/issues/2673))
 
 Changes in synapse v0.25.0-rc1 (2017-11-14)
 ===========================================
 
 Features:
 
--   Add `is_public` to groups table to allow for private groups (PR #2582)
--   Add a route for determining who you are (PR #2668) Thanks to @turt2live!
--   Add more features to the password providers (PR #2608, #2610, #2620, #2622, #2623, #2624, #2626, #2628, #2629)
--   Add a hook for custom rest endpoints (PR #2627)
--   Add API to update group room visibility (PR #2651)
+-   Add `is_public` to groups table to allow for private groups. ([\#2582](https://github.com/matrix-org/synapse/issues/2582))
+-   Add a route for determining who you are. Thanks to @turt2live! ([\#2668](https://github.com/matrix-org/synapse/issues/2668))
+-   Add more features to the password providers. ([\#2608](https://github.com/matrix-org/synapse/issues/2608), [\#2610](https://github.com/matrix-org/synapse/issues/2610), [\#2620](https://github.com/matrix-org/synapse/issues/2620), [\#2622](https://github.com/matrix-org/synapse/issues/2622), [\#2623](https://github.com/matrix-org/synapse/issues/2623), [\#2624](https://github.com/matrix-org/synapse/issues/2624), [\#2626](https://github.com/matrix-org/synapse/issues/2626), [\#2628](https://github.com/matrix-org/synapse/issues/2628), [\#2629](https://github.com/matrix-org/synapse/issues/2629))
+-   Add a hook for custom rest endpoints. ([\#2627](https://github.com/matrix-org/synapse/issues/2627))
+-   Add API to update group room visibility. ([\#2651](https://github.com/matrix-org/synapse/issues/2651))
 
 Changes:
 
--   Ignore `<noscript\>` tags when generating URL preview descriptions (PR #2576) Thanks to @maximevaillancourt!
--   Register some /unstable endpoints in /r0 as well (PR #2579) Thanks to @krombel!
--   Support /keys/upload on /r0 as well as /unstable (PR #2585)
--   Front-end proxy: pass through auth header (PR #2586)
--   Allow ASes to deactivate their own users (PR #2589)
--   Remove refresh tokens (PR #2613)
--   Automatically set default displayname on register (PR #2617)
--   Log login requests (PR #2618)
--   Always return `is_public` in the `/groups/:group_id/rooms` API (PR #2630)
--   Avoid no-op media deletes (PR #2637) Thanks to @spantaleev!
--   Fix various embarrassing typos around `user_directory` and add some doc. (PR #2643)
--   Return whether a user is an admin within a group (PR #2647)
--   Namespace visibility options for groups (PR #2657)
--   Downcase UserIDs on registration (PR #2662)
--   Cache failures when fetching URL previews (PR #2669)
+-   Ignore `<noscript>` tags when generating URL preview descriptions. Thanks to @maximevaillancourt! ([\#2576](https://github.com/matrix-org/synapse/issues/2576))
+-   Register some /unstable endpoints in /r0 as well. Thanks to @krombel! ([\#2579](https://github.com/matrix-org/synapse/issues/2579))
+-   Support /keys/upload on /r0 as well as /unstable. ([\#2585](https://github.com/matrix-org/synapse/issues/2585))
+-   Front-end proxy: pass through auth header. ([\#2586](https://github.com/matrix-org/synapse/issues/2586))
+-   Allow ASes to deactivate their own users. ([\#2589](https://github.com/matrix-org/synapse/issues/2589))
+-   Remove refresh tokens. ([\#2613](https://github.com/matrix-org/synapse/issues/2613))
+-   Automatically set default displayname on register. ([\#2617](https://github.com/matrix-org/synapse/issues/2617))
+-   Log login requests. ([\#2618](https://github.com/matrix-org/synapse/issues/2618))
+-   Always return `is_public` in the `/groups/:group_id/rooms` API. ([\#2630](https://github.com/matrix-org/synapse/issues/2630))
+-   Avoid no-op media deletes. Thanks to @spantaleev! ([\#2637](https://github.com/matrix-org/synapse/issues/2637))
+-   Fix various embarrassing typos around `user_directory` and add some doc. ([\#2643](https://github.com/matrix-org/synapse/issues/2643))
+-   Return whether a user is an admin within a group. ([\#2647](https://github.com/matrix-org/synapse/issues/2647))
+-   Namespace visibility options for groups. ([\#2657](https://github.com/matrix-org/synapse/issues/2657))
+-   Downcase UserIDs on registration. ([\#2662](https://github.com/matrix-org/synapse/issues/2662))
+-   Cache failures when fetching URL previews. ([\#2669](https://github.com/matrix-org/synapse/issues/2669))
 
 Bug fixes:
 
--   Fix port script (PR #2577)
--   Fix error when running synapse with no logfile (PR #2581)
--   Fix UI auth when deleting devices (PR #2591)
--   Fix typo when checking if user is invited to group (PR #2599)
--   Fix the port script to drop NUL values in all tables (PR #2611)
--   Fix appservices being backlogged and not receiving new events due to a bug in `notify_interested_services` (PR #2631) Thanks to @xyzz!
--   Fix updating rooms avatar/display name when modified by admin (PR #2636) Thanks to @farialima!
--   Fix bug in state group storage (PR #2649)
--   Fix 500 on invalid utf-8 in request (PR #2663)
+-   Fix port script. ([\#2577](https://github.com/matrix-org/synapse/issues/2577))
+-   Fix error when running synapse with no logfile. ([\#2581](https://github.com/matrix-org/synapse/issues/2581))
+-   Fix UI auth when deleting devices. ([\#2591](https://github.com/matrix-org/synapse/issues/2591))
+-   Fix typo when checking if user is invited to group. ([\#2599](https://github.com/matrix-org/synapse/issues/2599))
+-   Fix the port script to drop NUL values in all tables. ([\#2611](https://github.com/matrix-org/synapse/issues/2611))
+-   Fix appservices being backlogged and not receiving new events due to a bug in `notify_interested_services`. Thanks to @xyzz! ([\#2631](https://github.com/matrix-org/synapse/issues/2631))
+-   Fix updating rooms avatar/display name when modified by admin. Thanks to @farialima! ([\#2636](https://github.com/matrix-org/synapse/issues/2636))
+-   Fix bug in state group storage. ([\#2649](https://github.com/matrix-org/synapse/issues/2649))
+-   Fix 500 on invalid utf-8 in request. ([\#2663](https://github.com/matrix-org/synapse/issues/2663))
 
 Changes in synapse v0.24.1 (2017-10-24)
 =======================================
 
 Bug fixes:
 
--   Fix updating group profiles over federation (PR #2567)
+-   Fix updating group profiles over federation. ([\#2567](https://github.com/matrix-org/synapse/issues/2567))
 
 Changes in synapse v0.24.0 (2017-10-23)
 =======================================
@@ -1783,31 +1783,31 @@ Changes in synapse v0.24.0-rc1 (2017-10-19)
 
 Features:
 
--   Add Group Server (PR #2352, #2363, #2374, #2377, #2378, #2382, #2410, #2426, #2430, #2454, #2471, #2472, #2544)
--   Add support for channel notifications (PR #2501)
--   Add basic implementation of backup media store (PR #2538)
--   Add config option to auto-join new users to rooms (PR #2545)
+-   Add Group Server. ([\#2352](https://github.com/matrix-org/synapse/issues/2352), [\#2363](https://github.com/matrix-org/synapse/issues/2363), [\#2374](https://github.com/matrix-org/synapse/issues/2374), [\#2377](https://github.com/matrix-org/synapse/issues/2377), [\#2378](https://github.com/matrix-org/synapse/issues/2378), [\#2382](https://github.com/matrix-org/synapse/issues/2382), [\#2410](https://github.com/matrix-org/synapse/issues/2410), [\#2426](https://github.com/matrix-org/synapse/issues/2426), [\#2430](https://github.com/matrix-org/synapse/issues/2430), [\#2454](https://github.com/matrix-org/synapse/issues/2454), [\#2471](https://github.com/matrix-org/synapse/issues/2471), [\#2472](https://github.com/matrix-org/synapse/issues/2472), [\#2544](https://github.com/matrix-org/synapse/issues/2544))
+-   Add support for channel notifications. ([\#2501](https://github.com/matrix-org/synapse/issues/2501))
+-   Add basic implementation of backup media store. ([\#2538](https://github.com/matrix-org/synapse/issues/2538))
+-   Add config option to auto-join new users to rooms. ([\#2545](https://github.com/matrix-org/synapse/issues/2545))
 
 Changes:
 
--   Make the spam checker a module (PR #2474)
--   Delete expired url cache data (PR #2478)
--   Ignore incoming events for rooms that we have left (PR #2490)
--   Allow spam checker to reject invites too (PR #2492)
--   Add room creation checks to spam checker (PR #2495)
--   Spam checking: add the invitee to `user_may_invite` (PR #2502)
--   Process events from federation for different rooms in parallel (PR #2520)
--   Allow error strings from spam checker (PR #2531)
--   Improve error handling for missing files in config (PR #2551)
+-   Make the spam checker a module. ([\#2474](https://github.com/matrix-org/synapse/issues/2474))
+-   Delete expired url cache data. ([\#2478](https://github.com/matrix-org/synapse/issues/2478))
+-   Ignore incoming events for rooms that we have left. ([\#2490](https://github.com/matrix-org/synapse/issues/2490))
+-   Allow spam checker to reject invites too. ([\#2492](https://github.com/matrix-org/synapse/issues/2492))
+-   Add room creation checks to spam checker. ([\#2495](https://github.com/matrix-org/synapse/issues/2495))
+-   Spam checking: add the invitee to `user_may_invite`. ([\#2502](https://github.com/matrix-org/synapse/issues/2502))
+-   Process events from federation for different rooms in parallel. ([\#2520](https://github.com/matrix-org/synapse/issues/2520))
+-   Allow error strings from spam checker. ([\#2531](https://github.com/matrix-org/synapse/issues/2531))
+-   Improve error handling for missing files in config. ([\#2551](https://github.com/matrix-org/synapse/issues/2551))
 
 Bug fixes:
 
--   Fix handling SERVFAILs when doing AAAA lookups for federation (PR #2477)
--   Fix incompatibility with newer versions of ujson (PR #2483) Thanks to @jeremycline!
--   Fix notification keywords that start/end with non-word chars (PR #2500)
--   Fix stack overflow and logcontexts from linearizer (PR #2532)
--   Fix 500 error when fields missing from `power_levels` event (PR #2552)
--   Fix 500 error when we get an error handling a PDU (PR #2553)
+-   Fix handling SERVFAILs when doing AAAA lookups for federation. ([\#2477](https://github.com/matrix-org/synapse/issues/2477))
+-   Fix incompatibility with newer versions of ujson. Thanks to @jeremycline! ([\#2483](https://github.com/matrix-org/synapse/issues/2483))
+-   Fix notification keywords that start/end with non-word chars. ([\#2500](https://github.com/matrix-org/synapse/issues/2500))
+-   Fix stack overflow and logcontexts from linearizer. ([\#2532](https://github.com/matrix-org/synapse/issues/2532))
+-   Fix 500 error when fields missing from `power_levels` event. ([\#2552](https://github.com/matrix-org/synapse/issues/2552))
+-   Fix 500 error when we get an error handling a PDU. ([\#2553](https://github.com/matrix-org/synapse/issues/2553))
 
 Changes in synapse v0.23.1 (2017-10-02)
 =======================================
@@ -1826,42 +1826,42 @@ Changes in synapse v0.23.0-rc2 (2017-09-26)
 
 Bug fixes:
 
--   Fix regression in performance of syncs (PR #2470)
+-   Fix regression in performance of syncs. ([\#2470](https://github.com/matrix-org/synapse/issues/2470))
 
 Changes in synapse v0.23.0-rc1 (2017-09-25)
 ===========================================
 
 Features:
 
--   Add a frontend proxy worker (PR #2344)
--   Add support for `event_id_only` push format (PR #2450)
--   Add a PoC for filtering spammy events (PR #2456)
--   Add a config option to block all room invites (PR #2457)
+-   Add a frontend proxy worker. ([\#2344](https://github.com/matrix-org/synapse/issues/2344))
+-   Add support for `event_id_only` push format. ([\#2450](https://github.com/matrix-org/synapse/issues/2450))
+-   Add a PoC for filtering spammy events. ([\#2456](https://github.com/matrix-org/synapse/issues/2456))
+-   Add a config option to block all room invites. ([\#2457](https://github.com/matrix-org/synapse/issues/2457))
 
 Changes:
 
--   Use bcrypt module instead of py-bcrypt (PR #2288) Thanks to @kyrias!
--   Improve performance of generating push notifications (PR #2343, #2357, #2365, #2366, #2371)
--   Improve DB performance for device list handling in sync (PR #2362)
--   Include a sample prometheus config (PR #2416)
--   Document known to work postgres version (PR #2433) Thanks to @ptman!
+-   Use bcrypt module instead of py-bcrypt. Thanks to @kyrias! ([\#2288](https://github.com/matrix-org/synapse/issues/2288))
+-   Improve performance of generating push notifications. ([\#2343](https://github.com/matrix-org/synapse/issues/2343), [\#2357](https://github.com/matrix-org/synapse/issues/2357), [\#2365](https://github.com/matrix-org/synapse/issues/2365), [\#2366](https://github.com/matrix-org/synapse/issues/2366), [\#2371](https://github.com/matrix-org/synapse/issues/2371))
+-   Improve DB performance for device list handling in sync. ([\#2362](https://github.com/matrix-org/synapse/issues/2362))
+-   Include a sample prometheus config. ([\#2416](https://github.com/matrix-org/synapse/issues/2416))
+-   Document known to work postgres version. Thanks to @ptman! ([\#2433](https://github.com/matrix-org/synapse/issues/2433))
 
 Bug fixes:
 
--   Fix caching error in the push evaluator (PR #2332)
--   Fix bug where pusherpool didn't start and broke some rooms (PR #2342)
--   Fix port script for user directory tables (PR #2375)
--   Fix device lists notifications when user rejoins a room (PR #2443, #2449)
--   Fix sync to always send down current state events in timeline (PR #2451)
--   Fix bug where guest users were incorrectly kicked (PR #2453)
--   Fix bug talking to IPv6 only servers using SRV records (PR #2462)
+-   Fix caching error in the push evaluator. ([\#2332](https://github.com/matrix-org/synapse/issues/2332))
+-   Fix bug where pusherpool didn't start and broke some rooms. ([\#2342](https://github.com/matrix-org/synapse/issues/2342))
+-   Fix port script for user directory tables. ([\#2375](https://github.com/matrix-org/synapse/issues/2375))
+-   Fix device lists notifications when user rejoins a room. ([\#2443](https://github.com/matrix-org/synapse/issues/2443), [\#2449](https://github.com/matrix-org/synapse/issues/2449))
+-   Fix sync to always send down current state events in timeline. ([\#2451](https://github.com/matrix-org/synapse/issues/2451))
+-   Fix bug where guest users were incorrectly kicked. ([\#2453](https://github.com/matrix-org/synapse/issues/2453))
+-   Fix bug talking to IPv6 only servers using SRV records. ([\#2462](https://github.com/matrix-org/synapse/issues/2462))
 
 Changes in synapse v0.22.1 (2017-07-06)
 =======================================
 
 Bug fixes:
 
--   Fix bug where pusher pool didn't start and caused issues when interacting with some rooms (PR #2342)
+-   Fix bug where pusher pool didn't start and caused issues when interacting with some rooms. ([\#2342](https://github.com/matrix-org/synapse/issues/2342))
 
 Changes in synapse v0.22.0 (2017-07-06)
 =======================================
@@ -1873,49 +1873,49 @@ Changes in synapse v0.22.0-rc2 (2017-07-04)
 
 Changes:
 
--   Improve performance of storing user IPs (PR #2307, #2308)
--   Slightly improve performance of verifying access tokens (PR #2320)
--   Slightly improve performance of event persistence (PR #2321)
--   Increase default cache factor size from 0.1 to 0.5 (PR #2330)
+-   Improve performance of storing user IPs. ([\#2307](https://github.com/matrix-org/synapse/issues/2307), [\#2308](https://github.com/matrix-org/synapse/issues/2308))
+-   Slightly improve performance of verifying access tokens. ([\#2320](https://github.com/matrix-org/synapse/issues/2320))
+-   Slightly improve performance of event persistence. ([\#2321](https://github.com/matrix-org/synapse/issues/2321))
+-   Increase default cache factor size from 0.1 to 0.5. ([\#2330](https://github.com/matrix-org/synapse/issues/2330))
 
 Bug fixes:
 
--   Fix bug with storing registration sessions that caused frequent CPU churn (PR #2319)
+-   Fix bug with storing registration sessions that caused frequent CPU churn. ([\#2319](https://github.com/matrix-org/synapse/issues/2319))
 
 Changes in synapse v0.22.0-rc1 (2017-06-26)
 ===========================================
 
 Features:
 
--   Add a user directory API (PR #2252, and many more)
--   Add shutdown room API to remove room from local server (PR #2291)
--   Add API to quarantine media (PR #2292)
--   Add new config option to not send event contents to push servers (PR #2301) Thanks to @cjdelisle!
+-   Add a user directory API. ([\#2252](https://github.com/matrix-org/synapse/issues/2252), and many more)
+-   Add shutdown room API to remove room from local server. ([\#2291](https://github.com/matrix-org/synapse/issues/2291))
+-   Add API to quarantine media. ([\#2292](https://github.com/matrix-org/synapse/issues/2292))
+-   Add new config option to not send event contents to push servers. Thanks to @cjdelisle! ([\#2301](https://github.com/matrix-org/synapse/issues/2301))
 
 Changes:
 
--   Various performance fixes (PR #2177, #2233, #2230, #2238, #2248, #2256, #2274)
--   Deduplicate sync filters (PR #2219) Thanks to @krombel!
--   Correct a typo in UPGRADE.rst (PR #2231) Thanks to @aaronraimist!
--   Add count of one time keys to sync stream (PR #2237)
--   Only store `event_auth` for state events (PR #2247)
--   Store URL cache preview downloads separately (PR #2299)
+-   Various performance fixes. ([\#2177](https://github.com/matrix-org/synapse/issues/2177), [\#2233](https://github.com/matrix-org/synapse/issues/2233), [\#2230](https://github.com/matrix-org/synapse/issues/2230), [\#2238](https://github.com/matrix-org/synapse/issues/2238), [\#2248](https://github.com/matrix-org/synapse/issues/2248), [\#2256](https://github.com/matrix-org/synapse/issues/2256), [\#2274](https://github.com/matrix-org/synapse/issues/2274))
+-   Deduplicate sync filters. Thanks to @krombel! ([\#2219](https://github.com/matrix-org/synapse/issues/2219))
+-   Correct a typo in UPGRADE.rst. Thanks to @aaronraimist! ([\#2231](https://github.com/matrix-org/synapse/issues/2231))
+-   Add count of one time keys to sync stream. ([\#2237](https://github.com/matrix-org/synapse/issues/2237))
+-   Only store `event_auth` for state events. ([\#2247](https://github.com/matrix-org/synapse/issues/2247))
+-   Store URL cache preview downloads separately. ([\#2299](https://github.com/matrix-org/synapse/issues/2299))
 
 Bug fixes:
 
--   Fix users not getting notifications when AS listened to that `user_id` (PR #2216) Thanks to @slipeer!
--   Fix users without push set up not getting notifications after joining rooms (PR #2236)
--   Fix preview url API to trim long descriptions (PR #2243)
--   Fix bug where we used cached but unpersisted state group as prev group, resulting in broken state of restart (PR #2263)
--   Fix removing of pushers when using workers (PR #2267)
--   Fix CORS headers to allow Authorization header (PR #2285) Thanks to @krombel!
+-   Fix users not getting notifications when AS listened to that `user_id`. Thanks to @slipeer! ([\#2216](https://github.com/matrix-org/synapse/issues/2216))
+-   Fix users without push set up not getting notifications after joining rooms. ([\#2236](https://github.com/matrix-org/synapse/issues/2236))
+-   Fix preview url API to trim long descriptions. ([\#2243](https://github.com/matrix-org/synapse/issues/2243))
+-   Fix bug where we used cached but unpersisted state group as prev group, resulting in broken state of restart. ([\#2263](https://github.com/matrix-org/synapse/issues/2263))
+-   Fix removing of pushers when using workers. ([\#2267](https://github.com/matrix-org/synapse/issues/2267))
+-   Fix CORS headers to allow Authorization header. Thanks to @krombel! ([\#2285](https://github.com/matrix-org/synapse/issues/2285))
 
 Changes in synapse v0.21.1 (2017-06-15)
 =======================================
 
 Bug fixes:
 
--   Fix bug in anonymous usage statistic reporting (PR #2281)
+-   Fix bug in anonymous usage statistic reporting. ([\#2281](https://github.com/matrix-org/synapse/issues/2281))
 
 Changes in synapse v0.21.0 (2017-05-18)
 =======================================
@@ -1927,116 +1927,116 @@ Changes in synapse v0.21.0-rc3 (2017-05-17)
 
 Features:
 
--   Add per user rate-limiting overrides (PR #2208)
--   Add config option to limit maximum number of events requested by `/sync` and `/messages` (PR #2221) Thanks to @psaavedra!
+-   Add per user rate-limiting overrides. ([\#2208](https://github.com/matrix-org/synapse/issues/2208))
+-   Add config option to limit maximum number of events requested by `/sync` and `/messages`. Thanks to @psaavedra! ([\#2221](https://github.com/matrix-org/synapse/issues/2221))
 
 Changes:
 
--   Various small performance fixes (PR #2201, #2202, #2224, #2226, #2227, #2228, #2229)
--   Update username availability checker API (PR #2209, #2213)
--   When purging, Don't de-delta state groups we're about to delete (PR #2214)
--   Documentation to check synapse version (PR #2215) Thanks to @hamber-dick!
--   Add an index to `event_search` to speed up purge history API (PR #2218)
+-   Various small performance fixes. ([\#2201](https://github.com/matrix-org/synapse/issues/2201), [\#2202](https://github.com/matrix-org/synapse/issues/2202), [\#2224](https://github.com/matrix-org/synapse/issues/2224), [\#2226](https://github.com/matrix-org/synapse/issues/2226), [\#2227](https://github.com/matrix-org/synapse/issues/2227), [\#2228](https://github.com/matrix-org/synapse/issues/2228), [\#2229](https://github.com/matrix-org/synapse/issues/2229))
+-   Update username availability checker API. ([\#2209](https://github.com/matrix-org/synapse/issues/2209), [\#2213](https://github.com/matrix-org/synapse/issues/2213))
+-   When purging, don't de-delta state groups we're about to delete. ([\#2214](https://github.com/matrix-org/synapse/issues/2214))
+-   Documentation to check synapse version. Thanks to @hamber-dick! ([\#2215](https://github.com/matrix-org/synapse/issues/2215))
+-   Add an index to `event_search` to speed up purge history API. ([\#2218](https://github.com/matrix-org/synapse/issues/2218))
 
 Bug fixes:
 
--   Fix API to allow clients to upload one-time-keys with new sigs (PR #2206)
+-   Fix API to allow clients to upload one-time-keys with new sigs. ([\#2206](https://github.com/matrix-org/synapse/issues/2206))
 
 Changes in synapse v0.21.0-rc2 (2017-05-08)
 ===========================================
 
 Changes:
 
--   Always mark remotes as up if we receive a signed request from them (PR #2190)
+-   Always mark remotes as up if we receive a signed request from them. ([\#2190](https://github.com/matrix-org/synapse/issues/2190))
 
 Bug fixes:
 
--   Fix bug where users got pushed for rooms they had muted (PR #2200)
+-   Fix bug where users got pushed for rooms they had muted. ([\#2200](https://github.com/matrix-org/synapse/issues/2200))
 
 Changes in synapse v0.21.0-rc1 (2017-05-08)
 ===========================================
 
 Features:
 
--   Add username availability checker API (PR #2183)
--   Add read marker API (PR #2120)
+-   Add username availability checker API. ([\#2183](https://github.com/matrix-org/synapse/issues/2183))
+-   Add read marker API. ([\#2120](https://github.com/matrix-org/synapse/issues/2120))
 
 Changes:
 
--   Enable guest access for the 3pl/3pid APIs (PR #1986)
--   Add setting to support TURN for guests (PR #2011)
--   Various performance improvements (PR #2075, #2076, #2080, #2083, #2108, #2158, #2176, #2185)
--   Make synctl a bit more user friendly (PR #2078, #2127) Thanks @APwhitehat!
--   Replace HTTP replication with TCP replication (PR #2082, #2097, #2098, #2099, #2103, #2014, #2016, #2115, #2116, #2117)
--   Support authenticated SMTP (PR #2102) Thanks @DanielDent!
--   Add a counter metric for successfully-sent transactions (PR #2121)
--   Propagate errors sensibly from proxied IS requests (PR #2147)
--   Add more granular event send metrics (PR #2178)
+-   Enable guest access for the 3pl/3pid APIs. ([\#1986](https://github.com/matrix-org/synapse/issues/1986))
+-   Add setting to support TURN for guests. ([\#2011](https://github.com/matrix-org/synapse/issues/2011))
+-   Various performance improvements. ([\#2075](https://github.com/matrix-org/synapse/issues/2075), [\#2076](https://github.com/matrix-org/synapse/issues/2076), [\#2080](https://github.com/matrix-org/synapse/issues/2080), [\#2083](https://github.com/matrix-org/synapse/issues/2083), [\#2108](https://github.com/matrix-org/synapse/issues/2108), [\#2158](https://github.com/matrix-org/synapse/issues/2158), [\#2176](https://github.com/matrix-org/synapse/issues/2176), [\#2185](https://github.com/matrix-org/synapse/issues/2185))
+-   Make synctl a bit more user friendly. Thanks @APwhitehat! ([\#2078](https://github.com/matrix-org/synapse/issues/2078), [\#2127](https://github.com/matrix-org/synapse/issues/2127))
+-   Replace HTTP replication with TCP replication. ([\#2082](https://github.com/matrix-org/synapse/issues/2082), [\#2097](https://github.com/matrix-org/synapse/issues/2097), [\#2098](https://github.com/matrix-org/synapse/issues/2098), [\#2099](https://github.com/matrix-org/synapse/issues/2099), [\#2103](https://github.com/matrix-org/synapse/issues/2103), [\#2014](https://github.com/matrix-org/synapse/issues/2014), [\#2016](https://github.com/matrix-org/synapse/issues/2016), [\#2115](https://github.com/matrix-org/synapse/issues/2115), [\#2116](https://github.com/matrix-org/synapse/issues/2116), [\#2117](https://github.com/matrix-org/synapse/issues/2117))
+-   Support authenticated SMTP. Thanks @DanielDent! ([\#2102](https://github.com/matrix-org/synapse/issues/2102))
+-   Add a counter metric for successfully-sent transactions. ([\#2121](https://github.com/matrix-org/synapse/issues/2121))
+-   Propagate errors sensibly from proxied IS requests. ([\#2147](https://github.com/matrix-org/synapse/issues/2147))
+-   Add more granular event send metrics. ([\#2178](https://github.com/matrix-org/synapse/issues/2178))
 
 Bug fixes:
 
--   Fix nuke-room script to work with current schema (PR #1927) Thanks @zuckschwerdt!
--   Fix db port script to not assume postgres tables are in the public schema (PR #2024) Thanks @jerrykan!
--   Fix getting latest device IP for user with no devices (PR #2118)
--   Fix rejection of invites to unreachable servers (PR #2145)
--   Fix code for reporting old verify keys in synapse (PR #2156)
--   Fix invite state to always include all events (PR #2163)
--   Fix bug where synapse would always fetch state for any missing event (PR #2170)
--   Fix a leak with timed out HTTP connections (PR #2180)
--   Fix bug where we didn't time out HTTP requests to ASes (PR #2192)
+-   Fix nuke-room script to work with current schema. Thanks @zuckschwerdt! ([\#1927](https://github.com/matrix-org/synapse/issues/1927))
+-   Fix db port script to not assume postgres tables are in the public schema. Thanks @jerrykan! ([\#2024](https://github.com/matrix-org/synapse/issues/2024))
+-   Fix getting latest device IP for user with no devices. ([\#2118](https://github.com/matrix-org/synapse/issues/2118))
+-   Fix rejection of invites to unreachable servers. ([\#2145](https://github.com/matrix-org/synapse/issues/2145))
+-   Fix code for reporting old verify keys in synapse. ([\#2156](https://github.com/matrix-org/synapse/issues/2156))
+-   Fix invite state to always include all events. ([\#2163](https://github.com/matrix-org/synapse/issues/2163))
+-   Fix bug where synapse would always fetch state for any missing event. ([\#2170](https://github.com/matrix-org/synapse/issues/2170))
+-   Fix a leak with timed out HTTP connections. ([\#2180](https://github.com/matrix-org/synapse/issues/2180))
+-   Fix bug where we didn't time out HTTP requests to ASes. ([\#2192](https://github.com/matrix-org/synapse/issues/2192))
 
 Docs:
 
--   Clarify doc for SQLite to PostgreSQL port (PR #1961) Thanks @benhylau!
--   Fix typo in synctl help (PR #2107) Thanks @HarHarLinks!
--   `web_client_location` documentation fix (PR #2131) Thanks @matthewjwolff!
--   Update README.rst with FreeBSD changes (PR #2132) Thanks @feld!
--   Clarify setting up metrics (PR #2149) Thanks @encks!
+-   Clarify doc for SQLite to PostgreSQL port. Thanks @benhylau! ([\#1961](https://github.com/matrix-org/synapse/issues/1961))
+-   Fix typo in synctl help. Thanks @HarHarLinks! ([\#2107](https://github.com/matrix-org/synapse/issues/2107))
+-   `web_client_location` documentation fix. Thanks @matthewjwolff! ([\#2131](https://github.com/matrix-org/synapse/issues/2131))
+-   Update README.rst with FreeBSD changes. Thanks @feld! ([\#2132](https://github.com/matrix-org/synapse/issues/2132))
+-   Clarify setting up metrics. Thanks @encks! ([\#2149](https://github.com/matrix-org/synapse/issues/2149))
 
 Changes in synapse v0.20.0 (2017-04-11)
 =======================================
 
 Bug fixes:
 
--   Fix joining rooms over federation where not all servers in the room saw the new server had joined (PR #2094)
+-   Fix joining rooms over federation where not all servers in the room saw the new server had joined. ([\#2094](https://github.com/matrix-org/synapse/issues/2094))
 
 Changes in synapse v0.20.0-rc1 (2017-03-30)
 ===========================================
 
 Features:
 
--   Add `delete_devices` API (PR #1993)
--   Add phone number registration/login support (PR #1994, #2055)
+-   Add `delete_devices` API. ([\#1993](https://github.com/matrix-org/synapse/issues/1993))
+-   Add phone number registration/login support. ([\#1994](https://github.com/matrix-org/synapse/issues/1994), [\#2055](https://github.com/matrix-org/synapse/issues/2055))
 
 Changes:
 
--   Use JSONSchema for validation of filters. Thanks @pik! (PR #1783)
--   Reread log config on SIGHUP (PR #1982)
--   Speed up public room list (PR #1989)
--   Add helpful texts to logger config options (PR #1990)
--   Minor `/sync` performance improvements. (PR #2002, #2013, #2022)
--   Add some debug to help diagnose weird federation issue (PR #2035)
--   Correctly limit retries for all federation requests (PR #2050, #2061)
--   Don't lock table when persisting new one time keys (PR #2053)
--   Reduce some CPU work on DB threads (PR #2054)
--   Cache hosts in room (PR #2060)
--   Batch sending of device list pokes (PR #2063)
--   Speed up persist event path in certain edge cases (PR #2070)
+-   Use JSONSchema for validation of filters. Thanks @pik! ([\#1783](https://github.com/matrix-org/synapse/issues/1783))
+-   Reread log config on SIGHUP. ([\#1982](https://github.com/matrix-org/synapse/issues/1982))
+-   Speed up public room list. ([\#1989](https://github.com/matrix-org/synapse/issues/1989))
+-   Add helpful texts to logger config options. ([\#1990](https://github.com/matrix-org/synapse/issues/1990))
+-   Minor `/sync` performance improvements. ([\#2002](https://github.com/matrix-org/synapse/issues/2002), [\#2013](https://github.com/matrix-org/synapse/issues/2013), [\#2022](https://github.com/matrix-org/synapse/issues/2022))
+-   Add some debug to help diagnose weird federation issue. ([\#2035](https://github.com/matrix-org/synapse/issues/2035))
+-   Correctly limit retries for all federation requests. ([\#2050](https://github.com/matrix-org/synapse/issues/2050), [\#2061](https://github.com/matrix-org/synapse/issues/2061))
+-   Don't lock table when persisting new one time keys. ([\#2053](https://github.com/matrix-org/synapse/issues/2053))
+-   Reduce some CPU work on DB threads. ([\#2054](https://github.com/matrix-org/synapse/issues/2054))
+-   Cache hosts in room. ([\#2060](https://github.com/matrix-org/synapse/issues/2060))
+-   Batch sending of device list pokes. ([\#2063](https://github.com/matrix-org/synapse/issues/2063))
+-   Speed up persist event path in certain edge cases. ([\#2070](https://github.com/matrix-org/synapse/issues/2070))
 
 Bug fixes:
 
--   Fix bug where `current_state_events` renamed to `current_state_ids` (PR #1849)
--   Fix routing loop when fetching remote media (PR #1992)
--   Fix `current_state_events` table to not lie (PR #1996)
--   Fix CAS login to handle PartialDownloadError (PR #1997)
--   Fix assertion to stop transaction queue getting wedged (PR #2010)
--   Fix presence to fallback to `last_active_ts` if it beats the last sync time. Thanks @Half-Shot! (PR #2014)
--   Fix bug when federation received a PDU while a room join is in progress (PR #2016)
--   Fix resetting state on rejected events (PR #2025)
--   Fix installation issues in readme. Thanks @ricco386 (PR #2037)
--   Fix caching of remote servers' signature keys (PR #2042)
--   Fix some leaking log context (PR #2048, #2049, #2057, #2058)
--   Fix rejection of invites not reaching sync (PR #2056)
+-   Fix bug where `current_state_events` renamed to `current_state_ids`. ([\#1849](https://github.com/matrix-org/synapse/issues/1849))
+-   Fix routing loop when fetching remote media. ([\#1992](https://github.com/matrix-org/synapse/issues/1992))
+-   Fix `current_state_events` table to not lie. ([\#1996](https://github.com/matrix-org/synapse/issues/1996))
+-   Fix CAS login to handle PartialDownloadError. ([\#1997](https://github.com/matrix-org/synapse/issues/1997))
+-   Fix assertion to stop transaction queue getting wedged. ([\#2010](https://github.com/matrix-org/synapse/issues/2010))
+-   Fix presence to fallback to `last_active_ts` if it beats the last sync time. Thanks @Half-Shot! ([\#2014](https://github.com/matrix-org/synapse/issues/2014))
+-   Fix bug when federation received a PDU while a room join is in progress. ([\#2016](https://github.com/matrix-org/synapse/issues/2016))
+-   Fix resetting state on rejected events. ([\#2025](https://github.com/matrix-org/synapse/issues/2025))
+-   Fix installation issues in readme. Thanks @ricco386. ([\#2037](https://github.com/matrix-org/synapse/issues/2037))
+-   Fix caching of remote servers' signature keys. ([\#2042](https://github.com/matrix-org/synapse/issues/2042))
+-   Fix some leaking log context. ([\#2048](https://github.com/matrix-org/synapse/issues/2048), [\#2049](https://github.com/matrix-org/synapse/issues/2049), [\#2057](https://github.com/matrix-org/synapse/issues/2057), [\#2058](https://github.com/matrix-org/synapse/issues/2058))
+-   Fix rejection of invites not reaching sync. ([\#2056](https://github.com/matrix-org/synapse/issues/2056))
 
 Changes in synapse v0.19.3 (2017-03-20)
 =======================================
@@ -2055,36 +2055,36 @@ Changes in synapse v0.19.3-rc1 (2017-03-08)
 
 Features:
 
--   Add some administration functionalities. Thanks to morteza-araby! (PR #1784)
+-   Add some administration functionalities. Thanks to morteza-araby! ([\#1784](https://github.com/matrix-org/synapse/issues/1784))
 
 Changes:
 
--   Reduce database table sizes (PR #1873, #1916, #1923, #1963)
--   Update contrib/ to not use syutil. Thanks to andrewshadura! (PR #1907)
--   Don't fetch current state when sending an event in common case (PR #1955)
+-   Reduce database table sizes. ([\#1873](https://github.com/matrix-org/synapse/issues/1873), [\#1916](https://github.com/matrix-org/synapse/issues/1916), [\#1923](https://github.com/matrix-org/synapse/issues/1923), [\#1963](https://github.com/matrix-org/synapse/issues/1963))
+-   Update contrib/ to not use syutil. Thanks to andrewshadura! ([\#1907](https://github.com/matrix-org/synapse/issues/1907))
+-   Don't fetch current state when sending an event in common case. ([\#1955](https://github.com/matrix-org/synapse/issues/1955))
 
 Bug fixes:
 
--   Fix synapse_port_db failure. Thanks to Pneumaticat! (PR #1904)
--   Fix caching to not cache error responses (PR #1913)
--   Fix APIs to make kick & ban reasons work (PR #1917)
--   Fix bugs in the /keys/changes api (PR #1921)
--   Fix bug where users couldn't forget rooms they were banned from (PR #1922)
--   Fix issue with long language values in pushers API (PR #1925)
--   Fix a race in transaction queue (PR #1930)
--   Fix dynamic thumbnailing to preserve aspect ratio. Thanks to jkolo! (PR #1945)
--   Fix device list update to not constantly resync (PR #1964)
--   Fix potential for huge memory usage when getting device that have changed (PR #1969)
+-   Fix synapse_port_db failure. Thanks to Pneumaticat! ([\#1904](https://github.com/matrix-org/synapse/issues/1904))
+-   Fix caching to not cache error responses. ([\#1913](https://github.com/matrix-org/synapse/issues/1913))
+-   Fix APIs to make kick & ban reasons work. ([\#1917](https://github.com/matrix-org/synapse/issues/1917))
+-   Fix bugs in the /keys/changes api. ([\#1921](https://github.com/matrix-org/synapse/issues/1921))
+-   Fix bug where users couldn't forget rooms they were banned from. ([\#1922](https://github.com/matrix-org/synapse/issues/1922))
+-   Fix issue with long language values in pushers API. ([\#1925](https://github.com/matrix-org/synapse/issues/1925))
+-   Fix a race in transaction queue. ([\#1930](https://github.com/matrix-org/synapse/issues/1930))
+-   Fix dynamic thumbnailing to preserve aspect ratio. Thanks to jkolo! ([\#1945](https://github.com/matrix-org/synapse/issues/1945))
+-   Fix device list update to not constantly resync. ([\#1964](https://github.com/matrix-org/synapse/issues/1964))
+-   Fix potential for huge memory usage when getting devices that have changed. ([\#1969](https://github.com/matrix-org/synapse/issues/1969))
 
 Changes in synapse v0.19.2 (2017-02-20)
 =======================================
 
--   Fix bug with event visibility check in /context/ API. Thanks to Tokodomo for pointing it out! (PR #1929)
+-   Fix bug with event visibility check in /context/ API. Thanks to Tokodomo for pointing it out! ([\#1929](https://github.com/matrix-org/synapse/issues/1929))
 
 Changes in synapse v0.19.1 (2017-02-09)
 =======================================
 
--   Fix bug where state was incorrectly reset in a room when synapse received an event over federation that did not pass auth checks (PR #1892)
+-   Fix bug where state was incorrectly reset in a room when synapse received an event over federation that did not pass auth checks. ([\#1892](https://github.com/matrix-org/synapse/issues/1892))
 
 Changes in synapse v0.19.0 (2017-02-04)
 =======================================
@@ -2094,59 +2094,59 @@ No changes since RC 4.
 Changes in synapse v0.19.0-rc4 (2017-02-02)
 ===========================================
 
--   Bump cache sizes for common membership queries (PR #1879)
+-   Bump cache sizes for common membership queries. ([\#1879](https://github.com/matrix-org/synapse/issues/1879))
 
 Changes in synapse v0.19.0-rc3 (2017-02-02)
 ===========================================
 
--   Fix email push in pusher worker (PR #1875)
--   Make `presence.get_new_events` a bit faster (PR #1876)
--   Make /keys/changes a bit more performant (PR #1877)
+-   Fix email push in pusher worker. ([\#1875](https://github.com/matrix-org/synapse/issues/1875))
+-   Make `presence.get_new_events` a bit faster. ([\#1876](https://github.com/matrix-org/synapse/issues/1876))
+-   Make /keys/changes a bit more performant. ([\#1877](https://github.com/matrix-org/synapse/issues/1877))
 
 Changes in synapse v0.19.0-rc2 (2017-02-02)
 ===========================================
 
--   Include newly joined users in /keys/changes API (PR #1872)
+-   Include newly joined users in /keys/changes API. ([\#1872](https://github.com/matrix-org/synapse/issues/1872))
 
 Changes in synapse v0.19.0-rc1 (2017-02-02)
 ===========================================
 
 Features:
 
--   Add support for specifying multiple bind addresses (PR #1709, #1712, #1795, #1835). Thanks to @kyrias!
--   Add /account/3pid/delete endpoint (PR #1714)
--   Add config option to configure the Riot URL used in notification emails (PR #1811). Thanks to @aperezdc!
--   Add username and password config options for turn server (PR #1832). Thanks to @xsteadfastx!
--   Implement device lists updates over federation (PR #1857, #1861, #1864)
--   Implement /keys/changes (PR #1869, #1872)
+-   Add support for specifying multiple bind addresses. Thanks to @kyrias! ([\#1709](https://github.com/matrix-org/synapse/issues/1709), [\#1712](https://github.com/matrix-org/synapse/issues/1712), [\#1795](https://github.com/matrix-org/synapse/issues/1795), [\#1835](https://github.com/matrix-org/synapse/issues/1835))
+-   Add /account/3pid/delete endpoint. ([\#1714](https://github.com/matrix-org/synapse/issues/1714))
+-   Add config option to configure the Riot URL used in notification emails. Thanks to @aperezdc! ([\#1811](https://github.com/matrix-org/synapse/issues/1811))
+-   Add username and password config options for turn server. Thanks to @xsteadfastx! ([\#1832](https://github.com/matrix-org/synapse/issues/1832))
+-   Implement device lists updates over federation. ([\#1857](https://github.com/matrix-org/synapse/issues/1857), [\#1861](https://github.com/matrix-org/synapse/issues/1861), [\#1864](https://github.com/matrix-org/synapse/issues/1864))
+-   Implement /keys/changes. ([\#1869](https://github.com/matrix-org/synapse/issues/1869), [\#1872](https://github.com/matrix-org/synapse/issues/1872))
 
 Changes:
 
--   Improve IPv6 support (PR #1696). Thanks to @kyrias and @glyph!
--   Log which files we saved attachments to in the `media_repository` (PR #1791)
--   Linearize updates to membership via PUT /state/ to better handle multiple joins (PR #1787)
--   Limit number of entries to prefill from cache on startup (PR #1792)
--   Remove `full_twisted_stacktraces` option (PR #1802)
--   Measure size of some caches by sum of the size of cached values (PR #1815)
--   Measure metrics of `string_cache` (PR #1821)
--   Reduce logging verbosity (PR #1822, #1823, #1824)
--   Don't clobber a displayname or `avatar_url` if provided by an m.room.member event (PR #1852)
--   Better handle 401/404 response for federation /send/ (PR #1866, #1871)
+-   Improve IPv6 support. Thanks to @kyrias and @glyph! ([\#1696](https://github.com/matrix-org/synapse/issues/1696))
+-   Log which files we saved attachments to in the `media_repository`. ([\#1791](https://github.com/matrix-org/synapse/issues/1791))
+-   Linearize updates to membership via PUT /state/ to better handle multiple joins. ([\#1787](https://github.com/matrix-org/synapse/issues/1787))
+-   Limit number of entries to prefill from cache on startup. ([\#1792](https://github.com/matrix-org/synapse/issues/1792))
+-   Remove `full_twisted_stacktraces` option. ([\#1802](https://github.com/matrix-org/synapse/issues/1802))
+-   Measure size of some caches by sum of the size of cached values. ([\#1815](https://github.com/matrix-org/synapse/issues/1815))
+-   Measure metrics of `string_cache`. ([\#1821](https://github.com/matrix-org/synapse/issues/1821))
+-   Reduce logging verbosity. ([\#1822](https://github.com/matrix-org/synapse/issues/1822), [\#1823](https://github.com/matrix-org/synapse/issues/1823), [\#1824](https://github.com/matrix-org/synapse/issues/1824))
+-   Don't clobber a displayname or `avatar_url` if provided by an m.room.member event. ([\#1852](https://github.com/matrix-org/synapse/issues/1852))
+-   Better handle 401/404 response for federation /send/. ([\#1866](https://github.com/matrix-org/synapse/issues/1866), [\#1871](https://github.com/matrix-org/synapse/issues/1871))
 
 Fixes:
 
--   Fix ability to change password to a non-ascii one (PR #1711)
--   Fix push getting stuck due to looking at the wrong view of state (PR #1820)
--   Fix email address comparison to be case insensitive (PR #1827)
--   Fix occasional inconsistencies of room membership (PR #1836, #1840)
+-   Fix ability to change password to a non-ascii one. ([\#1711](https://github.com/matrix-org/synapse/issues/1711))
+-   Fix push getting stuck due to looking at the wrong view of state. ([\#1820](https://github.com/matrix-org/synapse/issues/1820))
+-   Fix email address comparison to be case insensitive. ([\#1827](https://github.com/matrix-org/synapse/issues/1827))
+-   Fix occasional inconsistencies of room membership. ([\#1836](https://github.com/matrix-org/synapse/issues/1836), [\#1840](https://github.com/matrix-org/synapse/issues/1840))
 
 Performance:
 
--   Don't block messages sending on bumping presence (PR #1789)
--   Change `device_inbox` stream index to include user (PR #1793)
--   Optimise state resolution (PR #1818)
--   Use DB cache of joined users for presence (PR #1862)
--   Add an index to make membership queries faster (PR #1867)
+-   Don't block messages sending on bumping presence. ([\#1789](https://github.com/matrix-org/synapse/issues/1789))
+-   Change `device_inbox` stream index to include user. ([\#1793](https://github.com/matrix-org/synapse/issues/1793))
+-   Optimise state resolution. ([\#1818](https://github.com/matrix-org/synapse/issues/1818))
+-   Use DB cache of joined users for presence. ([\#1862](https://github.com/matrix-org/synapse/issues/1862))
+-   Add an index to make membership queries faster. ([\#1867](https://github.com/matrix-org/synapse/issues/1867))
 
 Changes in synapse v0.18.7 (2017-01-09)
 =======================================
@@ -2165,79 +2165,79 @@ Changes in synapse v0.18.7-rc1 (2017-01-06)
 
 Bug fixes:
 
--   Fix error in \#PR 1764 to actually fix the nightmare \#1753 bug.
+-   Fix error in [\#1764](https://github.com/matrix-org/synapse/issues/1764) to actually fix the nightmare [\#1753](https://github.com/matrix-org/synapse/issues/1753) bug.
 -   Improve deadlock logging further
--   Discard inbound federation traffic from invalid domains, to immunise against \#1753
+-   Discard inbound federation traffic from invalid domains, to immunise against [\#1753](https://github.com/matrix-org/synapse/issues/1753).
 
 Changes in synapse v0.18.6 (2017-01-06)
 =======================================
 
 Bug fixes:
 
--   Fix bug when checking if a guest user is allowed to join a room (PR #1772) Thanks to Patrik Oldsberg for diagnosing and the fix!
+-   Fix bug when checking if a guest user is allowed to join a room. Thanks to Patrik Oldsberg for diagnosing and the fix! ([\#1772](https://github.com/matrix-org/synapse/issues/1772))
 
 Changes in synapse v0.18.6-rc3 (2017-01-05)
 ===========================================
 
 Bug fixes:
 
--   Fix bug where we failed to send ban events to the banned server (PR #1758)
--   Fix bug where we sent event that didn't originate on this server to other servers (PR #1764)
--   Fix bug where processing an event from a remote server took a long time because we were making long HTTP requests (PR #1765, PR #1744)
+-   Fix bug where we failed to send ban events to the banned server. ([\#1758](https://github.com/matrix-org/synapse/issues/1758))
+-   Fix bug where we sent event that didn't originate on this server to other servers. ([\#1764](https://github.com/matrix-org/synapse/issues/1764))
+-   Fix bug where processing an event from a remote server took a long time because we were making long HTTP requests. ([\#1765](https://github.com/matrix-org/synapse/issues/1765), [\#1744](https://github.com/matrix-org/synapse/issues/1744))
 
 Changes:
 
--   Improve logging for debugging deadlocks (PR #1766, PR #1767)
+-   Improve logging for debugging deadlocks. ([\#1766](https://github.com/matrix-org/synapse/issues/1766), [\#1767](https://github.com/matrix-org/synapse/issues/1767))
 
 Changes in synapse v0.18.6-rc2 (2016-12-30)
 ===========================================
 
 Bug fixes:
 
--   Fix memory leak in twisted by initialising logging correctly (PR #1731)
--   Fix bug where fetching missing events took an unacceptable amount of time in large rooms (PR #1734)
+-   Fix memory leak in twisted by initialising logging correctly. ([\#1731](https://github.com/matrix-org/synapse/issues/1731))
+-   Fix bug where fetching missing events took an unacceptable amount of time in large rooms. ([\#1734](https://github.com/matrix-org/synapse/issues/1734))
 
 Changes in synapse v0.18.6-rc1 (2016-12-29)
 ===========================================
 
 Bug fixes:
 
--   Make sure that outbound connections are closed (PR #1725)
+-   Make sure that outbound connections are closed. ([\#1725](https://github.com/matrix-org/synapse/issues/1725))
 
 Changes in synapse v0.18.5 (2016-12-16)
 =======================================
 
 Bug fixes:
 
--   Fix federation /backfill returning events it shouldn't (PR #1700)
--   Fix crash in url preview (PR #1701)
+-   Fix federation /backfill returning events it shouldn't. ([\#1700](https://github.com/matrix-org/synapse/issues/1700))
+-   Fix crash in url preview. ([\#1701](https://github.com/matrix-org/synapse/issues/1701))
 
 Changes in synapse v0.18.5-rc3 (2016-12-13)
 ===========================================
 
 Features:
 
--   Add support for E2E for guests (PR #1653)
--   Add new API appservice specific public room list (PR #1676)
--   Add new room membership APIs (PR #1680)
+-   Add support for E2E for guests. ([\#1653](https://github.com/matrix-org/synapse/issues/1653))
+-   Add new API appservice specific public room list. ([\#1676](https://github.com/matrix-org/synapse/issues/1676))
+-   Add new room membership APIs. ([\#1680](https://github.com/matrix-org/synapse/issues/1680))
 
 Changes:
 
--   Enable guest access for private rooms by default (PR #653)
--   Limit the number of events that can be created on a given room concurrently (PR #1620)
--   Log the args that we have on UI auth completion (PR #1649)
--   Stop generating `refresh_tokens` (PR #1654)
--   Stop putting a time caveat on access tokens (PR #1656)
--   Remove unspecced GET endpoints for e2e keys (PR #1694)
+-   Enable guest access for private rooms by default. ([\#653](https://github.com/matrix-org/synapse/issues/653))
+-   Limit the number of events that can be created on a given room concurrently. ([\#1620](https://github.com/matrix-org/synapse/issues/1620))
+-   Log the args that we have on UI auth completion. ([\#1649](https://github.com/matrix-org/synapse/issues/1649))
+-   Stop generating `refresh_tokens`. ([\#1654](https://github.com/matrix-org/synapse/issues/1654))
+-   Stop putting a time caveat on access tokens. ([\#1656](https://github.com/matrix-org/synapse/issues/1656))
+-   Remove unspecced GET endpoints for e2e keys. ([\#1694](https://github.com/matrix-org/synapse/issues/1694))
 
 Bug fixes:
 
--   Fix handling of 500 and 429's over federation (PR #1650)
--   Fix Content-Type header parsing (PR #1660)
--   Fix error when previewing sites that include unicode, thanks to kyrias (PR #1664)
--   Fix some cases where we drop read receipts (PR #1678)
--   Fix bug where calls to `/sync` didn't correctly timeout (PR #1683)
--   Fix bug where E2E key query would fail if a single remote host failed (PR #1686)
+-   Fix handling of 500 and 429's over federation. ([\#1650](https://github.com/matrix-org/synapse/issues/1650))
+-   Fix Content-Type header parsing. ([\#1660](https://github.com/matrix-org/synapse/issues/1660))
+-   Fix error when previewing sites that include unicode, thanks to kyrias. ([\#1664](https://github.com/matrix-org/synapse/issues/1664))
+-   Fix some cases where we drop read receipts. ([\#1678](https://github.com/matrix-org/synapse/issues/1678))
+-   Fix bug where calls to `/sync` didn't correctly timeout. ([\#1683](https://github.com/matrix-org/synapse/issues/1683))
+-   Fix bug where E2E key query would fail if a single remote host failed. ([\#1686](https://github.com/matrix-org/synapse/issues/1686))
 
 Changes in synapse v0.18.5-rc2 (2016-11-24)
 ===========================================
@@ -2251,37 +2251,37 @@ Changes in synapse v0.18.5-rc1 (2016-11-24)
 
 Features:
 
--   Implement `event_fields` in filters (PR #1638)
+-   Implement `event_fields` in filters. ([\#1638](https://github.com/matrix-org/synapse/issues/1638))
 
 Changes:
 
--   Use external ldap auth package (PR #1628)
--   Split out federation transaction sending to a worker (PR #1635)
--   Fail with a coherent error message if /sync?filter= is invalid (PR #1636)
--   More efficient notif count queries (PR #1644)
+-   Use external ldap auth package. ([\#1628](https://github.com/matrix-org/synapse/issues/1628))
+-   Split out federation transaction sending to a worker. ([\#1635](https://github.com/matrix-org/synapse/issues/1635))
+-   Fail with a coherent error message if /sync?filter= is invalid. ([\#1636](https://github.com/matrix-org/synapse/issues/1636))
+-   More efficient notif count queries. ([\#1644](https://github.com/matrix-org/synapse/issues/1644))
 
 Changes in synapse v0.18.4 (2016-11-22)
 =======================================
 
 Bug fixes:
 
--   Add workaround for buggy clients that the fail to register (PR #1632)
+-   Add workaround for buggy clients that fail to register. ([\#1632](https://github.com/matrix-org/synapse/issues/1632))
 
 Changes in synapse v0.18.4-rc1 (2016-11-14)
 ===========================================
 
 Changes:
 
--   Various database efficiency improvements (PR #1188, #1192)
--   Update default config to blacklist more internal IPs, thanks to Euan Kemp (PR #1198)
--   Allow specifying duration in minutes in config, thanks to Daniel Dent (PR #1625)
+-   Various database efficiency improvements. ([\#1188](https://github.com/matrix-org/synapse/issues/1188), [\#1192](https://github.com/matrix-org/synapse/issues/1192))
+-   Update default config to blacklist more internal IPs, thanks to Euan Kemp. ([\#1198](https://github.com/matrix-org/synapse/issues/1198))
+-   Allow specifying duration in minutes in config, thanks to Daniel Dent. ([\#1625](https://github.com/matrix-org/synapse/issues/1625))
 
 Bug fixes:
 
--   Fix media repo to set CORs headers on responses (PR #1190)
--   Fix registration to not error on non-ascii passwords (PR #1191)
--   Fix create event code to limit the number of `prev_events` (PR #1615)
--   Fix bug in transaction ID deduplication (PR #1624)
+-   Fix media repo to set CORS headers on responses. ([\#1190](https://github.com/matrix-org/synapse/issues/1190))
+-   Fix registration to not error on non-ascii passwords. ([\#1191](https://github.com/matrix-org/synapse/issues/1191))
+-   Fix create event code to limit the number of `prev_events`. ([\#1615](https://github.com/matrix-org/synapse/issues/1615))
+-   Fix bug in transaction ID deduplication. ([\#1624](https://github.com/matrix-org/synapse/issues/1624))
 
 Changes in synapse v0.18.3 (2016-11-08)
 =======================================
@@ -2302,32 +2302,32 @@ Changes in synapse v0.18.2-rc5 (2016-10-28)
 
 Bug fixes:
 
--   Fix prometheus process metrics in worker processes (PR #1184)
+-   Fix prometheus process metrics in worker processes. ([\#1184](https://github.com/matrix-org/synapse/issues/1184))
 
 Changes in synapse v0.18.2-rc4 (2016-10-27)
 ===========================================
 
 Bug fixes:
 
--   Fix `user_threepids` schema delta, which in some instances prevented startup after upgrade (PR #1183)
+-   Fix `user_threepids` schema delta, which in some instances prevented startup after upgrade. ([\#1183](https://github.com/matrix-org/synapse/issues/1183))
 
 Changes in synapse v0.18.2-rc3 (2016-10-27)
 ===========================================
 
 Changes:
 
--   Allow clients to supply access tokens as headers (PR #1098)
--   Clarify error codes for GET /filter/, thanks to Alexander Maznev (PR #1164)
--   Make password reset email field case insensitive (PR #1170)
--   Reduce redundant database work in email pusher (PR #1174)
--   Allow configurable rate limiting per AS (PR #1175)
--   Check whether to ratelimit sooner to avoid work (PR #1176)
--   Standardise prometheus metrics (PR #1177)
+-   Allow clients to supply access tokens as headers. ([\#1098](https://github.com/matrix-org/synapse/issues/1098))
+-   Clarify error codes for GET /filter/, thanks to Alexander Maznev. ([\#1164](https://github.com/matrix-org/synapse/issues/1164))
+-   Make password reset email field case insensitive. ([\#1170](https://github.com/matrix-org/synapse/issues/1170))
+-   Reduce redundant database work in email pusher. ([\#1174](https://github.com/matrix-org/synapse/issues/1174))
+-   Allow configurable rate limiting per AS. ([\#1175](https://github.com/matrix-org/synapse/issues/1175))
+-   Check whether to ratelimit sooner to avoid work. ([\#1176](https://github.com/matrix-org/synapse/issues/1176))
+-   Standardise prometheus metrics. ([\#1177](https://github.com/matrix-org/synapse/issues/1177))
 
 Bug fixes:
 
--   Fix incredibly slow back pagination query (PR #1178)
--   Fix infinite typing bug (PR #1179)
+-   Fix incredibly slow back pagination query. ([\#1178](https://github.com/matrix-org/synapse/issues/1178))
+-   Fix infinite typing bug. ([\#1179](https://github.com/matrix-org/synapse/issues/1179))
 
 Changes in synapse v0.18.2-rc2 (2016-10-25)
 ===========================================
@@ -2339,20 +2339,20 @@ Changes in synapse v0.18.2-rc1 (2016-10-17)
 
 Changes:
 
--   Remove redundant `event_auth` index (PR #1113)
--   Reduce DB hits for replication (PR #1141)
--   Implement pluggable password auth (PR #1155)
--   Remove rate limiting from app service senders and fix `get_or_create_user` requester, thanks to Patrik Oldsberg (PR #1157)
--   window.postmessage for Interactive Auth fallback (PR #1159)
--   Use sys.executable instead of hardcoded python, thanks to Pedro Larroy (PR #1162)
--   Add config option for adding additional TLS fingerprints (PR #1167)
--   User-interactive auth on delete device (PR #1168)
+-   Remove redundant `event_auth` index. ([\#1113](https://github.com/matrix-org/synapse/issues/1113))
+-   Reduce DB hits for replication. ([\#1141](https://github.com/matrix-org/synapse/issues/1141))
+-   Implement pluggable password auth. ([\#1155](https://github.com/matrix-org/synapse/issues/1155))
+-   Remove rate limiting from app service senders and fix `get_or_create_user` requester, thanks to Patrik Oldsberg. ([\#1157](https://github.com/matrix-org/synapse/issues/1157))
+-   window.postmessage for Interactive Auth fallback. ([\#1159](https://github.com/matrix-org/synapse/issues/1159))
+-   Use sys.executable instead of hardcoded python, thanks to Pedro Larroy. ([\#1162](https://github.com/matrix-org/synapse/issues/1162))
+-   Add config option for adding additional TLS fingerprints. ([\#1167](https://github.com/matrix-org/synapse/issues/1167))
+-   User-interactive auth on delete device. ([\#1168](https://github.com/matrix-org/synapse/issues/1168))
 
 Bug fixes:
 
--   Fix not being allowed to set your own `state_key`, thanks to Patrik Oldsberg (PR #1150)
--   Fix interactive auth to return 401 from for incorrect password (PR #1160, #1166)
--   Fix email push notifs being dropped (PR #1169)
+-   Fix not being allowed to set your own `state_key`, thanks to Patrik Oldsberg. ([\#1150](https://github.com/matrix-org/synapse/issues/1150))
+-   Fix interactive auth to return 401 for incorrect password. ([\#1160](https://github.com/matrix-org/synapse/issues/1160), [\#1166](https://github.com/matrix-org/synapse/issues/1166))
+-   Fix email push notifs being dropped. ([\#1169](https://github.com/matrix-org/synapse/issues/1169))
 
 Changes in synapse v0.18.1 (2016-10-05)
 =======================================
@@ -2364,19 +2364,19 @@ Changes in synapse v0.18.1-rc1 (2016-09-30)
 
 Features:
 
--   Add `total_room_count_estimate` to `/publicRooms` (PR #1133)
+-   Add `total_room_count_estimate` to `/publicRooms`. ([\#1133](https://github.com/matrix-org/synapse/issues/1133))
 
 Changes:
 
--   Time out typing over federation (PR #1140)
--   Restructure LDAP authentication (PR #1153)
+-   Time out typing over federation. ([\#1140](https://github.com/matrix-org/synapse/issues/1140))
+-   Restructure LDAP authentication. ([\#1153](https://github.com/matrix-org/synapse/issues/1153))
 
 Bug fixes:
 
--   Fix 3pid invites when server is already in the room (PR #1136)
--   Fix upgrading with SQLite taking lots of CPU for a few days after upgrade (PR #1144)
--   Fix upgrading from very old database versions (PR #1145)
--   Fix port script to work with recently added tables (PR #1146)
+-   Fix 3pid invites when server is already in the room. ([\#1136](https://github.com/matrix-org/synapse/issues/1136))
+-   Fix upgrading with SQLite taking lots of CPU for a few days after upgrade. ([\#1144](https://github.com/matrix-org/synapse/issues/1144))
+-   Fix upgrading from very old database versions. ([\#1145](https://github.com/matrix-org/synapse/issues/1145))
+-   Fix port script to work with recently added tables. ([\#1146](https://github.com/matrix-org/synapse/issues/1146))
 
 Changes in synapse v0.18.0 (2016-09-19)
 =======================================
@@ -2385,39 +2385,39 @@ The release includes major changes to the state storage database schemas, which
 
 Changes:
 
--   Make public room search case insensitive (PR #1127)
+-   Make public room search case insensitive. ([\#1127](https://github.com/matrix-org/synapse/issues/1127))
 
 Bug fixes:
 
--   Fix and clean up publicRooms pagination (PR #1129)
+-   Fix and clean up publicRooms pagination. ([\#1129](https://github.com/matrix-org/synapse/issues/1129))
 
 Changes in synapse v0.18.0-rc1 (2016-09-16)
 ===========================================
 
 Features:
 
--   Add `only=highlight` on `/notifications` (PR #1081)
--   Add server param to /publicRooms (PR #1082)
--   Allow clients to ask for the whole of a single state event (PR #1094)
--   Add `is_direct` param to /createRoom (PR #1108)
--   Add pagination support to publicRooms (PR #1121)
--   Add very basic filter API to /publicRooms (PR #1126)
--   Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104, #1111)
+-   Add `only=highlight` on `/notifications`. ([\#1081](https://github.com/matrix-org/synapse/issues/1081))
+-   Add server param to /publicRooms. ([\#1082](https://github.com/matrix-org/synapse/issues/1082))
+-   Allow clients to ask for the whole of a single state event. ([\#1094](https://github.com/matrix-org/synapse/issues/1094))
+-   Add `is_direct` param to /createRoom. ([\#1108](https://github.com/matrix-org/synapse/issues/1108))
+-   Add pagination support to publicRooms. ([\#1121](https://github.com/matrix-org/synapse/issues/1121))
+-   Add very basic filter API to /publicRooms. ([\#1126](https://github.com/matrix-org/synapse/issues/1126))
+-   Add basic direct to device messaging support for E2E. ([\#1074](https://github.com/matrix-org/synapse/issues/1074), [\#1084](https://github.com/matrix-org/synapse/issues/1084), [\#1104](https://github.com/matrix-org/synapse/issues/1104), [\#1111](https://github.com/matrix-org/synapse/issues/1111))
 
 Changes:
 
--   Move to storing `state_groups_state` as deltas, greatly reducing DB size (PR #1065)
--   Reduce amount of state pulled out of the DB during common requests (PR #1069)
--   Allow PDF to be rendered from media repo (PR #1071)
--   Reindex `state_groups_state` after pruning (PR #1085)
--   Clobber EDUs in send queue (PR #1095)
--   Conform better to the CAS protocol specification (PR #1100)
--   Limit how often we ask for keys from dead servers (PR #1114)
+-   Move to storing `state_groups_state` as deltas, greatly reducing DB size. ([\#1065](https://github.com/matrix-org/synapse/issues/1065))
+-   Reduce amount of state pulled out of the DB during common requests. ([\#1069](https://github.com/matrix-org/synapse/issues/1069))
+-   Allow PDF to be rendered from media repo. ([\#1071](https://github.com/matrix-org/synapse/issues/1071))
+-   Reindex `state_groups_state` after pruning. ([\#1085](https://github.com/matrix-org/synapse/issues/1085))
+-   Clobber EDUs in send queue. ([\#1095](https://github.com/matrix-org/synapse/issues/1095))
+-   Conform better to the CAS protocol specification. ([\#1100](https://github.com/matrix-org/synapse/issues/1100))
+-   Limit how often we ask for keys from dead servers. ([\#1114](https://github.com/matrix-org/synapse/issues/1114))
 
 Bug fixes:
 
--   Fix /notifications API when used with `from` param (PR #1080)
--   Fix backfill when cannot find an event. (PR #1107)
+-   Fix /notifications API when used with `from` param. ([\#1080](https://github.com/matrix-org/synapse/issues/1080))
+-   Fix backfill when we cannot find an event. ([\#1107](https://github.com/matrix-org/synapse/issues/1107))
 
 Changes in synapse v0.17.3 (2016-09-09)
 =======================================
@@ -2436,20 +2436,20 @@ Changes in synapse v0.17.2-rc1 (2016-09-05)
 
 Features:
 
--   Start adding store-and-forward direct-to-device messaging (PR #1046, #1050, #1062, #1066)
+-   Start adding store-and-forward direct-to-device messaging. ([\#1046](https://github.com/matrix-org/synapse/issues/1046), [\#1050](https://github.com/matrix-org/synapse/issues/1050), [\#1062](https://github.com/matrix-org/synapse/issues/1062), [\#1066](https://github.com/matrix-org/synapse/issues/1066))
 
 Changes:
 
--   Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063, #1068)
--   Don't notify for online to online presence transitions. (PR #1054)
--   Occasionally persist unpersisted presence updates (PR #1055)
--   Allow application services to have an optional `url` (PR #1056)
--   Clean up old sent transactions from DB (PR #1059)
+-   Avoid pulling the full state of a room out so often. ([\#1047](https://github.com/matrix-org/synapse/issues/1047), [\#1049](https://github.com/matrix-org/synapse/issues/1049), [\#1063](https://github.com/matrix-org/synapse/issues/1063), [\#1068](https://github.com/matrix-org/synapse/issues/1068))
+-   Don't notify for online to online presence transitions. ([\#1054](https://github.com/matrix-org/synapse/issues/1054))
+-   Occasionally persist unpersisted presence updates. ([\#1055](https://github.com/matrix-org/synapse/issues/1055))
+-   Allow application services to have an optional `url`. ([\#1056](https://github.com/matrix-org/synapse/issues/1056))
+-   Clean up old sent transactions from DB. ([\#1059](https://github.com/matrix-org/synapse/issues/1059))
 
 Bug fixes:
 
--   Fix None check in backfill (PR #1043)
--   Fix membership changes to be idempotent (PR #1067)
+-   Fix None check in backfill. ([\#1043](https://github.com/matrix-org/synapse/issues/1043))
+-   Fix membership changes to be idempotent. ([\#1067](https://github.com/matrix-org/synapse/issues/1067))
 -   Fix bug in `get_pdu` where it would sometimes return events with incorrect signature
 
 Changes in synapse v0.17.1 (2016-08-24)
@@ -2457,86 +2457,86 @@ Changes in synapse v0.17.1 (2016-08-24)
 
 Changes:
 
--   Delete old `received_transactions` rows (PR #1038)
--   Pass through user-supplied content in `/join/$room_id` (PR #1039)
+-   Delete old `received_transactions` rows. ([\#1038](https://github.com/matrix-org/synapse/issues/1038))
+-   Pass through user-supplied content in `/join/$room_id`. ([\#1039](https://github.com/matrix-org/synapse/issues/1039))
 
 Bug fixes:
 
--   Fix bug with backfill (PR #1040)
+-   Fix bug with backfill. ([\#1040](https://github.com/matrix-org/synapse/issues/1040))
 
 Changes in synapse v0.17.1-rc1 (2016-08-22)
 ===========================================
 
 Features:
 
--   Add notification API (PR #1028)
+-   Add notification API. ([\#1028](https://github.com/matrix-org/synapse/issues/1028))
 
 Changes:
 
--   Don't print stack traces when failing to get remote keys (PR #996)
--   Various federation /event/ perf improvements (PR #998)
--   Only process one local membership event per room at a time (PR #1005)
--   Move default display name push rule (PR #1011, #1023)
--   Fix up preview URL API. Add tests. (PR #1015)
--   Set `Content-Security-Policy` on media repo (PR #1021)
--   Make `notify_interested_services` faster (PR #1022)
--   Add usage stats to prometheus monitoring (PR #1037)
+-   Don't print stack traces when failing to get remote keys. ([\#996](https://github.com/matrix-org/synapse/issues/996))
+-   Various federation /event/ perf improvements. ([\#998](https://github.com/matrix-org/synapse/issues/998))
+-   Only process one local membership event per room at a time. ([\#1005](https://github.com/matrix-org/synapse/issues/1005))
+-   Move default display name push rule. ([\#1011](https://github.com/matrix-org/synapse/issues/1011), [\#1023](https://github.com/matrix-org/synapse/issues/1023))
+-   Fix up preview URL API. Add tests. ([\#1015](https://github.com/matrix-org/synapse/issues/1015))
+-   Set `Content-Security-Policy` on media repo. ([\#1021](https://github.com/matrix-org/synapse/issues/1021))
+-   Make `notify_interested_services` faster. ([\#1022](https://github.com/matrix-org/synapse/issues/1022))
+-   Add usage stats to prometheus monitoring. ([\#1037](https://github.com/matrix-org/synapse/issues/1037))
 
 Bug fixes:
 
--   Fix token login (PR #993)
--   Fix CAS login (PR #994, #995)
--   Fix /sync to not clobber `status_msg` (PR #997)
--   Fix redacted state events to include `prev_content` (PR #1003)
--   Fix some bugs in the auth/ldap handler (PR #1007)
--   Fix backfill request to limit URI length, so that remotes Don't reject the requests due to path length limits (PR #1012)
--   Fix AS push code to not send duplicate events (PR #1025)
+-   Fix token login. ([\#993](https://github.com/matrix-org/synapse/issues/993))
+-   Fix CAS login. ([\#994](https://github.com/matrix-org/synapse/issues/994), [\#995](https://github.com/matrix-org/synapse/issues/995))
+-   Fix /sync to not clobber `status_msg`. ([\#997](https://github.com/matrix-org/synapse/issues/997))
+-   Fix redacted state events to include `prev_content`. ([\#1003](https://github.com/matrix-org/synapse/issues/1003))
+-   Fix some bugs in the auth/ldap handler. ([\#1007](https://github.com/matrix-org/synapse/issues/1007))
+-   Fix backfill request to limit URI length, so that remotes don't reject the requests due to path length limits. ([\#1012](https://github.com/matrix-org/synapse/issues/1012))
+-   Fix AS push code to not send duplicate events. ([\#1025](https://github.com/matrix-org/synapse/issues/1025))
 
 Changes in synapse v0.17.0 (2016-08-08)
 =======================================
 
 This release contains significant security bug fixes regarding authenticating events received over federation. PLEASE UPGRADE.
 
-This release changes the LDAP configuration format in a backwards incompatible way, see PR #843 for details.
+This release changes the LDAP configuration format in a backwards incompatible way, see [\#843](https://github.com/matrix-org/synapse/issues/843) for details.
 
 Changes:
 
--   Add federation /version API (PR #990)
--   Make psutil dependency optional (PR #992)
+-   Add federation /version API. ([\#990](https://github.com/matrix-org/synapse/issues/990))
+-   Make psutil dependency optional. ([\#992](https://github.com/matrix-org/synapse/issues/992))
 
 Bug fixes:
 
--   Fix URL preview API to exclude HTML comments in description (PR #988)
--   Fix error handling of remote joins (PR #991)
+-   Fix URL preview API to exclude HTML comments in description. ([\#988](https://github.com/matrix-org/synapse/issues/988))
+-   Fix error handling of remote joins. ([\#991](https://github.com/matrix-org/synapse/issues/991))
 
 Changes in synapse v0.17.0-rc4 (2016-08-05)
 ===========================================
 
 Changes:
 
--   Change the way we summarize URLs when previewing (PR #973)
--   Add new `/state_ids/` federation API (PR #979)
--   Speed up processing of `/state/` response (PR #986)
+-   Change the way we summarize URLs when previewing. ([\#973](https://github.com/matrix-org/synapse/issues/973))
+-   Add new `/state_ids/` federation API. ([\#979](https://github.com/matrix-org/synapse/issues/979))
+-   Speed up processing of `/state/` response. ([\#986](https://github.com/matrix-org/synapse/issues/986))
 
 Bug fixes:
 
--   Fix event persistence when event has already been partially persisted (PR #975, #983, #985)
--   Fix port script to also copy across backfilled events (PR #982)
+-   Fix event persistence when event has already been partially persisted. ([\#975](https://github.com/matrix-org/synapse/issues/975), [\#983](https://github.com/matrix-org/synapse/issues/983), [\#985](https://github.com/matrix-org/synapse/issues/985))
+-   Fix port script to also copy across backfilled events. ([\#982](https://github.com/matrix-org/synapse/issues/982))
 
 Changes in synapse v0.17.0-rc3 (2016-08-02)
 ===========================================
 
 Changes:
 
--   Forbid non-ASes from registering users whose names begin with `_` (PR #958)
--   Add some basic admin API docs (PR #963)
+-   Forbid non-ASes from registering users whose names begin with `_`. ([\#958](https://github.com/matrix-org/synapse/issues/958))
+-   Add some basic admin API docs. ([\#963](https://github.com/matrix-org/synapse/issues/963))
 
 Bug fixes:
 
--   Send the correct host header when fetching keys (PR #941)
--   Fix joining a room that has missing auth events (PR #964)
--   Fix various push bugs (PR #966, #970)
--   Fix adding emails on registration (PR #968)
+-   Send the correct host header when fetching keys. ([\#941](https://github.com/matrix-org/synapse/issues/941))
+-   Fix joining a room that has missing auth events. ([\#964](https://github.com/matrix-org/synapse/issues/964))
+-   Fix various push bugs. ([\#966](https://github.com/matrix-org/synapse/issues/966), [\#970](https://github.com/matrix-org/synapse/issues/970))
+-   Fix adding emails on registration. ([\#968](https://github.com/matrix-org/synapse/issues/968))
 
 Changes in synapse v0.17.0-rc2 (2016-08-02)
 ===========================================
@@ -2546,51 +2546,51 @@ Changes in synapse v0.17.0-rc2 (2016-08-02)
 Changes in synapse v0.17.0-rc1 (2016-07-28)
 ===========================================
 
-This release changes the LDAP configuration format in a backwards incompatible way, see PR #843 for details.
+This release changes the LDAP configuration format in a backwards incompatible way, see [\#843](https://github.com/matrix-org/synapse/issues/843) for details.
 
 Features:
 
--   Add `purge_media_cache` admin API (PR #902)
--   Add deactivate account admin API (PR #903)
--   Add optional pepper to password hashing (PR #907, #910 by KentShikama)
--   Add an admin option to shared secret registration (breaks backwards compat) (PR #909)
--   Add purge local room history API (PR #911, #923, #924)
--   Add requestToken endpoints (PR #915)
--   Add an /account/deactivate endpoint (PR #921)
--   Add filter param to /messages. Add `contains_url` to filter. (PR #922)
--   Add `device_id` support to /login (PR #929)
--   Add `device_id` support to /v2/register flow. (PR #937, #942)
--   Add GET /devices endpoint (PR #939, #944)
--   Add GET /device/{deviceId} (PR #943)
--   Add update and delete APIs for devices (PR #949)
+-   Add `purge_media_cache` admin API. ([\#902](https://github.com/matrix-org/synapse/issues/902))
+-   Add deactivate account admin API. ([\#903](https://github.com/matrix-org/synapse/issues/903))
+-   Add optional pepper to password hashing by KentShikama. ([\#907](https://github.com/matrix-org/synapse/issues/907), [\#910](https://github.com/matrix-org/synapse/issues/910))
+-   Add an admin option to shared secret registration (breaks backwards compat). ([\#909](https://github.com/matrix-org/synapse/issues/909))
+-   Add purge local room history API. ([\#911](https://github.com/matrix-org/synapse/issues/911), [\#923](https://github.com/matrix-org/synapse/issues/923), [\#924](https://github.com/matrix-org/synapse/issues/924))
+-   Add requestToken endpoints. ([\#915](https://github.com/matrix-org/synapse/issues/915))
+-   Add an /account/deactivate endpoint. ([\#921](https://github.com/matrix-org/synapse/issues/921))
+-   Add filter param to /messages. Add `contains_url` to filter. ([\#922](https://github.com/matrix-org/synapse/issues/922))
+-   Add `device_id` support to /login. ([\#929](https://github.com/matrix-org/synapse/issues/929))
+-   Add `device_id` support to /v2/register flow. ([\#937](https://github.com/matrix-org/synapse/issues/937), [\#942](https://github.com/matrix-org/synapse/issues/942))
+-   Add GET /devices endpoint. ([\#939](https://github.com/matrix-org/synapse/issues/939), [\#944](https://github.com/matrix-org/synapse/issues/944))
+-   Add GET /device/{deviceId}. ([\#943](https://github.com/matrix-org/synapse/issues/943))
+-   Add update and delete APIs for devices. ([\#949](https://github.com/matrix-org/synapse/issues/949))
 
 Changes:
 
--   Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
--   Linearize some federation endpoints based on `(origin, room_id)` (PR #879)
--   Remove the legacy v0 content upload API. (PR #888)
--   Use similar naming we use in email notifs for push (PR #894)
--   Optionally include password hash in createUser endpoint (PR #905 by KentShikama)
--   Use a query that postgresql optimises better for `get_events_around` (PR #906)
--   Fall back to '`username` if `user` is not given for appservice registration. (PR #927 by Half-Shot)
--   Add metrics for psutil derived memory usage (PR #936)
--   Record `device_id` in `client_ips` (PR #938)
--   Send the correct host header when fetching keys (PR #941)
--   Log the hostname the reCAPTCHA was completed on (PR #946)
--   Make the device id on e2e key upload optional (PR #956)
--   Add r0.2.0 to the "supported versions" list (PR #960)
--   Don't include name of room for invites in push (PR #961)
+-   Rewrite LDAP Authentication against ldap3. Contributed by mweinelt. ([\#843](https://github.com/matrix-org/synapse/issues/843))
+-   Linearize some federation endpoints based on `(origin, room_id)`. ([\#879](https://github.com/matrix-org/synapse/issues/879))
+-   Remove the legacy v0 content upload API. ([\#888](https://github.com/matrix-org/synapse/issues/888))
+-   Use similar naming we use in email notifs for push. ([\#894](https://github.com/matrix-org/synapse/issues/894))
+-   Optionally include password hash in createUser endpoint. Contributed by KentShikama. ([\#905](https://github.com/matrix-org/synapse/issues/905))
+-   Use a query that postgresql optimises better for `get_events_around`. ([\#906](https://github.com/matrix-org/synapse/issues/906))
+-   Fall back to `username` if `user` is not given for appservice registration. Contributed by Half-Shot. ([\#927](https://github.com/matrix-org/synapse/issues/927))
+-   Add metrics for psutil derived memory usage. ([\#936](https://github.com/matrix-org/synapse/issues/936))
+-   Record `device_id` in `client_ips`. ([\#938](https://github.com/matrix-org/synapse/issues/938))
+-   Send the correct host header when fetching keys. ([\#941](https://github.com/matrix-org/synapse/issues/941))
+-   Log the hostname the reCAPTCHA was completed on. ([\#946](https://github.com/matrix-org/synapse/issues/946))
+-   Make the device id on e2e key upload optional. ([\#956](https://github.com/matrix-org/synapse/issues/956))
+-   Add r0.2.0 to the "supported versions" list. ([\#960](https://github.com/matrix-org/synapse/issues/960))
+-   Don't include name of room for invites in push. ([\#961](https://github.com/matrix-org/synapse/issues/961))
 
 Bug fixes:
 
--   Fix substitution failure in mail template (PR #887)
--   Put most recent 20 messages in email notif (PR #892)
--   Ensure that the guest user is in the database when upgrading accounts (PR #914)
--   Fix various edge cases in auth handling (PR #919)
--   Fix 500 ISE when sending alias event without a `state_key` (PR #925)
--   Fix bug where we stored rejections in the `state_group`, persist all rejections (PR #948)
--   Fix lack of check of if the user is banned when handling 3pid invites (PR #952)
--   Fix a couple of bugs in the transaction and keyring code (PR #954, #955)
+-   Fix substitution failure in mail template. ([\#887](https://github.com/matrix-org/synapse/issues/887))
+-   Put most recent 20 messages in email notif. ([\#892](https://github.com/matrix-org/synapse/issues/892))
+-   Ensure that the guest user is in the database when upgrading accounts. ([\#914](https://github.com/matrix-org/synapse/issues/914))
+-   Fix various edge cases in auth handling. ([\#919](https://github.com/matrix-org/synapse/issues/919))
+-   Fix 500 ISE when sending alias event without a `state_key`. ([\#925](https://github.com/matrix-org/synapse/issues/925))
+-   Fix bug where we stored rejections in the `state_group`, persist all rejections. ([\#948](https://github.com/matrix-org/synapse/issues/948))
+-   Fix lack of check of if the user is banned when handling 3pid invites. ([\#952](https://github.com/matrix-org/synapse/issues/952))
+-   Fix a couple of bugs in the transaction and keyring code. ([\#954](https://github.com/matrix-org/synapse/issues/954), [\#955](https://github.com/matrix-org/synapse/issues/955))
 
 Changes in synapse v0.16.1-r1 (2016-07-08)
 ==========================================
@@ -2604,13 +2604,13 @@ Changes in synapse v0.16.1 (2016-06-20)
 
 Bug fixes:
 
--   Fix assorted bugs in `/preview_url` (PR #872)
--   Fix TypeError when setting unicode passwords (PR #873)
+-   Fix assorted bugs in `/preview_url`. ([\#872](https://github.com/matrix-org/synapse/issues/872))
+-   Fix TypeError when setting unicode passwords. ([\#873](https://github.com/matrix-org/synapse/issues/873))
 
 Performance improvements:
 
--   Turn `use_frozen_events` off by default (PR #877)
--   Disable responding with canonical json for federation (PR #878)
+-   Turn `use_frozen_events` off by default. ([\#877](https://github.com/matrix-org/synapse/issues/877))
+-   Disable responding with canonical json for federation. ([\#878](https://github.com/matrix-org/synapse/issues/878))
 
 Changes in synapse v0.16.1-rc1 (2016-06-15)
 ===========================================
@@ -2619,20 +2619,20 @@ Features: None
 
 Changes:
 
--   Log requester for `/publicRoom` endpoints when possible (PR #856)
--   502 on `/thumbnail` when can't connect to remote server (PR #862)
--   Linearize fetching of gaps on incoming events (PR #871)
+-   Log requester for `/publicRoom` endpoints when possible. ([\#856](https://github.com/matrix-org/synapse/issues/856))
+-   502 on `/thumbnail` when can't connect to remote server. ([\#862](https://github.com/matrix-org/synapse/issues/862))
+-   Linearize fetching of gaps on incoming events. ([\#871](https://github.com/matrix-org/synapse/issues/871))
 
 Bug fixes:
 
--   Fix bug where rooms where marked as published by default (PR #857)
--   Fix bug where joining room with an event with invalid sender (PR #868)
--   Fix bug where backfilled events were sent down sync streams (PR #869)
--   Fix bug where outgoing connections could wedge indefinitely, causing push notifications to be unreliable (PR #870)
+-   Fix bug where rooms were marked as published by default. ([\#857](https://github.com/matrix-org/synapse/issues/857))
+-   Fix bug where joining room with an event with invalid sender. ([\#868](https://github.com/matrix-org/synapse/issues/868))
+-   Fix bug where backfilled events were sent down sync streams. ([\#869](https://github.com/matrix-org/synapse/issues/869))
+-   Fix bug where outgoing connections could wedge indefinitely, causing push notifications to be unreliable. ([\#870](https://github.com/matrix-org/synapse/issues/870))
 
 Performance improvements:
 
--   Improve `/publicRooms` performance(PR #859)
+-   Improve `/publicRooms` performance. ([\#859](https://github.com/matrix-org/synapse/issues/859))
 
 Changes in synapse v0.16.0 (2016-06-09)
 =======================================
@@ -2641,31 +2641,31 @@ NB: As of v0.14 all AS config files must have an ID field.
 
 Bug fixes:
 
--   Don't make rooms published by default (PR #857)
+-   Don't make rooms published by default. ([\#857](https://github.com/matrix-org/synapse/issues/857))
 
 Changes in synapse v0.16.0-rc2 (2016-06-08)
 ===========================================
 
 Features:
 
--   Add configuration option for tuning GC via `gc.set_threshold` (PR #849)
+-   Add configuration option for tuning GC via `gc.set_threshold`. ([\#849](https://github.com/matrix-org/synapse/issues/849))
 
 Changes:
 
--   Record metrics about GC (PR #771, #847, #852)
--   Add metric counter for number of persisted events (PR #841)
+-   Record metrics about GC. ([\#771](https://github.com/matrix-org/synapse/issues/771), [\#847](https://github.com/matrix-org/synapse/issues/847), [\#852](https://github.com/matrix-org/synapse/issues/852))
+-   Add metric counter for number of persisted events. ([\#841](https://github.com/matrix-org/synapse/issues/841))
 
 Bug fixes:
 
--   Fix `From` header in email notifications (PR #843)
--   Fix presence where timeouts were not being fired for the first 8h after restarts (PR #842)
+-   Fix `From` header in email notifications. ([\#843](https://github.com/matrix-org/synapse/issues/843))
+-   Fix presence where timeouts were not being fired for the first 8h after restarts. ([\#842](https://github.com/matrix-org/synapse/issues/842))
 -   Fix bug where synapse sent malformed transactions to AS's when retrying transactions (Commits 310197b, 8437906)
 
 Performance improvements:
 
--   Remove event fetching from DB threads (PR #835)
--   Change the way we cache events (PR #836)
--   Add events to cache when we persist them (PR #840)
+-   Remove event fetching from DB threads. ([\#835](https://github.com/matrix-org/synapse/issues/835))
+-   Change the way we cache events. ([\#836](https://github.com/matrix-org/synapse/issues/836))
+-   Add events to cache when we persist them. ([\#840](https://github.com/matrix-org/synapse/issues/840))
 
 Changes in synapse v0.16.0-rc1 (2016-06-03)
 ===========================================
@@ -2674,74 +2674,74 @@ Version 0.15 was not released. See v0.15.0-rc1 below for additional changes.
 
 Features:
 
--   Add email notifications for missed messages (PR #759, #786, #799, #810, #815, #821)
--   Add a `url_preview_ip_range_whitelist` config param (PR #760)
--   Add /report endpoint (PR #762)
--   Add basic ignore user API (PR #763)
--   Add an openidish mechanism for proving that you own a given `user_id` (PR #765)
--   Allow clients to specify a `server_name` to avoid "No known servers" (PR #794)
--   Add `secondary_directory_servers` option to fetch room list from other servers (PR #808, #813)
+-   Add email notifications for missed messages. ([\#759](https://github.com/matrix-org/synapse/issues/759), [\#786](https://github.com/matrix-org/synapse/issues/786), [\#799](https://github.com/matrix-org/synapse/issues/799), [\#810](https://github.com/matrix-org/synapse/issues/810), [\#815](https://github.com/matrix-org/synapse/issues/815), [\#821](https://github.com/matrix-org/synapse/issues/821))
+-   Add a `url_preview_ip_range_whitelist` config param. ([\#760](https://github.com/matrix-org/synapse/issues/760))
+-   Add /report endpoint. ([\#762](https://github.com/matrix-org/synapse/issues/762))
+-   Add basic ignore user API. ([\#763](https://github.com/matrix-org/synapse/issues/763))
+-   Add an openidish mechanism for proving that you own a given `user_id`. ([\#765](https://github.com/matrix-org/synapse/issues/765))
+-   Allow clients to specify a `server_name` to avoid "No known servers". ([\#794](https://github.com/matrix-org/synapse/issues/794))
+-   Add `secondary_directory_servers` option to fetch room list from other servers. ([\#808](https://github.com/matrix-org/synapse/issues/808), [\#813](https://github.com/matrix-org/synapse/issues/813))
 
 Changes:
 
--   Report per request metrics for all of the things using `request_handler` (PR #756)
--   Correctly handle `NULL` password hashes from the database (PR #775)
--   Allow receipts for events we haven't seen in the db (PR #784)
--   Make synctl read a cache factor from config file (PR #785)
--   Increment badge count per missed convo, not per msg (PR #793)
--   Special case `m.room.third_party_invite` event auth to match invites (PR #814)
+-   Report per request metrics for all of the things using `request_handler`. ([\#756](https://github.com/matrix-org/synapse/issues/756))
+-   Correctly handle `NULL` password hashes from the database. ([\#775](https://github.com/matrix-org/synapse/issues/775))
+-   Allow receipts for events we haven't seen in the db. ([\#784](https://github.com/matrix-org/synapse/issues/784))
+-   Make synctl read a cache factor from config file. ([\#785](https://github.com/matrix-org/synapse/issues/785))
+-   Increment badge count per missed convo, not per msg. ([\#793](https://github.com/matrix-org/synapse/issues/793))
+-   Special case `m.room.third_party_invite` event auth to match invites. ([\#814](https://github.com/matrix-org/synapse/issues/814))
 
 Bug fixes:
 
--   Fix typo in `event_auth` servlet path (PR #757)
--   Fix password reset (PR #758)
+-   Fix typo in `event_auth` servlet path. ([\#757](https://github.com/matrix-org/synapse/issues/757))
+-   Fix password reset. ([\#758](https://github.com/matrix-org/synapse/issues/758))
 
 Performance improvements:
 
--   Reduce database inserts when sending transactions (PR #767)
--   Queue events by room for persistence (PR #768)
--   Add cache to `get_user_by_id` (PR #772)
--   Add and use `get_domain_from_id` (PR #773)
--   Use tree cache for `get_linearized_receipts_for_room` (PR #779)
--   Remove unused indices (PR #782)
--   Add caches to `bulk_get_push_rules*` (PR #804)
--   Cache `get_event_reference_hashes` (PR #806)
--   Add `get_users_with_read_receipts_in_room` cache (PR #809)
--   Use state to calculate `get_users_in_room` (PR #811)
--   Load push rules in storage layer so that they get cached (PR #825)
--   Make `get_joined_hosts_for_room` use `get_users_in_room` (PR #828)
--   Poke notifier on next reactor tick (PR #829)
--   Change CacheMetrics to be quicker (PR #830)
+-   Reduce database inserts when sending transactions. ([\#767](https://github.com/matrix-org/synapse/issues/767))
+-   Queue events by room for persistence. ([\#768](https://github.com/matrix-org/synapse/issues/768))
+-   Add cache to `get_user_by_id`. ([\#772](https://github.com/matrix-org/synapse/issues/772))
+-   Add and use `get_domain_from_id`. ([\#773](https://github.com/matrix-org/synapse/issues/773))
+-   Use tree cache for `get_linearized_receipts_for_room`. ([\#779](https://github.com/matrix-org/synapse/issues/779))
+-   Remove unused indices. ([\#782](https://github.com/matrix-org/synapse/issues/782))
+-   Add caches to `bulk_get_push_rules*`. ([\#804](https://github.com/matrix-org/synapse/issues/804))
+-   Cache `get_event_reference_hashes`. ([\#806](https://github.com/matrix-org/synapse/issues/806))
+-   Add `get_users_with_read_receipts_in_room` cache. ([\#809](https://github.com/matrix-org/synapse/issues/809))
+-   Use state to calculate `get_users_in_room`. ([\#811](https://github.com/matrix-org/synapse/issues/811))
+-   Load push rules in storage layer so that they get cached. ([\#825](https://github.com/matrix-org/synapse/issues/825))
+-   Make `get_joined_hosts_for_room` use `get_users_in_room`. ([\#828](https://github.com/matrix-org/synapse/issues/828))
+-   Poke notifier on next reactor tick. ([\#829](https://github.com/matrix-org/synapse/issues/829))
+-   Change CacheMetrics to be quicker. ([\#830](https://github.com/matrix-org/synapse/issues/830))
 
 Changes in synapse v0.15.0-rc1 (2016-04-26)
 ===========================================
 
 Features:
 
--   Add login support for Javascript Web Tokens, thanks to Niklas Riekenbrauck (PR #671,\#687)
--   Add URL previewing support (PR #688)
--   Add login support for LDAP, thanks to Christoph Witzany (PR #701)
--   Add GET endpoint for pushers (PR #716)
+-   Add login support for Javascript Web Tokens, thanks to Niklas Riekenbrauck. ([\#671](https://github.com/matrix-org/synapse/issues/671), [\#687](https://github.com/matrix-org/synapse/issues/687))
+-   Add URL previewing support. ([\#688](https://github.com/matrix-org/synapse/issues/688))
+-   Add login support for LDAP, thanks to Christoph Witzany. ([\#701](https://github.com/matrix-org/synapse/issues/701))
+-   Add GET endpoint for pushers. ([\#716](https://github.com/matrix-org/synapse/issues/716))
 
 Changes:
 
--   Never notify for member events (PR #667)
--   Deduplicate identical `/sync` requests (PR #668)
--   Require user to have left room to forget room (PR #673)
--   Use DNS cache if within TTL (PR #677)
--   Let users see their own leave events (PR #699)
--   Deduplicate membership changes (PR #700)
--   Increase performance of pusher code (PR #705)
--   Respond with error status 504 if failed to talk to remote server (PR #731)
--   Increase search performance on postgres (PR #745)
+-   Never notify for member events. ([\#667](https://github.com/matrix-org/synapse/issues/667))
+-   Deduplicate identical `/sync` requests. ([\#668](https://github.com/matrix-org/synapse/issues/668))
+-   Require user to have left room to forget room. ([\#673](https://github.com/matrix-org/synapse/issues/673))
+-   Use DNS cache if within TTL. ([\#677](https://github.com/matrix-org/synapse/issues/677))
+-   Let users see their own leave events. ([\#699](https://github.com/matrix-org/synapse/issues/699))
+-   Deduplicate membership changes. ([\#700](https://github.com/matrix-org/synapse/issues/700))
+-   Increase performance of pusher code. ([\#705](https://github.com/matrix-org/synapse/issues/705))
+-   Respond with error status 504 if failed to talk to remote server. ([\#731](https://github.com/matrix-org/synapse/issues/731))
+-   Increase search performance on postgres. ([\#745](https://github.com/matrix-org/synapse/issues/745))
 
 Bug fixes:
 
--   Fix bug where disabling all notifications still resulted in push (PR #678)
--   Fix bug where users couldn't reject remote invites if remote refused (PR #691)
--   Fix bug where synapse attempted to backfill from itself (PR #693)
--   Fix bug where profile information was not correctly added when joining remote rooms (PR #703)
--   Fix bug where register API required incorrect key name for AS registration (PR #727)
+-   Fix bug where disabling all notifications still resulted in push. ([\#678](https://github.com/matrix-org/synapse/issues/678))
+-   Fix bug where users couldn't reject remote invites if remote refused. ([\#691](https://github.com/matrix-org/synapse/issues/691))
+-   Fix bug where synapse attempted to backfill from itself. ([\#693](https://github.com/matrix-org/synapse/issues/693))
+-   Fix bug where profile information was not correctly added when joining remote rooms. ([\#703](https://github.com/matrix-org/synapse/issues/703))
+-   Fix bug where register API required incorrect key name for AS registration. ([\#727](https://github.com/matrix-org/synapse/issues/727))
 
 Changes in synapse v0.14.0 (2016-03-30)
 =======================================
@@ -2753,58 +2753,58 @@ Changes in synapse v0.14.0-rc2 (2016-03-23)
 
 Features:
 
--   Add published room list API (PR #657)
+-   Add published room list API. ([\#657](https://github.com/matrix-org/synapse/issues/657))
 
 Changes:
 
--   Change various caches to consume less memory (PR #656, #658, #660, #662, #663, #665)
--   Allow rooms to be published without requiring an alias (PR #664)
--   Intern common strings in caches to reduce memory footprint (\#666)
+-   Change various caches to consume less memory. ([\#656](https://github.com/matrix-org/synapse/issues/656), [\#658](https://github.com/matrix-org/synapse/issues/658), [\#660](https://github.com/matrix-org/synapse/issues/660), [\#662](https://github.com/matrix-org/synapse/issues/662), [\#663](https://github.com/matrix-org/synapse/issues/663), [\#665](https://github.com/matrix-org/synapse/issues/665))
+-   Allow rooms to be published without requiring an alias. ([\#664](https://github.com/matrix-org/synapse/issues/664))
+-   Intern common strings in caches to reduce memory footprint. ([\#666](https://github.com/matrix-org/synapse/issues/666))
 
 Bug fixes:
 
--   Fix reject invites over federation (PR #646)
--   Fix bug where registration was not idempotent (PR #649)
--   Update aliases event after deleting aliases (PR #652)
--   Fix unread notification count, which was sometimes wrong (PR #661)
+-   Fix reject invites over federation. ([\#646](https://github.com/matrix-org/synapse/issues/646))
+-   Fix bug where registration was not idempotent. ([\#649](https://github.com/matrix-org/synapse/issues/649))
+-   Update aliases event after deleting aliases. ([\#652](https://github.com/matrix-org/synapse/issues/652))
+-   Fix unread notification count, which was sometimes wrong. ([\#661](https://github.com/matrix-org/synapse/issues/661))
 
 Changes in synapse v0.14.0-rc1 (2016-03-14)
 ===========================================
 
 Features:
 
--   Add `event_id` to response to state event PUT (PR #581)
--   Allow guest users access to messages in rooms they have joined (PR #587)
--   Add config for what state is included in a room invite (PR #598)
--   Send the inviter's member event in room invite state (PR #607)
--   Add error codes for malformed/bad JSON in /login (PR #608)
--   Add support for changing the actions for default rules (PR #609)
--   Add environment variable `SYNAPSE_CACHE_FACTOR`, default it to 0.1 (PR #612)
--   Add ability for alias creators to delete aliases (PR #614)
--   Add profile information to invites (PR #624)
+-   Add `event_id` to response to state event PUT. ([\#581](https://github.com/matrix-org/synapse/issues/581))
+-   Allow guest users access to messages in rooms they have joined. ([\#587](https://github.com/matrix-org/synapse/issues/587))
+-   Add config for what state is included in a room invite. ([\#598](https://github.com/matrix-org/synapse/issues/598))
+-   Send the inviter's member event in room invite state. ([\#607](https://github.com/matrix-org/synapse/issues/607))
+-   Add error codes for malformed/bad JSON in /login. ([\#608](https://github.com/matrix-org/synapse/issues/608))
+-   Add support for changing the actions for default rules. ([\#609](https://github.com/matrix-org/synapse/issues/609))
+-   Add environment variable `SYNAPSE_CACHE_FACTOR`, default it to 0.1. ([\#612](https://github.com/matrix-org/synapse/issues/612))
+-   Add ability for alias creators to delete aliases. ([\#614](https://github.com/matrix-org/synapse/issues/614))
+-   Add profile information to invites. ([\#624](https://github.com/matrix-org/synapse/issues/624))
 
 Changes:
 
--   Enforce `user_id` exclusivity for AS registrations (PR #572)
--   Make adding push rules idempotent (PR #587)
--   Improve presence performance (PR #582, #586)
--   Change presence semantics for `last_active_ago` (PR #582, #586)
--   Don't allow `m.room.create` to be changed (PR #596)
--   Add 800x600 to default list of valid thumbnail sizes (PR #616)
--   Always include kicks and bans in full /sync (PR #625)
--   Send history visibility on boundary changes (PR #626)
--   Register endpoint now returns a `refresh_token` (PR #637)
+-   Enforce `user_id` exclusivity for AS registrations. ([\#572](https://github.com/matrix-org/synapse/issues/572))
+-   Make adding push rules idempotent. ([\#587](https://github.com/matrix-org/synapse/issues/587))
+-   Improve presence performance. ([\#582](https://github.com/matrix-org/synapse/issues/582), [\#586](https://github.com/matrix-org/synapse/issues/586))
+-   Change presence semantics for `last_active_ago`. ([\#582](https://github.com/matrix-org/synapse/issues/582), [\#586](https://github.com/matrix-org/synapse/issues/586))
+-   Don't allow `m.room.create` to be changed. ([\#596](https://github.com/matrix-org/synapse/issues/596))
+-   Add 800x600 to default list of valid thumbnail sizes. ([\#616](https://github.com/matrix-org/synapse/issues/616))
+-   Always include kicks and bans in full /sync. ([\#625](https://github.com/matrix-org/synapse/issues/625))
+-   Send history visibility on boundary changes. ([\#626](https://github.com/matrix-org/synapse/issues/626))
+-   Register endpoint now returns a `refresh_token`. ([\#637](https://github.com/matrix-org/synapse/issues/637))
 
 Bug fixes:
 
--   Fix bug where we returned incorrect state in /sync (PR #573)
--   Always return a JSON object from push rule API (PR #606)
--   Fix bug where registering without a user id sometimes failed (PR #610)
--   Report size of ExpiringCache in cache size metrics (PR #611)
--   Fix rejection of invites to empty rooms (PR #615)
--   Fix usage of `bcrypt` to not use `checkpw` (PR #619)
--   Pin `pysaml2` dependency (PR #634)
--   Fix bug in `/sync` where timeline order was incorrect for backfilled events (PR #635)
+-   Fix bug where we returned incorrect state in /sync. ([\#573](https://github.com/matrix-org/synapse/issues/573))
+-   Always return a JSON object from push rule API. ([\#606](https://github.com/matrix-org/synapse/issues/606))
+-   Fix bug where registering without a user id sometimes failed. ([\#610](https://github.com/matrix-org/synapse/issues/610))
+-   Report size of ExpiringCache in cache size metrics. ([\#611](https://github.com/matrix-org/synapse/issues/611))
+-   Fix rejection of invites to empty rooms. ([\#615](https://github.com/matrix-org/synapse/issues/615))
+-   Fix usage of `bcrypt` to not use `checkpw`. ([\#619](https://github.com/matrix-org/synapse/issues/619))
+-   Pin `pysaml2` dependency. ([\#634](https://github.com/matrix-org/synapse/issues/634))
+-   Fix bug in `/sync` where timeline order was incorrect for backfilled events. ([\#635](https://github.com/matrix-org/synapse/issues/635))
 
 Changes in synapse v0.13.3 (2016-02-11)
 =======================================
@@ -2814,7 +2814,7 @@ Changes in synapse v0.13.3 (2016-02-11)
 Changes in synapse v0.13.2 (2016-02-11)
 =======================================
 
--   Fix bug where `/events` would fail to skip some events if there had been more events than the limit specified since the last request (PR #570)
+-   Fix bug where `/events` would fail to skip some events if there had been more events than the limit specified since the last request. ([\#570](https://github.com/matrix-org/synapse/issues/570))
 
 Changes in synapse v0.13.1 (2016-02-10)
 =======================================
@@ -2828,176 +2828,176 @@ This version includes an upgrade of the schema, specifically adding an index to
 
 Changes:
 
--   Improve general performance (PR #540, #543. \#544, #54, #549, #567)
--   Change guest user ids to be incrementing integers (PR #550)
--   Improve performance of public room list API (PR #552)
--   Change profile API to omit keys rather than return null (PR #557)
--   Add `/media/r0` endpoint prefix, which is equivalent to `/media/v1/` (PR #595)
+-   Improve general performance. ([\#540](https://github.com/matrix-org/synapse/issues/540), [\#543](https://github.com/matrix-org/synapse/issues/543), [\#544](https://github.com/matrix-org/synapse/issues/544), [\#54](https://github.com/matrix-org/synapse/issues/54), [\#549](https://github.com/matrix-org/synapse/issues/549), [\#567](https://github.com/matrix-org/synapse/issues/567))
+-   Change guest user ids to be incrementing integers. ([\#550](https://github.com/matrix-org/synapse/issues/550))
+-   Improve performance of public room list API. ([\#552](https://github.com/matrix-org/synapse/issues/552))
+-   Change profile API to omit keys rather than return null. ([\#557](https://github.com/matrix-org/synapse/issues/557))
+-   Add `/media/r0` endpoint prefix, which is equivalent to `/media/v1/`. ([\#595](https://github.com/matrix-org/synapse/issues/595))
 
 Bug fixes:
 
--   Fix bug with upgrading guest accounts where it would fail if you opened the registration email on a different device (PR #547)
--   Fix bug where unread count could be wrong (PR #568)
+-   Fix bug with upgrading guest accounts where it would fail if you opened the registration email on a different device. ([\#547](https://github.com/matrix-org/synapse/issues/547))
+-   Fix bug where unread count could be wrong. ([\#568](https://github.com/matrix-org/synapse/issues/568))
 
 Changes in synapse v0.12.1-rc1 (2016-01-29)
 ===========================================
 
 Features:
 
--   Add unread notification counts in `/sync` (PR #456)
--   Add support for inviting 3pids in `/createRoom` (PR #460)
--   Add ability for guest accounts to upgrade (PR #462)
--   Add `/versions` API (PR #468)
--   Add `event` to `/context` API (PR #492)
--   Add specific error code for invalid user names in `/register` (PR #499)
--   Add support for push badge counts (PR #507)
--   Add support for non-guest users to peek in rooms using `/events` (PR #510)
+-   Add unread notification counts in `/sync`. ([\#456](https://github.com/matrix-org/synapse/issues/456))
+-   Add support for inviting 3pids in `/createRoom`. ([\#460](https://github.com/matrix-org/synapse/issues/460))
+-   Add ability for guest accounts to upgrade. ([\#462](https://github.com/matrix-org/synapse/issues/462))
+-   Add `/versions` API. ([\#468](https://github.com/matrix-org/synapse/issues/468))
+-   Add `event` to `/context` API. ([\#492](https://github.com/matrix-org/synapse/issues/492))
+-   Add specific error code for invalid user names in `/register`. ([\#499](https://github.com/matrix-org/synapse/issues/499))
+-   Add support for push badge counts. ([\#507](https://github.com/matrix-org/synapse/issues/507))
+-   Add support for non-guest users to peek in rooms using `/events`. ([\#510](https://github.com/matrix-org/synapse/issues/510))
 
 Changes:
 
--   Change `/sync` so that guest users only get rooms they've joined (PR #469)
--   Change to require unbanning before other membership changes (PR #501)
--   Change default push rules to notify for all messages (PR #486)
--   Change default push rules to not notify on membership changes (PR #514)
--   Change default push rules in one to one rooms to only notify for events that are messages (PR #529)
--   Change `/sync` to reject requests with a `from` query param (PR #512)
--   Change server manhole to use SSH rather than telnet (PR #473)
--   Change server to require AS users to be registered before use (PR #487)
--   Change server not to start when ASes are invalidly configured (PR #494)
--   Change server to require ID and `as_token` to be unique for AS's (PR #496)
--   Change maximum pagination limit to 1000 (PR #497)
+-   Change `/sync` so that guest users only get rooms they've joined. ([\#469](https://github.com/matrix-org/synapse/issues/469))
+-   Change to require unbanning before other membership changes. ([\#501](https://github.com/matrix-org/synapse/issues/501))
+-   Change default push rules to notify for all messages. ([\#486](https://github.com/matrix-org/synapse/issues/486))
+-   Change default push rules to not notify on membership changes. ([\#514](https://github.com/matrix-org/synapse/issues/514))
+-   Change default push rules in one to one rooms to only notify for events that are messages. ([\#529](https://github.com/matrix-org/synapse/issues/529))
+-   Change `/sync` to reject requests with a `from` query param. ([\#512](https://github.com/matrix-org/synapse/issues/512))
+-   Change server manhole to use SSH rather than telnet. ([\#473](https://github.com/matrix-org/synapse/issues/473))
+-   Change server to require AS users to be registered before use. ([\#487](https://github.com/matrix-org/synapse/issues/487))
+-   Change server not to start when ASes are invalidly configured. ([\#494](https://github.com/matrix-org/synapse/issues/494))
+-   Change server to require ID and `as_token` to be unique for AS's. ([\#496](https://github.com/matrix-org/synapse/issues/496))
+-   Change maximum pagination limit to 1000. ([\#497](https://github.com/matrix-org/synapse/issues/497))
 
 Bug fixes:
 
--   Fix bug where `/sync` didn't return when something under the leave key changed (PR #461)
--   Fix bug where we returned smaller rather than larger than requested thumbnails when `method=crop` (PR #464)
--   Fix thumbnails API to only return cropped thumbnails when asking for a cropped thumbnail (PR #475)
--   Fix bug where we occasionally still logged access tokens (PR #477)
--   Fix bug where `/events` would always return immediately for guest users (PR #480)
--   Fix bug where `/sync` unexpectedly returned old left rooms (PR #481)
--   Fix enabling and disabling push rules (PR #498)
--   Fix bug where `/register` returned 500 when given unicode username (PR #513)
+-   Fix bug where `/sync` didn't return when something under the leave key changed. ([\#461](https://github.com/matrix-org/synapse/issues/461))
+-   Fix bug where we returned smaller rather than larger than requested thumbnails when `method=crop`. ([\#464](https://github.com/matrix-org/synapse/issues/464))
+-   Fix thumbnails API to only return cropped thumbnails when asking for a cropped thumbnail. ([\#475](https://github.com/matrix-org/synapse/issues/475))
+-   Fix bug where we occasionally still logged access tokens. ([\#477](https://github.com/matrix-org/synapse/issues/477))
+-   Fix bug where `/events` would always return immediately for guest users. ([\#480](https://github.com/matrix-org/synapse/issues/480))
+-   Fix bug where `/sync` unexpectedly returned old left rooms. ([\#481](https://github.com/matrix-org/synapse/issues/481))
+-   Fix enabling and disabling push rules. ([\#498](https://github.com/matrix-org/synapse/issues/498))
+-   Fix bug where `/register` returned 500 when given unicode username. ([\#513](https://github.com/matrix-org/synapse/issues/513))
 
 Changes in synapse v0.12.0 (2016-01-04)
 =======================================
 
--   Expose `/login` under `r0` (PR #459)
+-   Expose `/login` under `r0`. ([\#459](https://github.com/matrix-org/synapse/issues/459))
 
 Changes in synapse v0.12.0-rc3 (2015-12-23)
 ===========================================
 
--   Allow guest accounts access to `/sync` (PR #455)
--   Allow filters to include/exclude rooms at the room level rather than just from the components of the sync for each room. (PR #454)
--   Include urls for room avatars in the response to `/publicRooms` (PR #453)
--   Don't set a identicon as the avatar for a user when they register (PR #450)
--   Add a `display_name` to third-party invites (PR #449)
--   Send more information to the identity server for third-party invites so that it can send richer messages to the invitee (PR #446)
--   Cache the responses to `/initialSync` for 5 minutes. If a client retries a request to `/initialSync` before the a response was computed to the first request then the same response is used for both requests (PR #457)
--   Fix a bug where synapse would always request the signing keys of remote servers even when the key was cached locally (PR #452)
--   Fix 500 when pagination search results (PR #447)
--   Fix a bug where synapse was leaking raw email address in third-party invites (PR #448)
+-   Allow guest accounts access to `/sync`. ([\#455](https://github.com/matrix-org/synapse/issues/455))
+-   Allow filters to include/exclude rooms at the room level rather than just from the components of the sync for each room. ([\#454](https://github.com/matrix-org/synapse/issues/454))
+-   Include urls for room avatars in the response to `/publicRooms`. ([\#453](https://github.com/matrix-org/synapse/issues/453))
+-   Don't set an identicon as the avatar for a user when they register. ([\#450](https://github.com/matrix-org/synapse/issues/450))
+-   Add a `display_name` to third-party invites. ([\#449](https://github.com/matrix-org/synapse/issues/449))
+-   Send more information to the identity server for third-party invites so that it can send richer messages to the invitee. ([\#446](https://github.com/matrix-org/synapse/issues/446))
+-   Cache the responses to `/initialSync` for 5 minutes. If a client retries a request to `/initialSync` before a response was computed to the first request then the same response is used for both requests. ([\#457](https://github.com/matrix-org/synapse/issues/457))
+-   Fix a bug where synapse would always request the signing keys of remote servers even when the key was cached locally. ([\#452](https://github.com/matrix-org/synapse/issues/452))
+-   Fix 500 when paginating search results. ([\#447](https://github.com/matrix-org/synapse/issues/447))
+-   Fix a bug where synapse was leaking raw email address in third-party invites. ([\#448](https://github.com/matrix-org/synapse/issues/448))
 
 Changes in synapse v0.12.0-rc2 (2015-12-14)
 ===========================================
 
--   Add caches for whether rooms have been forgotten by a user (PR #434)
--   Remove instructions to use `--process-dependency-link` since all of the dependencies of synapse are on PyPI (PR #436)
--   Parallelise the processing of `/sync` requests (PR #437)
--   Fix race updating presence in `/events` (PR #444)
--   Fix bug back-populating search results (PR #441)
--   Fix bug calculating state in `/sync` requests (PR #442)
+-   Add caches for whether rooms have been forgotten by a user. ([\#434](https://github.com/matrix-org/synapse/issues/434))
+-   Remove instructions to use `--process-dependency-link` since all of the dependencies of synapse are on PyPI. ([\#436](https://github.com/matrix-org/synapse/issues/436))
+-   Parallelise the processing of `/sync` requests. ([\#437](https://github.com/matrix-org/synapse/issues/437))
+-   Fix race updating presence in `/events`. ([\#444](https://github.com/matrix-org/synapse/issues/444))
+-   Fix bug back-populating search results. ([\#441](https://github.com/matrix-org/synapse/issues/441))
+-   Fix bug calculating state in `/sync` requests. ([\#442](https://github.com/matrix-org/synapse/issues/442))
 
 Changes in synapse v0.12.0-rc1 (2015-12-10)
 ===========================================
 
--   Host the client APIs released as r0 by <https://matrix.org/docs/spec/r0.0.0/client_server.html> on paths prefixed by `/_matrix/client/r0`. (PR #430, PR #415, PR #400)
+-   Host the client APIs released as r0 by <https://matrix.org/docs/spec/r0.0.0/client_server.html> on paths prefixed by `/_matrix/client/r0`. ([\#430](https://github.com/matrix-org/synapse/issues/430), [\#415](https://github.com/matrix-org/synapse/issues/415), [\#400](https://github.com/matrix-org/synapse/issues/400))
 -   Updates the client APIs to match r0 of the matrix specification.
-    -   All APIs return events in the new event format, old APIs also include the fields needed to parse the event using the old format for compatibility. (PR #402)
-    -   Search results are now given as a JSON array rather than a JSON object (PR #405)
-    -   Miscellaneous changes to search (PR #403, PR #406, PR #412)
-    -   Filter JSON objects may now be passed as query parameters to `/sync` (PR #431)
-    -   Fix implementation of `/admin/whois` (PR #418)
-    -   Only include the rooms that user has left in `/sync` if the client requests them in the filter (PR #423)
-    -   Don't push for `m.room.message` by default (PR #411)
-    -   Add API for setting per account user data (PR #392)
-    -   Allow users to forget rooms (PR #385)
+    -   All APIs return events in the new event format, old APIs also include the fields needed to parse the event using the old format for compatibility. ([\#402](https://github.com/matrix-org/synapse/issues/402))
+    -   Search results are now given as a JSON array rather than a JSON object. ([\#405](https://github.com/matrix-org/synapse/issues/405))
+    -   Miscellaneous changes to search. ([\#403](https://github.com/matrix-org/synapse/issues/403), [\#406](https://github.com/matrix-org/synapse/issues/406), [\#412](https://github.com/matrix-org/synapse/issues/412))
+    -   Filter JSON objects may now be passed as query parameters to `/sync`. ([\#431](https://github.com/matrix-org/synapse/issues/431))
+    -   Fix implementation of `/admin/whois`. ([\#418](https://github.com/matrix-org/synapse/issues/418))
+    -   Only include the rooms that user has left in `/sync` if the client requests them in the filter. ([\#423](https://github.com/matrix-org/synapse/issues/423))
+    -   Don't push for `m.room.message` by default. ([\#411](https://github.com/matrix-org/synapse/issues/411))
+    -   Add API for setting per account user data. ([\#392](https://github.com/matrix-org/synapse/issues/392))
+    -   Allow users to forget rooms. ([\#385](https://github.com/matrix-org/synapse/issues/385))
 -   Performance improvements and monitoring:
-    -   Add per-request counters for CPU time spent on the main python thread. (PR #421, PR #420)
-    -   Add per-request counters for time spent in the database (PR #429)
-    -   Make state updates in the C+S API idempotent (PR #416)
-    -   Only fire `user_joined_room` if the user has actually joined. (PR #410)
-    -   Reuse a single http client, rather than creating new ones (PR #413)
--   Fixed a bug upgrading from older versions of synapse on postgresql (PR #417)
+    -   Add per-request counters for CPU time spent on the main python thread. ([\#421](https://github.com/matrix-org/synapse/issues/421), [\#420](https://github.com/matrix-org/synapse/issues/420))
+    -   Add per-request counters for time spent in the database. ([\#429](https://github.com/matrix-org/synapse/issues/429))
+    -   Make state updates in the C+S API idempotent. ([\#416](https://github.com/matrix-org/synapse/issues/416))
+    -   Only fire `user_joined_room` if the user has actually joined. ([\#410](https://github.com/matrix-org/synapse/issues/410))
+    -   Reuse a single http client, rather than creating new ones. ([\#413](https://github.com/matrix-org/synapse/issues/413))
+-   Fixed a bug upgrading from older versions of synapse on postgresql. ([\#417](https://github.com/matrix-org/synapse/issues/417))
 
 Changes in synapse v0.11.1 (2015-11-20)
 =======================================
 
--   Add extra options to search API (PR #394)
--   Fix bug where we did not correctly cap federation retry timers. This meant it could take several hours for servers to start talking to resurrected servers, even when they were receiving traffic from them (PR #393)
--   Don't advertise login token flow unless CAS is enabled. This caused issues where some clients would always use the fallback API if they did not recognize all login flows (PR #391)
--   Change /v2 sync API to rename `private_user_data` to `account_data` (PR #386)
--   Change /v2 sync API to remove the `event_map` and rename keys in `rooms` object (PR #389)
+-   Add extra options to search API. ([\#394](https://github.com/matrix-org/synapse/issues/394))
+-   Fix bug where we did not correctly cap federation retry timers. This meant it could take several hours for servers to start talking to resurrected servers, even when they were receiving traffic from them. ([\#393](https://github.com/matrix-org/synapse/issues/393))
+-   Don't advertise login token flow unless CAS is enabled. This caused issues where some clients would always use the fallback API if they did not recognize all login flows. ([\#391](https://github.com/matrix-org/synapse/issues/391))
+-   Change /v2 sync API to rename `private_user_data` to `account_data`. ([\#386](https://github.com/matrix-org/synapse/issues/386))
+-   Change /v2 sync API to remove the `event_map` and rename keys in `rooms` object. ([\#389](https://github.com/matrix-org/synapse/issues/389))
 
 Changes in synapse v0.11.0-r2 (2015-11-19)
 ==========================================
 
--   Fix bug in database port script (PR #387)
+-   Fix bug in database port script. ([\#387](https://github.com/matrix-org/synapse/issues/387))
 
 Changes in synapse v0.11.0-r1 (2015-11-18)
 ==========================================
 
--   Retry and fail federation requests more aggressively for requests that block client side requests (PR #384)
+-   Retry and fail federation requests more aggressively for requests that block client side requests. ([\#384](https://github.com/matrix-org/synapse/issues/384))
 
 Changes in synapse v0.11.0 (2015-11-17)
 =======================================
 
--   Change CAS login API (PR #349)
+-   Change CAS login API. ([\#349](https://github.com/matrix-org/synapse/issues/349))
 
 Changes in synapse v0.11.0-rc2 (2015-11-13)
 ===========================================
 
--   Various changes to /sync API response format (PR #373)
--   Fix regression when setting display name in newly joined room over federation (PR #368)
--   Fix problem where /search was slow when using SQLite (PR #366)
+-   Various changes to /sync API response format. ([\#373](https://github.com/matrix-org/synapse/issues/373))
+-   Fix regression when setting display name in newly joined room over federation. ([\#368](https://github.com/matrix-org/synapse/issues/368))
+-   Fix problem where /search was slow when using SQLite. ([\#366](https://github.com/matrix-org/synapse/issues/366))
 
 Changes in synapse v0.11.0-rc1 (2015-11-11)
 ===========================================
 
--   Add Search API (PR #307, #324, #327, #336, #350, #359)
--   Add `archived` state to v2 /sync API (PR #316)
--   Add ability to reject invites (PR #317)
--   Add config option to disable password login (PR #322)
--   Add the login fallback API (PR #330)
--   Add room context API (PR #334)
--   Add room tagging support (PR #335)
--   Update v2 /sync API to match spec (PR #305, #316, #321, #332, #337, #341)
--   Change retry schedule for application services (PR #320)
--   Change retry schedule for remote servers (PR #340)
--   Fix bug where we hosted static content in the incorrect place (PR #329)
--   Fix bug where we didn't increment retry interval for remote servers (PR #343)
+-   Add Search API. ([\#307](https://github.com/matrix-org/synapse/issues/307), [\#324](https://github.com/matrix-org/synapse/issues/324), [\#327](https://github.com/matrix-org/synapse/issues/327), [\#336](https://github.com/matrix-org/synapse/issues/336), [\#350](https://github.com/matrix-org/synapse/issues/350), [\#359](https://github.com/matrix-org/synapse/issues/359))
+-   Add `archived` state to v2 /sync API. ([\#316](https://github.com/matrix-org/synapse/issues/316))
+-   Add ability to reject invites. ([\#317](https://github.com/matrix-org/synapse/issues/317))
+-   Add config option to disable password login. ([\#322](https://github.com/matrix-org/synapse/issues/322))
+-   Add the login fallback API. ([\#330](https://github.com/matrix-org/synapse/issues/330))
+-   Add room context API. ([\#334](https://github.com/matrix-org/synapse/issues/334))
+-   Add room tagging support. ([\#335](https://github.com/matrix-org/synapse/issues/335))
+-   Update v2 /sync API to match spec. ([\#305](https://github.com/matrix-org/synapse/issues/305), [\#316](https://github.com/matrix-org/synapse/issues/316), [\#321](https://github.com/matrix-org/synapse/issues/321), [\#332](https://github.com/matrix-org/synapse/issues/332), [\#337](https://github.com/matrix-org/synapse/issues/337), [\#341](https://github.com/matrix-org/synapse/issues/341))
+-   Change retry schedule for application services. ([\#320](https://github.com/matrix-org/synapse/issues/320))
+-   Change retry schedule for remote servers. ([\#340](https://github.com/matrix-org/synapse/issues/340))
+-   Fix bug where we hosted static content in the incorrect place. ([\#329](https://github.com/matrix-org/synapse/issues/329))
+-   Fix bug where we didn't increment retry interval for remote servers. ([\#343](https://github.com/matrix-org/synapse/issues/343))
 
 Changes in synapse v0.10.1-rc1 (2015-10-15)
 ===========================================
 
--   Add support for CAS, thanks to Steven Hammerton (PR #295, #296)
--   Add support for using macaroons for `access_token` (PR #256, #229)
--   Add support for `m.room.canonical_alias` (PR #287)
--   Add support for viewing the history of rooms that they have left. (PR #276, #294)
--   Add support for refresh tokens (PR #240)
--   Add flag on creation which disables federation of the room (PR #279)
--   Add some room state to invites. (PR #275)
--   Atomically persist events when joining a room over federation (PR #283)
--   Change default history visibility for private rooms (PR #271)
--   Allow users to redact their own sent events (PR #262)
--   Use tox for tests (PR #247)
--   Split up syutil into separate libraries (PR #243)
+-   Add support for CAS, thanks to Steven Hammerton. ([\#295](https://github.com/matrix-org/synapse/issues/295), [\#296](https://github.com/matrix-org/synapse/issues/296))
+-   Add support for using macaroons for `access_token`. ([\#256](https://github.com/matrix-org/synapse/issues/256), [\#229](https://github.com/matrix-org/synapse/issues/229))
+-   Add support for `m.room.canonical_alias`. ([\#287](https://github.com/matrix-org/synapse/issues/287))
+-   Add support for viewing the history of rooms that they have left. ([\#276](https://github.com/matrix-org/synapse/issues/276), [\#294](https://github.com/matrix-org/synapse/issues/294))
+-   Add support for refresh tokens. ([\#240](https://github.com/matrix-org/synapse/issues/240))
+-   Add flag on creation which disables federation of the room. ([\#279](https://github.com/matrix-org/synapse/issues/279))
+-   Add some room state to invites. ([\#275](https://github.com/matrix-org/synapse/issues/275))
+-   Atomically persist events when joining a room over federation. ([\#283](https://github.com/matrix-org/synapse/issues/283))
+-   Change default history visibility for private rooms. ([\#271](https://github.com/matrix-org/synapse/issues/271))
+-   Allow users to redact their own sent events. ([\#262](https://github.com/matrix-org/synapse/issues/262))
+-   Use tox for tests. ([\#247](https://github.com/matrix-org/synapse/issues/247))
+-   Split up syutil into separate libraries. ([\#243](https://github.com/matrix-org/synapse/issues/243))
 
 Changes in synapse v0.10.0-r2 (2015-09-16)
 ==========================================
 
 -   Fix bug where we always fetched remote server signing keys instead of using ones in our cache.
 -   Fix adding threepids to an existing account.
--   Fix bug with invinting over federation where remote server was already in the room. (PR #281, SYN-392)
+-   Fix bug with inviting over federation where remote server was already in the room. ([\#281](https://github.com/matrix-org/synapse/issues/281), SYN-392)
 
 Changes in synapse v0.10.0-r1 (2015-09-08)
 ==========================================
@@ -3023,20 +3023,20 @@ Changes in synapse v0.10.0-rc5 (2015-08-27)
 Changes in synapse v0.10.0-rc4 (2015-08-27)
 ===========================================
 
--   Allow UTF-8 filenames for upload. (PR #259)
+-   Allow UTF-8 filenames for upload. ([\#259](https://github.com/matrix-org/synapse/issues/259))
 
 Changes in synapse v0.10.0-rc3 (2015-08-25)
 ===========================================
 
--   Add `--keys-directory` config option to specify where files such as certs and signing keys should be stored in, when using `--generate-config` or `--generate-keys`. (PR #250)
--   Allow `--config-path` to specify a directory, causing synapse to use all `*.yaml` files in the directory as config files. (PR #249)
--   Add `web_client_location` config option to specify static files to be hosted by synapse under `/_matrix/client`. (PR #245)
+-   Add `--keys-directory` config option to specify where files such as certs and signing keys should be stored in, when using `--generate-config` or `--generate-keys`. ([\#250](https://github.com/matrix-org/synapse/issues/250))
+-   Allow `--config-path` to specify a directory, causing synapse to use all `*.yaml` files in the directory as config files. ([\#249](https://github.com/matrix-org/synapse/issues/249))
+-   Add `web_client_location` config option to specify static files to be hosted by synapse under `/_matrix/client`. ([\#245](https://github.com/matrix-org/synapse/issues/245))
 -   Add helper utility to synapse to read and parse the config files and extract the value of a given key. For example:
 
         $ python -m synapse.config read server_name -c homeserver.yaml
         localhost
 
-    (PR #246)
+   . ([\#246](https://github.com/matrix-org/synapse/issues/246))
 
 Changes in synapse v0.10.0-rc2 (2015-08-24)
 ===========================================
@@ -3051,37 +3051,37 @@ Also see v0.9.4-rc1 changelog, which has been amalgamated into this release.
 
 General:
 
--   Upgrade to Twisted 15 (PR #173)
--   Add support for serving and fetching encryption keys over federation. (PR #208)
--   Add support for logging in with email address (PR #234)
--   Add support for new `m.room.canonical_alias` event. (PR #233)
+-   Upgrade to Twisted 15. ([\#173](https://github.com/matrix-org/synapse/issues/173))
+-   Add support for serving and fetching encryption keys over federation. ([\#208](https://github.com/matrix-org/synapse/issues/208))
+-   Add support for logging in with email address. ([\#234](https://github.com/matrix-org/synapse/issues/234))
+-   Add support for new `m.room.canonical_alias` event. ([\#233](https://github.com/matrix-org/synapse/issues/233))
 -   Change synapse to treat user IDs case insensitively during registration and login. (If two users already exist with case insensitive matching user ids, synapse will continue to require them to specify their user ids exactly.)
--   Error if a user tries to register with an email already in use. (PR #211)
--   Add extra and improve existing caches (PR #212, #219, #226, #228)
--   Batch various storage request (PR #226, #228)
--   Fix bug where we didn't correctly log the entity that triggered the request if the request came in via an application service (PR #230)
--   Fix bug where we needlessly regenerated the full list of rooms an AS is interested in. (PR #232)
--   Add support for AS's to use `v2_alpha` registration API (PR #210)
+-   Error if a user tries to register with an email already in use. ([\#211](https://github.com/matrix-org/synapse/issues/211))
+-   Add extra and improve existing caches. ([\#212](https://github.com/matrix-org/synapse/issues/212), [\#219](https://github.com/matrix-org/synapse/issues/219), [\#226](https://github.com/matrix-org/synapse/issues/226), [\#228](https://github.com/matrix-org/synapse/issues/228))
+-   Batch various storage requests. ([\#226](https://github.com/matrix-org/synapse/issues/226), [\#228](https://github.com/matrix-org/synapse/issues/228))
+-   Fix bug where we didn't correctly log the entity that triggered the request if the request came in via an application service. ([\#230](https://github.com/matrix-org/synapse/issues/230))
+-   Fix bug where we needlessly regenerated the full list of rooms an AS is interested in. ([\#232](https://github.com/matrix-org/synapse/issues/232))
+-   Add support for AS's to use `v2_alpha` registration API. ([\#210](https://github.com/matrix-org/synapse/issues/210))
 
 Configuration:
 
--   Add `--generate-keys` that will generate any missing cert and key files in the configuration files. This is equivalent to running `--generate-config` on an existing configuration file. (PR #220)
--   `--generate-config` now no longer requires a `--server-name` parameter when used on existing configuration files. (PR #220)
--   Add `--print-pidfile` flag that controls the printing of the pid to stdout of the demonised process. (PR #213)
+-   Add `--generate-keys` that will generate any missing cert and key files in the configuration files. This is equivalent to running `--generate-config` on an existing configuration file. ([\#220](https://github.com/matrix-org/synapse/issues/220))
+-   `--generate-config` now no longer requires a `--server-name` parameter when used on existing configuration files. ([\#220](https://github.com/matrix-org/synapse/issues/220))
+-   Add `--print-pidfile` flag that controls the printing of the pid to stdout of the daemonised process. ([\#213](https://github.com/matrix-org/synapse/issues/213))
 
 Media Repository:
 
--   Fix bug where we picked a lower resolution image than requested. (PR #205)
+-   Add support for specifying if the media repository should dynamically thumbnail images or not. ([\#206](https://github.com/matrix-org/synapse/issues/206))
+-   Fix bug where we picked a lower resolution image than requested. ([\#205](https://github.com/matrix-org/synapse/issues/205))
+-   Add support for specifying if a the media repository should dynamically thumbnail images or not. ([\#206](https://github.com/matrix-org/synapse/issues/206))
 
 Metrics:
 
--   Add statistics from the reactor to the metrics API. (PR #224, #225)
+-   Add statistics from the reactor to the metrics API. ([\#224](https://github.com/matrix-org/synapse/issues/224), [\#225](https://github.com/matrix-org/synapse/issues/225))
 
 Demo Homeservers:
 
--   Fix starting the demo homeservers without rate-limiting enabled. (PR #182)
--   Fix enabling registration on demo homeservers (PR #223)
+-   Fix starting the demo homeservers without rate-limiting enabled. ([\#182](https://github.com/matrix-org/synapse/issues/182))
+-   Fix enabling registration on demo homeservers. ([\#223](https://github.com/matrix-org/synapse/issues/223))
 
 Changes in synapse v0.9.4-rc1 (2015-07-21)
 ==========================================
@@ -3089,13 +3089,13 @@ Changes in synapse v0.9.4-rc1 (2015-07-21)
 General:
 
 -   Add basic implementation of receipts. (SPEC-99)
--   Add support for configuration presets in room creation API. (PR #203)
+-   Add support for configuration presets in room creation API. ([\#203](https://github.com/matrix-org/synapse/issues/203))
 -   Add auth event that limits the visibility of history for new users. (SPEC-134)
--   Add SAML2 login/registration support. (PR #201. Thanks Muthu Subramanian!)
--   Add client side key management APIs for end to end encryption. (PR #198)
+-   Add SAML2 login/registration support. Thanks Muthu Subramanian! ([\#201](https://github.com/matrix-org/synapse/issues/201))
+-   Add client side key management APIs for end to end encryption. ([\#198](https://github.com/matrix-org/synapse/issues/198))
 -   Change power level semantics so that you cannot kick, ban or change power levels of users that have equal or greater power level than you. (SYN-192)
--   Improve performance by bulk inserting events where possible. (PR #193)
--   Improve performance by bulk verifying signatures where possible. (PR #194)
+-   Improve performance by bulk inserting events where possible. ([\#193](https://github.com/matrix-org/synapse/issues/193))
+-   Improve performance by bulk verifying signatures where possible. ([\#194](https://github.com/matrix-org/synapse/issues/194))
 
 Configuration:
 
diff --git a/docs/postgres.md b/docs/postgres.md
index 02d4b9b162..ad7c6a0738 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -66,7 +66,7 @@ database:
   args:
     user: <user>
     password: <pass>
-    database: <db>
+    dbname: <db>
     host: <host>
     cp_min: 5
     cp_max: 10
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index a1ca5fa98c..7c4e742cd5 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -1447,7 +1447,7 @@ database:
   args:
     user: synapse_user
     password: secretpassword
-    database: synapse
+    dbname: synapse
     host: localhost
     port: 5432
     cp_min: 5
@@ -1526,7 +1526,7 @@ databases:
     args:
       user: synapse_user
       password: secretpassword
-      database: synapse_main
+      dbname: synapse_main
       host: localhost
       port: 5432
       cp_min: 5
@@ -1539,7 +1539,7 @@ databases:
     args:
       user: synapse_user
       password: secretpassword
-      database: synapse_state
+      dbname: synapse_state
       host: localhost
       port: 5432
       cp_min: 5
@@ -1753,6 +1753,19 @@ rc_third_party_invite:
   burst_count: 10
 ```
 ---
+### `rc_media_create`
+
+This option ratelimits creation of MXC URIs via the `/_matrix/media/v1/create`
+endpoint based on the account that's creating the media. Defaults to
+`per_second: 10`, `burst_count: 50`.
+
+Example configuration:
+```yaml
+rc_media_create:
+  per_second: 10
+  burst_count: 50
+```
+---
 ### `rc_federation`
 
 Defines limits on federation requests.
@@ -1814,6 +1827,27 @@ Example configuration:
 media_store_path: "DATADIR/media_store"
 ```
 ---
+### `max_pending_media_uploads`
+
+How many *pending media uploads* can a given user have? A pending media upload
+is a created MXC URI that (a) is not expired (the `unused_expires_at` timestamp
+has not passed) and (b) for which the media has not yet been uploaded. Defaults to 5.
+
+Example configuration:
+```yaml
+max_pending_media_uploads: 5
+```
+---
+### `unused_expiration_time`
+
+How long to wait before expiring created media IDs. Defaults to
+"24h".
+
+Example configuration:
+```yaml
+unused_expiration_time: "1h"
+```
+---
 ### `media_storage_providers`
 
 Media storage providers allow media to be stored in different
@@ -4219,6 +4253,9 @@ outbound_federation_restricted_to:
 Also see the [worker
 documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers)
 for more info.
+
+_Added in Synapse 1.89.0._
+
 ---
 ### `run_background_tasks_on`
 
diff --git a/mypy.ini b/mypy.ini
index fdfe9432fc..1a2b9ea410 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -37,8 +37,8 @@ files =
   build_rust.py
 
 [mypy-synapse.metrics._reactor_metrics]
-# This module imports select.epoll. That exists on Linux, but doesn't on macOS.
-# See https://github.com/matrix-org/synapse/pull/11771.
+# This module pokes at the internals of OS-specific classes, so to appease
+# mypy on different systems we add additional ignores.
 warn_unused_ignores = False
 
 [mypy-synapse.util.caches.treecache]
diff --git a/poetry.lock b/poetry.lock
index 00f5b4a20a..32b12d8076 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -417,19 +417,6 @@ files = [
 colorama = {version = "*", markers = "platform_system == \"Windows\""}
 
 [[package]]
-name = "click-default-group"
-version = "1.2.2"
-description = "Extends click.Group to invoke a command without explicit subcommand name"
-optional = false
-python-versions = "*"
-files = [
-    {file = "click-default-group-1.2.2.tar.gz", hash = "sha256:d9560e8e8dfa44b3562fbc9425042a0fd6d21956fcc2db0077f63f34253ab904"},
-]
-
-[package.dependencies]
-click = "*"
-
-[[package]]
 name = "colorama"
 version = "0.4.6"
 description = "Cross-platform colored terminal text."
@@ -1742,13 +1729,13 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes
 
 [[package]]
 name = "prometheus-client"
-version = "0.17.1"
+version = "0.18.0"
 description = "Python client for the Prometheus monitoring system."
 optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.8"
 files = [
-    {file = "prometheus_client-0.17.1-py3-none-any.whl", hash = "sha256:e537f37160f6807b8202a6fc4764cdd19bac5480ddd3e0d463c3002b34462101"},
-    {file = "prometheus_client-0.17.1.tar.gz", hash = "sha256:21e674f39831ae3f8acde238afd9a27a37d0d2fb5a28ea094f0ce25d2cbf2091"},
+    {file = "prometheus_client-0.18.0-py3-none-any.whl", hash = "sha256:8de3ae2755f890826f4b6479e5571d4f74ac17a81345fe69a6778fdb92579184"},
+    {file = "prometheus_client-0.18.0.tar.gz", hash = "sha256:35f7a8c22139e2bb7ca5a698e92d38145bc8dc74c1c0bf56f25cca886a764e17"},
 ]
 
 [package.extras]
@@ -2012,12 +1999,12 @@ plugins = ["importlib-metadata"]
 
 [[package]]
 name = "pyicu"
-version = "2.11"
+version = "2.12"
 description = "Python extension wrapping the ICU C++ API"
 optional = true
 python-versions = "*"
 files = [
-    {file = "PyICU-2.11.tar.gz", hash = "sha256:3ab531264cfe9132b3d2ac5d708da9a4649d25f6e6813730ac88cf040a08a844"},
+    {file = "PyICU-2.12.tar.gz", hash = "sha256:bd7ab5efa93ad692e6daa29cd249364e521218329221726a113ca3cb281c8611"},
 ]
 
 [[package]]
@@ -2439,28 +2426,28 @@ files = [
 
 [[package]]
 name = "ruff"
-version = "0.0.292"
-description = "An extremely fast Python linter, written in Rust."
+version = "0.1.4"
+description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "ruff-0.0.292-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:02f29db018c9d474270c704e6c6b13b18ed0ecac82761e4fcf0faa3728430c96"},
-    {file = "ruff-0.0.292-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:69654e564342f507edfa09ee6897883ca76e331d4bbc3676d8a8403838e9fade"},
-    {file = "ruff-0.0.292-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c3c91859a9b845c33778f11902e7b26440d64b9d5110edd4e4fa1726c41e0a4"},
-    {file = "ruff-0.0.292-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f4476f1243af2d8c29da5f235c13dca52177117935e1f9393f9d90f9833f69e4"},
-    {file = "ruff-0.0.292-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be8eb50eaf8648070b8e58ece8e69c9322d34afe367eec4210fdee9a555e4ca7"},
-    {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:9889bac18a0c07018aac75ef6c1e6511d8411724d67cb879103b01758e110a81"},
-    {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bdfabd4334684a4418b99b3118793f2c13bb67bf1540a769d7816410402a205"},
-    {file = "ruff-0.0.292-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7c77c53bfcd75dbcd4d1f42d6cabf2485d2e1ee0678da850f08e1ab13081a8"},
-    {file = "ruff-0.0.292-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e087b24d0d849c5c81516ec740bf4fd48bf363cfb104545464e0fca749b6af9"},
-    {file = "ruff-0.0.292-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f160b5ec26be32362d0774964e218f3fcf0a7da299f7e220ef45ae9e3e67101a"},
-    {file = "ruff-0.0.292-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ac153eee6dd4444501c4bb92bff866491d4bfb01ce26dd2fff7ca472c8df9ad0"},
-    {file = "ruff-0.0.292-py3-none-musllinux_1_2_i686.whl", hash = "sha256:87616771e72820800b8faea82edd858324b29bb99a920d6aa3d3949dd3f88fb0"},
-    {file = "ruff-0.0.292-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b76deb3bdbea2ef97db286cf953488745dd6424c122d275f05836c53f62d4016"},
-    {file = "ruff-0.0.292-py3-none-win32.whl", hash = "sha256:e854b05408f7a8033a027e4b1c7f9889563dd2aca545d13d06711e5c39c3d003"},
-    {file = "ruff-0.0.292-py3-none-win_amd64.whl", hash = "sha256:f27282bedfd04d4c3492e5c3398360c9d86a295be00eccc63914438b4ac8a83c"},
-    {file = "ruff-0.0.292-py3-none-win_arm64.whl", hash = "sha256:7f67a69c8f12fbc8daf6ae6d36705037bde315abf8b82b6e1f4c9e74eb750f68"},
-    {file = "ruff-0.0.292.tar.gz", hash = "sha256:1093449e37dd1e9b813798f6ad70932b57cf614e5c2b5c51005bf67d55db33ac"},
+    {file = "ruff-0.1.4-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:864958706b669cce31d629902175138ad8a069d99ca53514611521f532d91495"},
+    {file = "ruff-0.1.4-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:9fdd61883bb34317c788af87f4cd75dfee3a73f5ded714b77ba928e418d6e39e"},
+    {file = "ruff-0.1.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4eaca8c9cc39aa7f0f0d7b8fe24ecb51232d1bb620fc4441a61161be4a17539"},
+    {file = "ruff-0.1.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a9a1301dc43cbf633fb603242bccd0aaa34834750a14a4c1817e2e5c8d60de17"},
+    {file = "ruff-0.1.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78e8db8ab6f100f02e28b3d713270c857d370b8d61871d5c7d1702ae411df683"},
+    {file = "ruff-0.1.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:80fea754eaae06335784b8ea053d6eb8e9aac75359ebddd6fee0858e87c8d510"},
+    {file = "ruff-0.1.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bc02a480d4bfffd163a723698da15d1a9aec2fced4c06f2a753f87f4ce6969c"},
+    {file = "ruff-0.1.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9862811b403063765b03e716dac0fda8fdbe78b675cd947ed5873506448acea4"},
+    {file = "ruff-0.1.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58826efb8b3efbb59bb306f4b19640b7e366967a31c049d49311d9eb3a4c60cb"},
+    {file = "ruff-0.1.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:fdfd453fc91d9d86d6aaa33b1bafa69d114cf7421057868f0b79104079d3e66e"},
+    {file = "ruff-0.1.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:e8791482d508bd0b36c76481ad3117987301b86072158bdb69d796503e1c84a8"},
+    {file = "ruff-0.1.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:01206e361021426e3c1b7fba06ddcb20dbc5037d64f6841e5f2b21084dc51800"},
+    {file = "ruff-0.1.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:645591a613a42cb7e5c2b667cbefd3877b21e0252b59272ba7212c3d35a5819f"},
+    {file = "ruff-0.1.4-py3-none-win32.whl", hash = "sha256:99908ca2b3b85bffe7e1414275d004917d1e0dfc99d497ccd2ecd19ad115fd0d"},
+    {file = "ruff-0.1.4-py3-none-win_amd64.whl", hash = "sha256:1dfd6bf8f6ad0a4ac99333f437e0ec168989adc5d837ecd38ddb2cc4a2e3db8a"},
+    {file = "ruff-0.1.4-py3-none-win_arm64.whl", hash = "sha256:d98ae9ebf56444e18a3e3652b3383204748f73e247dea6caaf8b52d37e6b32da"},
+    {file = "ruff-0.1.4.tar.gz", hash = "sha256:21520ecca4cc555162068d87c747b8f95e1e95f8ecfcbbe59e8dd00710586315"},
 ]
 
 [[package]]
@@ -2580,13 +2567,13 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (
 
 [[package]]
 name = "setuptools-rust"
-version = "1.8.0"
+version = "1.8.1"
 description = "Setuptools Rust extension plugin"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "setuptools-rust-1.8.0.tar.gz", hash = "sha256:5e02b7a80058853bf64127314f6b97d0efed11e08b94c88ca639a20976f6adc4"},
-    {file = "setuptools_rust-1.8.0-py3-none-any.whl", hash = "sha256:95ec67edee2ca73233c9e75250e9d23a302aa23b4c8413dfd19c14c30d08f703"},
+    {file = "setuptools-rust-1.8.1.tar.gz", hash = "sha256:94b1dd5d5308b3138d5b933c3a2b55e6d6927d1a22632e509fcea9ddd0f7e486"},
+    {file = "setuptools_rust-1.8.1-py3-none-any.whl", hash = "sha256:b5324493949ccd6aa0c03890c5f6b5f02de4512e3ac1697d02e9a6c02b18aa8e"},
 ]
 
 [package.dependencies]
@@ -2906,18 +2893,17 @@ files = [
 
 [[package]]
 name = "towncrier"
-version = "23.6.0"
+version = "23.11.0"
 description = "Building newsfiles for your project."
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "towncrier-23.6.0-py3-none-any.whl", hash = "sha256:da552f29192b3c2b04d630133f194c98e9f14f0558669d427708e203fea4d0a5"},
-    {file = "towncrier-23.6.0.tar.gz", hash = "sha256:fc29bd5ab4727c8dacfbe636f7fb5dc53b99805b62da1c96b214836159ff70c1"},
+    {file = "towncrier-23.11.0-py3-none-any.whl", hash = "sha256:2e519ca619426d189e3c98c99558fe8be50c9ced13ea1fc20a4a353a95d2ded7"},
+    {file = "towncrier-23.11.0.tar.gz", hash = "sha256:13937c247e3f8ae20ac44d895cf5f96a60ad46cfdcc1671759530d7837d9ee5d"},
 ]
 
 [package.dependencies]
 click = "*"
-click-default-group = "*"
 importlib-resources = {version = ">=5", markers = "python_version < \"3.10\""}
 incremental = "*"
 jinja2 = "*"
@@ -2928,13 +2914,13 @@ dev = ["furo", "packaging", "sphinx (>=5)", "twisted"]
 
 [[package]]
 name = "treq"
-version = "22.2.0"
+version = "23.11.0"
 description = "High-level Twisted HTTP Client API"
 optional = false
 python-versions = ">=3.6"
 files = [
-    {file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"},
-    {file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"},
+    {file = "treq-23.11.0-py3-none-any.whl", hash = "sha256:f494c2218d61cab2cabbee37cd6606d3eea9d16cf14190323095c95d22c467e9"},
+    {file = "treq-23.11.0.tar.gz", hash = "sha256:0914ff929fd1632ce16797235260f8bc19d20ff7c459c1deabd65b8c68cbeac5"},
 ]
 
 [package.dependencies]
@@ -2942,11 +2928,11 @@ attrs = "*"
 hyperlink = ">=21.0.0"
 incremental = "*"
 requests = ">=2.1.0"
-Twisted = {version = ">=18.7.0", extras = ["tls"]}
+Twisted = {version = ">=22.10.0", extras = ["tls"]}
 
 [package.extras]
-dev = ["httpbin (==0.5.0)", "pep8", "pyflakes"]
-docs = ["sphinx (>=1.4.8)"]
+dev = ["httpbin (==0.7.0)", "pep8", "pyflakes", "werkzeug (==2.0.3)"]
+docs = ["sphinx (<7.0.0)"]
 
 [[package]]
 name = "twine"
@@ -2972,13 +2958,13 @@ urllib3 = ">=1.26.0"
 
 [[package]]
 name = "twisted"
-version = "23.8.0"
+version = "23.10.0"
 description = "An asynchronous networking framework written in Python"
 optional = false
-python-versions = ">=3.7.1"
+python-versions = ">=3.8.0"
 files = [
-    {file = "twisted-23.8.0-py3-none-any.whl", hash = "sha256:b8bdba145de120ffb36c20e6e071cce984e89fba798611ed0704216fb7f884cd"},
-    {file = "twisted-23.8.0.tar.gz", hash = "sha256:3c73360add17336a622c0d811c2a2ce29866b6e59b1125fd6509b17252098a24"},
+    {file = "twisted-23.10.0-py3-none-any.whl", hash = "sha256:4ae8bce12999a35f7fe6443e7f1893e6fe09588c8d2bed9c35cdce8ff2d5b444"},
+    {file = "twisted-23.10.0.tar.gz", hash = "sha256:987847a0790a2c597197613686e2784fd54167df3a55d0fb17c8412305d76ce5"},
 ]
 
 [package.dependencies]
@@ -2991,19 +2977,18 @@ incremental = ">=22.10.0"
 pyopenssl = {version = ">=21.0.0", optional = true, markers = "extra == \"tls\""}
 service-identity = {version = ">=18.1.0", optional = true, markers = "extra == \"tls\""}
 twisted-iocpsupport = {version = ">=1.0.2,<2", markers = "platform_system == \"Windows\""}
-typing-extensions = ">=3.10.0"
+typing-extensions = ">=4.2.0"
 zope-interface = ">=5"
 
 [package.extras]
-all-non-platform = ["twisted[conch,contextvars,http2,serial,test,tls]", "twisted[conch,contextvars,http2,serial,test,tls]"]
+all-non-platform = ["twisted[conch,http2,serial,test,tls]", "twisted[conch,http2,serial,test,tls]"]
 conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"]
-contextvars = ["contextvars (>=2.4,<3)"]
 dev = ["coverage (>=6b1,<7)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "twisted[dev-release]", "twistedchecker (>=0.7,<1.0)"]
-dev-release = ["pydoctor (>=23.4.0,<23.5.0)", "pydoctor (>=23.4.0,<23.5.0)", "readthedocs-sphinx-ext (>=2.2,<3.0)", "readthedocs-sphinx-ext (>=2.2,<3.0)", "sphinx (>=5,<7)", "sphinx (>=5,<7)", "sphinx-rtd-theme (>=1.2,<2.0)", "sphinx-rtd-theme (>=1.2,<2.0)", "towncrier (>=22.12,<23.0)", "towncrier (>=22.12,<23.0)", "urllib3 (<2)", "urllib3 (<2)"]
+dev-release = ["pydoctor (>=23.9.0,<23.10.0)", "pydoctor (>=23.9.0,<23.10.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"]
 gtk-platform = ["pygobject", "pygobject", "twisted[all-non-platform]", "twisted[all-non-platform]"]
 http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
 macos-platform = ["pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "twisted[all-non-platform]", "twisted[all-non-platform]"]
-mypy = ["mypy (==0.981)", "mypy-extensions (==0.4.3)", "mypy-zope (==0.3.11)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"]
+mypy = ["mypy (>=1.5.1,<1.6.0)", "mypy-zope (>=1.0.1,<1.1.0)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"]
 osx-platform = ["twisted[macos-platform]", "twisted[macos-platform]"]
 serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
 test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"]
@@ -3048,13 +3033,13 @@ twisted = "*"
 
 [[package]]
 name = "types-bleach"
-version = "6.1.0.0"
+version = "6.1.0.1"
 description = "Typing stubs for bleach"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "types-bleach-6.1.0.0.tar.gz", hash = "sha256:3cf0e55d4618890a00af1151f878b2e2a7a96433850b74e12bede7663d774532"},
-    {file = "types_bleach-6.1.0.0-py3-none-any.whl", hash = "sha256:f0bc75d0f6475036ac69afebf37c41d116dfba78dae55db80437caf0fcd35c28"},
+    {file = "types-bleach-6.1.0.1.tar.gz", hash = "sha256:1e43c437e734a90efe4f40ebfe831057599568d3b275939ffbd6094848a18a27"},
+    {file = "types_bleach-6.1.0.1-py3-none-any.whl", hash = "sha256:f83f80e0709f13d809a9c79b958a1089df9b99e68059287beb196e38967e4ddf"},
 ]
 
 [[package]]
@@ -3070,13 +3055,13 @@ files = [
 
 [[package]]
 name = "types-jsonschema"
-version = "4.19.0.3"
+version = "4.19.0.4"
 description = "Typing stubs for jsonschema"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-jsonschema-4.19.0.3.tar.gz", hash = "sha256:e0fc0f5d51fd0988bf193be42174a5376b0096820ff79505d9c1b66de23f0581"},
-    {file = "types_jsonschema-4.19.0.3-py3-none-any.whl", hash = "sha256:5cedbb661e5ca88d95b94b79902423e3f97a389c245e5fe0ab384122f27d56b9"},
+    {file = "types-jsonschema-4.19.0.4.tar.gz", hash = "sha256:994feb6632818259c4b5dbd733867824cb475029a6abc2c2b5201a2268b6e7d2"},
+    {file = "types_jsonschema-4.19.0.4-py3-none-any.whl", hash = "sha256:b73c3f4ba3cd8108602d1198a438e2698d5eb6b9db206ed89a33e24729b0abe7"},
 ]
 
 [package.dependencies]
@@ -3128,13 +3113,13 @@ files = [
 
 [[package]]
 name = "types-pyopenssl"
-version = "23.2.0.2"
+version = "23.3.0.0"
 description = "Typing stubs for pyOpenSSL"
 optional = false
-python-versions = "*"
+python-versions = ">=3.7"
 files = [
-    {file = "types-pyOpenSSL-23.2.0.2.tar.gz", hash = "sha256:6a010dac9ecd42b582d7dd2cc3e9e40486b79b3b64bb2fffba1474ff96af906d"},
-    {file = "types_pyOpenSSL-23.2.0.2-py3-none-any.whl", hash = "sha256:19536aa3debfbe25a918cf0d898e9f5fbbe6f3594a429da7914bf331deb1b342"},
+    {file = "types-pyOpenSSL-23.3.0.0.tar.gz", hash = "sha256:5ffb077fe70b699c88d5caab999ae80e192fe28bf6cda7989b7e79b1e4e2dcd3"},
+    {file = "types_pyOpenSSL-23.3.0.0-py3-none-any.whl", hash = "sha256:00171433653265843b7469ddb9f3c86d698668064cc33ef10537822156130ebf"},
 ]
 
 [package.dependencies]
@@ -3142,13 +3127,13 @@ cryptography = ">=35.0.0"
 
 [[package]]
 name = "types-pyyaml"
-version = "6.0.12.11"
+version = "6.0.12.12"
 description = "Typing stubs for PyYAML"
 optional = false
 python-versions = "*"
 files = [
-    {file = "types-PyYAML-6.0.12.11.tar.gz", hash = "sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b"},
-    {file = "types_PyYAML-6.0.12.11-py3-none-any.whl", hash = "sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d"},
+    {file = "types-PyYAML-6.0.12.12.tar.gz", hash = "sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062"},
+    {file = "types_PyYAML-6.0.12.12-py3-none-any.whl", hash = "sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24"},
 ]
 
 [[package]]
@@ -3448,4 +3433,4 @@ user-search = ["pyicu"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.8.0"
-content-hash = "a08543c65f18cc7e9dea648e89c18ab88fc1747aa2e029aa208f777fc3db06dd"
+content-hash = "369455d6a67753a6bcfbad3cd86801b1dd02896d0180080e2ba9501e007353ec"
diff --git a/pyproject.toml b/pyproject.toml
index 7ff7178777..47c255e395 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -192,7 +192,7 @@ phonenumbers = ">=8.2.0"
 # we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
 prometheus-client = ">=0.4.0"
 # we use `order`, which arrived in attrs 19.2.0.
-# Note: 21.1.0 broke `/sync`, see #9936
+# Note: 21.1.0 broke `/sync`, see https://github.com/matrix-org/synapse/issues/9936
 attrs = ">=19.2.0,!=21.1.0"
 netaddr = ">=0.7.18"
 # Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
@@ -321,7 +321,7 @@ all = [
 # This helps prevents merge conflicts when running a batch of dependabot updates.
 isort = ">=5.10.1"
 black = ">=22.7.0"
-ruff = "0.0.292"
+ruff = "0.1.4"
 # Type checking only works with the pydantic.v1 compat module from pydantic v2
 pydantic = "^2"
 
@@ -357,7 +357,7 @@ commonmark = ">=0.9.1"
 pygithub = ">=1.55"
 # The following are executed as commands by the release script.
 twine = "*"
-# Towncrier min version comes from #3425. Rationale unclear.
+# Towncrier min version comes from https://github.com/matrix-org/synapse/pull/3425. Rationale unclear.
 towncrier = ">=18.6.0rc1"
 
 # Used for checking the Poetry lockfile
@@ -377,11 +377,12 @@ furo = ">=2022.12.7,<2024.0.0"
 
 [build-system]
 # The upper bounds here are defensive, intended to prevent situations like
-# #13849 and #14079 where we see buildtime or runtime errors caused by build
-# system changes.
+# https://github.com/matrix-org/synapse/issues/13849 and
+# https://github.com/matrix-org/synapse/issues/14079 where we see buildtime or
+# runtime errors caused by build system changes.
 # We are happy to raise these upper bounds upon request,
 # provided we check that it's safe to do so (i.e. that CI passes).
-requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.8.0"]
+requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.8.1"]
 build-backend = "poetry.core.masonry.api"
 
 
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index ef8590db65..75fe0183f6 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -348,8 +348,7 @@ class Porter:
                     backward_chunk = 0
                     already_ported = 0
             else:
-                forward_chunk = row["forward_rowid"]
-                backward_chunk = row["backward_rowid"]
+                forward_chunk, backward_chunk = row
 
             if total_to_port is None:
                 already_ported, total_to_port = await self._get_total_count_to_port(
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index fdb2955be8..fbd8b16ec3 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -83,6 +83,8 @@ class Codes(str, Enum):
     USER_DEACTIVATED = "M_USER_DEACTIVATED"
     # USER_LOCKED = "M_USER_LOCKED"
     USER_LOCKED = "ORG_MATRIX_MSC3939_USER_LOCKED"
+    NOT_YET_UPLOADED = "M_NOT_YET_UPLOADED"
+    CANNOT_OVERWRITE_MEDIA = "M_CANNOT_OVERWRITE_MEDIA"
 
     # Part of MSC3848
     # https://github.com/matrix-org/matrix-spec-proposals/pull/3848
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index f7c80eee21..bcfb7a7200 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -104,8 +104,8 @@ logger = logging.getLogger("synapse.app.generic_worker")
 
 
 class GenericWorkerStore(
-    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
-    # rather than going via the correct worker.
+    # FIXME(https://github.com/matrix-org/synapse/issues/3714): We need to add
+    # UserDirectoryStore as we write directly rather than going via the correct worker.
     UserDirectoryStore,
     StatsStore,
     UIAuthWorkerStore,
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 4efbaeac0d..b1fcaf71a3 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -204,3 +204,10 @@ class RatelimitConfig(Config):
             "rc_third_party_invite",
             defaults={"per_second": 0.0025, "burst_count": 5},
         )
+
+        # Ratelimit create media requests:
+        self.rc_media_create = RatelimitSettings.parse(
+            config,
+            "rc_media_create",
+            defaults={"per_second": 10, "burst_count": 50},
+        )
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index f6cfdd3e04..839c026d70 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -141,6 +141,12 @@ class ContentRepositoryConfig(Config):
             "prevent_media_downloads_from", []
         )
 
+        self.unused_expiration_time = self.parse_duration(
+            config.get("unused_expiration_time", "24h")
+        )
+
+        self.max_pending_media_uploads = config.get("max_pending_media_uploads", 5)
+
         self.media_store_path = self.ensure_directory(
             config.get("media_store_path", "media_store")
         )
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 8e3064c7e7..2bb2c64ebe 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -84,7 +84,7 @@ from synapse.replication.http.federation import (
 from synapse.storage.databases.main.lock import Lock
 from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
 from synapse.storage.roommember import MemberSummary
-from synapse.types import JsonDict, StateMap, get_domain_from_id, UserID
+from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id
 from synapse.util import unwrapFirstError
 from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
 from synapse.util.caches.response_cache import ResponseCache
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 7980d1a322..948fde6658 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -581,14 +581,14 @@ class FederationSender(AbstractFederationSender):
                                 "get_joined_hosts", str(sg)
                             )
                             if destinations is None:
-                                # Add logging to help track down #13444
+                                # Add logging to help track down https://github.com/matrix-org/synapse/issues/13444
                                 logger.info(
                                     "Unexpectedly did not have cached destinations for %s / %s",
                                     sg,
                                     event.event_id,
                                 )
                         else:
-                            # Add logging to help track down #13444
+                            # Add logging to help track down https://github.com/matrix-org/synapse/issues/13444
                             logger.info(
                                 "Unexpectedly did not have cached prev group for %s",
                                 event.event_id,
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 2c2baeac67..d06f8e3296 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -283,7 +283,7 @@ class AdminHandler:
                 start, limit, user_id
             )
             for media in media_ids:
-                writer.write_media_id(media["media_id"], media)
+                writer.write_media_id(media.media_id, attr.asdict(media))
 
             logger.info(
                 "[%s] Written %d media_ids of %s",
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 93472d0117..98e6e42563 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -383,7 +383,7 @@ class DeviceWorkerHandler:
         )
 
     DEVICE_MSGS_DELETE_BATCH_LIMIT = 1000
-    DEVICE_MSGS_DELETE_SLEEP_MS = 1000
+    DEVICE_MSGS_DELETE_SLEEP_MS = 100
 
     async def _delete_device_messages(
         self,
@@ -396,15 +396,17 @@ class DeviceWorkerHandler:
         up_to_stream_id = task.params["up_to_stream_id"]
 
         # Delete the messages in batches to avoid too much DB load.
+        from_stream_id = None
         while True:
-            res = await self.store.delete_messages_for_device(
+            from_stream_id, _ = await self.store.delete_messages_for_device_between(
                 user_id=user_id,
                 device_id=device_id,
-                up_to_stream_id=up_to_stream_id,
+                from_stream_id=from_stream_id,
+                to_stream_id=up_to_stream_id,
                 limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT,
             )
 
-            if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT:
+            if from_stream_id is None:
                 return TaskStatus.COMPLETE, None, None
 
             await self.clock.sleep(DeviceHandler.DEVICE_MSGS_DELETE_SLEEP_MS / 1000.0)
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index d06524495f..70fa931d17 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -1450,19 +1450,25 @@ class E2eKeysHandler:
 
         return desired_key_data
 
-    async def is_cross_signing_set_up_for_user(self, user_id: str) -> bool:
+    async def check_cross_signing_setup(self, user_id: str) -> Tuple[bool, bool]:
         """Checks if the user has cross-signing set up
 
         Args:
             user_id: The user to check
 
-        Returns:
-            True if the user has cross-signing set up, False otherwise
+        Returns: a 2-tuple of booleans
+            - whether the user has cross-signing set up, and
+            - whether the user's master cross-signing key may be replaced without UIA.
         """
-        existing_master_key = await self.store.get_e2e_cross_signing_key(
-            user_id, "master"
-        )
-        return existing_master_key is not None
+        (
+            exists,
+            ts_replacable_without_uia_before,
+        ) = await self.store.get_master_cross_signing_key_updatable_before(user_id)
+
+        if ts_replacable_without_uia_before is None:
+            return exists, False
+        else:
+            return exists, self.clock.time_msec() < ts_replacable_without_uia_before
 
 
 def _check_cross_signing_key(
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 0cc8e990d9..f4c17894aa 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -88,7 +88,7 @@ from synapse.types import (
 )
 from synapse.types.state import StateFilter
 from synapse.util.async_helpers import Linearizer, concurrently_execute
-from synapse.util.iterutils import batch_iter, partition
+from synapse.util.iterutils import batch_iter, partition, sorted_topologically_batched
 from synapse.util.retryutils import NotRetryingDestination
 from synapse.util.stringutils import shortstr
 
@@ -748,7 +748,7 @@ class FederationEventHandler:
         # fetching fresh state for the room if the missing event
         # can't be found, which slightly reduces our security.
         # it may also increase our DAG extremity count for the room,
-        # causing additional state resolution?  See #1760.
+        # causing additional state resolution?  See https://github.com/matrix-org/synapse/issues/1760.
         # However, fetching state doesn't hold the linearizer lock
         # apparently.
         #
@@ -1669,14 +1669,13 @@ class FederationEventHandler:
 
         # XXX: it might be possible to kick this process off in parallel with fetching
         # the events.
-        while event_map:
-            # build a list of events whose auth events are not in the queue.
-            roots = tuple(
-                ev
-                for ev in event_map.values()
-                if not any(aid in event_map for aid in ev.auth_event_ids())
-            )
 
+        # We need to persist an event's auth events before the event.
+        auth_graph = {
+            ev: [event_map[e_id] for e_id in ev.auth_event_ids() if e_id in event_map]
+            for ev in event_map.values()
+        }
+        for roots in sorted_topologically_batched(event_map.values(), auth_graph):
             if not roots:
                 # if *none* of the remaining events are ready, that means
                 # we have a loop. This either means a bug in our logic, or that
@@ -1698,9 +1697,6 @@ class FederationEventHandler:
 
             await self._auth_and_persist_outliers_inner(room_id, roots)
 
-            for ev in roots:
-                del event_map[ev.event_id]
-
     async def _auth_and_persist_outliers_inner(
         self, room_id: str, fetched_events: Collection[EventBase]
     ) -> None:
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 202beee738..4137fd50b1 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -1816,7 +1816,7 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
                 # the same token repeatedly.
                 #
                 # Hence this guard where we just return nothing so that the sync
-                # doesn't return. C.f. #5503.
+                # doesn't return. C.f. https://github.com/matrix-org/synapse/issues/5503.
                 return [], max_token
 
             # Figure out which other users this user should explicitly receive
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index c2109036ec..1027fbfd28 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 import logging
 import random
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, Union
 
 from synapse.api.errors import (
     AuthError,
@@ -23,6 +23,7 @@ from synapse.api.errors import (
     StoreError,
     SynapseError,
 )
+from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia
 from synapse.types import JsonDict, Requester, UserID, create_requester
 from synapse.util.caches.descriptors import cached
 from synapse.util.stringutils import parse_and_validate_mxc_uri
@@ -306,7 +307,9 @@ class ProfileHandler:
             server_name = host
 
         if self._is_mine_server_name(server_name):
-            media_info = await self.store.get_local_media(media_id)
+            media_info: Optional[
+                Union[LocalMedia, RemoteMedia]
+            ] = await self.store.get_local_media(media_id)
         else:
             media_info = await self.store.get_cached_remote_media(server_name, media_id)
 
@@ -322,12 +325,12 @@ class ProfileHandler:
 
         if self.max_avatar_size:
             # Ensure avatar does not exceed max allowed avatar size
-            if media_info["media_length"] > self.max_avatar_size:
+            if media_info.media_length > self.max_avatar_size:
                 logger.warning(
                     "Forbidding avatar change to %s: %d bytes is above the allowed size "
                     "limit",
                     mxc,
-                    media_info["media_length"],
+                    media_info.media_length,
                 )
                 return False
 
@@ -335,12 +338,12 @@ class ProfileHandler:
             # Ensure the avatar's file type is allowed
             if (
                 self.allowed_avatar_mimetypes
-                and media_info["media_type"] not in self.allowed_avatar_mimetypes
+                and media_info.media_type not in self.allowed_avatar_mimetypes
             ):
                 logger.warning(
                     "Forbidding avatar change to %s: mimetype %s not allowed",
                     mxc,
-                    media_info["media_type"],
+                    media_info.media_type,
                 )
                 return False
 
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 6d680b0795..afd8138caf 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -269,7 +269,7 @@ class RoomCreationHandler:
         self,
         requester: Requester,
         old_room_id: str,
-        old_room: Dict[str, Any],
+        old_room: Tuple[bool, str, bool],
         new_room_id: str,
         new_version: RoomVersion,
         tombstone_event: EventBase,
@@ -279,7 +279,7 @@ class RoomCreationHandler:
         Args:
             requester: the user requesting the upgrade
             old_room_id: the id of the room to be replaced
-            old_room: a dict containing room information for the room to be replaced,
+            old_room: a tuple containing room information for the room to be replaced,
                 as returned by `RoomWorkerStore.get_room`.
             new_room_id: the id of the replacement room
             new_version: the version to upgrade the room to
@@ -299,7 +299,7 @@ class RoomCreationHandler:
         await self.store.store_room(
             room_id=new_room_id,
             room_creator_user_id=user_id,
-            is_public=old_room["is_public"],
+            is_public=old_room[0],
             room_version=new_version,
         )
 
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index 36e2db8975..2947e154be 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -33,6 +33,7 @@ from synapse.api.errors import (
     RequestSendFailed,
     SynapseError,
 )
+from synapse.storage.databases.main.room import LargestRoomStats
 from synapse.types import JsonDict, JsonMapping, ThirdPartyInstanceID
 from synapse.util.caches.descriptors import _CacheContext, cached
 from synapse.util.caches.response_cache import ResponseCache
@@ -170,26 +171,24 @@ class RoomListHandler:
             ignore_non_federatable=from_federation,
         )
 
-        def build_room_entry(room: JsonDict) -> JsonDict:
+        def build_room_entry(room: LargestRoomStats) -> JsonDict:
             entry = {
-                "room_id": room["room_id"],
-                "name": room["name"],
-                "topic": room["topic"],
-                "canonical_alias": room["canonical_alias"],
-                "num_joined_members": room["joined_members"],
-                "avatar_url": room["avatar"],
-                "world_readable": room["history_visibility"]
+                "room_id": room.room_id,
+                "name": room.name,
+                "topic": room.topic,
+                "canonical_alias": room.canonical_alias,
+                "num_joined_members": room.joined_members,
+                "avatar_url": room.avatar,
+                "world_readable": room.history_visibility
                 == HistoryVisibility.WORLD_READABLE,
-                "guest_can_join": room["guest_access"] == "can_join",
-                "join_rule": room["join_rules"],
-                "room_type": room["room_type"],
+                "guest_can_join": room.guest_access == "can_join",
+                "join_rule": room.join_rules,
+                "room_type": room.room_type,
             }
 
             # Filter out Nones – rather omit the field altogether
             return {k: v for k, v in entry.items() if v is not None}
 
-        results = [build_room_entry(r) for r in results]
-
         response: JsonDict = {}
         num_results = len(results)
         if limit is not None:
@@ -212,33 +211,33 @@ class RoomListHandler:
                     # If there was a token given then we assume that there
                     # must be previous results.
                     response["prev_batch"] = RoomListNextBatch(
-                        last_joined_members=initial_entry["num_joined_members"],
-                        last_room_id=initial_entry["room_id"],
+                        last_joined_members=initial_entry.joined_members,
+                        last_room_id=initial_entry.room_id,
                         direction_is_forward=False,
                     ).to_token()
 
                 if more_to_come:
                     response["next_batch"] = RoomListNextBatch(
-                        last_joined_members=final_entry["num_joined_members"],
-                        last_room_id=final_entry["room_id"],
+                        last_joined_members=final_entry.joined_members,
+                        last_room_id=final_entry.room_id,
                         direction_is_forward=True,
                     ).to_token()
             else:
                 if has_batch_token:
                     response["next_batch"] = RoomListNextBatch(
-                        last_joined_members=final_entry["num_joined_members"],
-                        last_room_id=final_entry["room_id"],
+                        last_joined_members=final_entry.joined_members,
+                        last_room_id=final_entry.room_id,
                         direction_is_forward=True,
                     ).to_token()
 
                 if more_to_come:
                     response["prev_batch"] = RoomListNextBatch(
-                        last_joined_members=initial_entry["num_joined_members"],
-                        last_room_id=initial_entry["room_id"],
+                        last_joined_members=initial_entry.joined_members,
+                        last_room_id=initial_entry.room_id,
                         direction_is_forward=False,
                     ).to_token()
 
-        response["chunk"] = results
+        response["chunk"] = [build_room_entry(r) for r in results]
 
         response["total_room_count_estimate"] = await self.store.count_public_rooms(
             network_tuple,
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 918eb203e2..eddc2af9ba 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -1260,7 +1260,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         # Add new room to the room directory if the old room was there
         # Remove old room from the room directory
         old_room = await self.store.get_room(old_room_id)
-        if old_room is not None and old_room["is_public"]:
+        # If the old room exists and is public.
+        if old_room is not None and old_room[0]:
             await self.store.set_room_is_public(old_room_id, False)
             await self.store.set_room_is_public(room_id, True)
 
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index dd559b4c45..1dfb12e065 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -703,24 +703,24 @@ class RoomSummaryHandler:
         # there should always be an entry
         assert stats is not None, "unable to retrieve stats for %s" % (room_id,)
 
-        entry = {
-            "room_id": stats["room_id"],
-            "name": stats["name"],
-            "topic": stats["topic"],
-            "canonical_alias": stats["canonical_alias"],
-            "num_joined_members": stats["joined_members"],
-            "avatar_url": stats["avatar"],
-            "join_rule": stats["join_rules"],
+        entry: JsonDict = {
+            "room_id": stats.room_id,
+            "name": stats.name,
+            "topic": stats.topic,
+            "canonical_alias": stats.canonical_alias,
+            "num_joined_members": stats.joined_members,
+            "avatar_url": stats.avatar,
+            "join_rule": stats.join_rules,
             "world_readable": (
-                stats["history_visibility"] == HistoryVisibility.WORLD_READABLE
+                stats.history_visibility == HistoryVisibility.WORLD_READABLE
             ),
-            "guest_can_join": stats["guest_access"] == "can_join",
-            "room_type": stats["room_type"],
+            "guest_can_join": stats.guest_access == "can_join",
+            "room_type": stats.room_type,
         }
 
         if self._msc3266_enabled:
-            entry["im.nheko.summary.version"] = stats["version"]
-            entry["im.nheko.summary.encryption"] = stats["encryption"]
+            entry["im.nheko.summary.version"] = stats.version
+            entry["im.nheko.summary.encryption"] = stats.encryption
 
         # Federation requests need to provide additional information so the
         # requested server is able to filter the response appropriately.
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index 62f2454f5d..389dc5298a 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -806,7 +806,7 @@ class SsoHandler:
                 media_id = profile["avatar_url"].split("/")[-1]
                 if self._is_mine_server_name(server_name):
                     media = await self._media_repo.store.get_local_media(media_id)
-                    if media is not None and upload_name == media["upload_name"]:
+                    if media is not None and upload_name == media.upload_name:
                         logger.info("skipping saving the user avatar")
                         return True
 
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 2f1bc5a015..bf0106c6e7 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -399,7 +399,7 @@ class SyncHandler:
         #
         # If that happens, we mustn't cache it, so that when the client comes back
         # with the same cache token, we don't immediately return the same empty
-        # result, causing a tightloop. (#8518)
+        # result, causing a tightloop. (https://github.com/matrix-org/synapse/issues/8518)
         if result.next_batch == since_token:
             cache_context.should_cache = False
 
@@ -1003,7 +1003,7 @@ class SyncHandler:
                     # always make sure we LL ourselves so we know we're in the room
                     # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
                     # We only need apply this on full state syncs given we disabled
-                    # LL for incr syncs in #3840.
+                    # LL for incr syncs in https://github.com/matrix-org/synapse/pull/3840.
                     # We don't insert ourselves into `members_to_fetch`, because in some
                     # rare cases (an empty event batch with a now_token after the user's
                     # leave in a partial state room which another local user has
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 75717ba4f9..3c19ea56f8 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -184,8 +184,8 @@ class UserDirectoryHandler(StateDeltasHandler):
         """Called to update index of our local user profiles when they change
         irrespective of any rooms the user may be in.
         """
-        # FIXME(#3714): We should probably do this in the same worker as all
-        # the other changes.
+        # FIXME(https://github.com/matrix-org/synapse/issues/3714): We should
+        # probably do this in the same worker as all the other changes.
 
         if await self.store.should_include_local_user_in_dir(user_id):
             await self.store.update_profile_in_user_dir(
@@ -194,8 +194,8 @@ class UserDirectoryHandler(StateDeltasHandler):
 
     async def handle_local_user_deactivated(self, user_id: str) -> None:
         """Called when a user ID is deactivated"""
-        # FIXME(#3714): We should probably do this in the same worker as all
-        # the other changes.
+        # FIXME(https://github.com/matrix-org/synapse/issues/3714): We should
+        # probably do this in the same worker as all the other changes.
         await self.store.remove_from_user_dir(user_id)
 
     async def _unsafe_process(self) -> None:
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 08c7fc1631..d5013e8e97 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -465,7 +465,7 @@ class MatrixFederationHttpClient:
         """Wrapper for _send_request which can optionally retry the request
         upon receiving a combination of a 400 HTTP response code and a
         'M_UNRECOGNIZED' errcode. This is a workaround for Synapse <= v0.99.3
-        due to #3622.
+        due to https://github.com/matrix-org/synapse/issues/3622.
 
         Args:
             request: details of request to be sent
@@ -958,9 +958,9 @@ class MatrixFederationHttpClient:
                 requests).
             try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED
                 response we should try appending a trailing slash to the end
-                of the request. Workaround for #3622 in Synapse <= v0.99.3. This
-                will be attempted before backing off if backing off has been
-                enabled.
+                of the request. Workaround for https://github.com/matrix-org/synapse/issues/3622
+                in Synapse <= v0.99.3. This will be attempted before backing off if
+                backing off has been enabled.
             parser: The parser to use to decode the response. Defaults to
                 parsing as JSON.
             backoff_on_all_error_codes: Back off if we get any error response
@@ -1155,7 +1155,8 @@ class MatrixFederationHttpClient:
 
             try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED
                 response we should try appending a trailing slash to the end of
-                the request. Workaround for #3622 in Synapse <= v0.99.3.
+                the request. Workaround for https://github.com/matrix-org/synapse/issues/3622
+                in Synapse <= v0.99.3.
 
             parser: The parser to use to decode the response. Defaults to
                 parsing as JSON.
@@ -1250,7 +1251,8 @@ class MatrixFederationHttpClient:
 
             try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED
                 response we should try appending a trailing slash to the end of
-                the request. Workaround for #3622 in Synapse <= v0.99.3.
+                the request. Workaround for https://github.com/matrix-org/synapse/issues/3622
+                in Synapse <= v0.99.3.
 
             parser: The parser to use to decode the response. Defaults to
                 parsing as JSON.
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 4454fe29a5..e297fa9c8b 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -1019,11 +1019,14 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]:
     if not opentracing:
         return func
 
+    # getfullargspec is somewhat expensive, so ensure it is only called a single
+    # time (the function signature shouldn't change anyway).
+    argspec = inspect.getfullargspec(func)
+
     @contextlib.contextmanager
     def _wrapping_logic(
-        func: Callable[P, R], *args: P.args, **kwargs: P.kwargs
+        _func: Callable[P, R], *args: P.args, **kwargs: P.kwargs
     ) -> Generator[None, None, None]:
-        argspec = inspect.getfullargspec(func)
         # We use `[1:]` to skip the `self` object reference and `start=1` to
         # make the index line up with `argspec.args`.
         #
diff --git a/synapse/media/_base.py b/synapse/media/_base.py
index 860e5ddca2..9d88a711cf 100644
--- a/synapse/media/_base.py
+++ b/synapse/media/_base.py
@@ -83,6 +83,12 @@ INLINE_CONTENT_TYPES = [
     "audio/x-flac",
 ]
 
+# Default timeout_ms for download and thumbnail requests
+DEFAULT_MAX_TIMEOUT_MS = 20_000
+
+# Maximum allowed timeout_ms for download and thumbnail requests
+MAXIMUM_ALLOWED_MAX_TIMEOUT_MS = 60_000
+
 
 def respond_404(request: SynapseRequest) -> None:
     assert request.path is not None
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index 72b0f1c5de..bf976b9e7c 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -19,6 +19,7 @@ import shutil
 from io import BytesIO
 from typing import IO, TYPE_CHECKING, Dict, List, Optional, Set, Tuple
 
+import attr
 from matrix_common.types.mxc_uri import MXCUri
 
 import twisted.internet.error
@@ -26,13 +27,16 @@ import twisted.web.http
 from twisted.internet.defer import Deferred
 
 from synapse.api.errors import (
+    Codes,
     FederationDeniedError,
     HttpResponseException,
     NotFoundError,
     RequestSendFailed,
     SynapseError,
+    cs_error,
 )
 from synapse.config.repository import ThumbnailRequirement
+from synapse.http.server import respond_with_json
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import defer_to_thread
 from synapse.logging.opentracing import trace
@@ -50,6 +54,7 @@ from synapse.media.storage_provider import StorageProviderWrapper
 from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
 from synapse.media.url_previewer import UrlPreviewer
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia
 from synapse.types import UserID
 from synapse.util.async_helpers import Linearizer
 from synapse.util.retryutils import NotRetryingDestination
@@ -78,6 +83,8 @@ class MediaRepository:
         self.store = hs.get_datastores().main
         self.max_upload_size = hs.config.media.max_upload_size
         self.max_image_pixels = hs.config.media.max_image_pixels
+        self.unused_expiration_time = hs.config.media.unused_expiration_time
+        self.max_pending_media_uploads = hs.config.media.max_pending_media_uploads
 
         Thumbnailer.set_limits(self.max_image_pixels)
 
@@ -184,6 +191,117 @@ class MediaRepository:
             self.recently_accessed_locals.add(media_id)
 
     @trace
+    async def create_media_id(self, auth_user: UserID) -> Tuple[str, int]:
+        """Create and store a media ID for a local user and return the MXC URI and its
+        expiration.
+
+        Args:
+            auth_user: The user_id of the uploader
+
+        Returns:
+            A tuple containing the MXC URI of the stored content and the timestamp at
+            which the MXC URI expires.
+        """
+        media_id = random_string(24)
+        now = self.clock.time_msec()
+        await self.store.store_local_media_id(
+            media_id=media_id,
+            time_now_ms=now,
+            user_id=auth_user,
+        )
+        return f"mxc://{self.server_name}/{media_id}", now + self.unused_expiration_time
+
+    @trace
+    async def reached_pending_media_limit(self, auth_user: UserID) -> Tuple[bool, int]:
+        """Check if the user is over the limit for pending media uploads.
+
+        Args:
+            auth_user: The user_id of the uploader
+
+        Returns:
+            A tuple with a boolean and an integer indicating whether the user has too
+            many pending media uploads and the timestamp at which the first pending
+            media will expire, respectively.
+        """
+        pending, first_expiration_ts = await self.store.count_pending_media(
+            user_id=auth_user
+        )
+        return pending >= self.max_pending_media_uploads, first_expiration_ts
+
+    @trace
+    async def verify_can_upload(self, media_id: str, auth_user: UserID) -> None:
+        """Verify that the media ID can be uploaded to by the given user. This
+        function checks that:
+
+        * the media ID exists
+        * the media ID does not already have content
+        * the user uploading is the same as the one who created the media ID
+        * the media ID has not expired
+
+        Args:
+            media_id: The media ID to verify
+            auth_user: The user_id of the uploader
+        """
+        media = await self.store.get_local_media(media_id)
+        if media is None:
+            raise SynapseError(404, "Unknown media ID", errcode=Codes.NOT_FOUND)
+
+        if media.user_id != auth_user.to_string():
+            raise SynapseError(
+                403,
+                "Only the creator of the media ID can upload to it",
+                errcode=Codes.FORBIDDEN,
+            )
+
+        if media.media_length is not None:
+            raise SynapseError(
+                409,
+                "Media ID already has content",
+                errcode=Codes.CANNOT_OVERWRITE_MEDIA,
+            )
+
+        expired_time_ms = self.clock.time_msec() - self.unused_expiration_time
+        if media.created_ts < expired_time_ms:
+            raise NotFoundError("Media ID has expired")
+
+    @trace
+    async def update_content(
+        self,
+        media_id: str,
+        media_type: str,
+        upload_name: Optional[str],
+        content: IO,
+        content_length: int,
+        auth_user: UserID,
+    ) -> None:
+        """Update the content of the given media ID.
+
+        Args:
+            media_id: The media ID to replace.
+            media_type: The content type of the file.
+            upload_name: The name of the file, if provided.
+            content: A file like object that is the content to store
+            content_length: The length of the content
+            auth_user: The user_id of the uploader
+        """
+        file_info = FileInfo(server_name=None, file_id=media_id)
+        fname = await self.media_storage.store_file(content, file_info)
+        logger.info("Stored local media in file %r", fname)
+
+        await self.store.update_local_media(
+            media_id=media_id,
+            media_type=media_type,
+            upload_name=upload_name,
+            media_length=content_length,
+            user_id=auth_user,
+        )
+
+        try:
+            await self._generate_thumbnails(None, media_id, media_id, media_type)
+        except Exception as e:
+            logger.info("Failed to generate thumbnails: %s", e)
+
+    @trace
     async def create_content(
         self,
         media_type: str,
@@ -229,8 +347,74 @@ class MediaRepository:
 
         return MXCUri(self.server_name, media_id)
 
+    def respond_not_yet_uploaded(self, request: SynapseRequest) -> None:
+        respond_with_json(
+            request,
+            504,
+            cs_error("Media has not been uploaded yet", code=Codes.NOT_YET_UPLOADED),
+            send_cors=True,
+        )
+
+    async def get_local_media_info(
+        self, request: SynapseRequest, media_id: str, max_timeout_ms: int
+    ) -> Optional[LocalMedia]:
+        """Gets the info dictionary for the given local media ID. If the media has
+        not been uploaded yet, this function will wait up to ``max_timeout_ms``
+        milliseconds for the media to be uploaded.
+
+        Args:
+            request: The incoming request.
+            media_id: The media ID of the content. (This is the same as
+                the file_id for local content.)
+            max_timeout_ms: the maximum number of milliseconds to wait for the
+                media to be uploaded.
+
+        Returns:
+            Either the info dictionary for the given local media ID or
+            ``None``. If ``None``, then no further processing is necessary as
+            this function will send the necessary JSON response.
+        """
+        wait_until = self.clock.time_msec() + max_timeout_ms
+        while True:
+            # Get the info for the media
+            media_info = await self.store.get_local_media(media_id)
+            if not media_info:
+                logger.info("Media %s is unknown", media_id)
+                respond_404(request)
+                return None
+
+            if media_info.quarantined_by:
+                logger.info("Media %s is quarantined", media_id)
+                respond_404(request)
+                return None
+
+            # The file has been uploaded, so stop looping
+            if media_info.media_length is not None:
+                return media_info
+
+            # Check if the media ID has expired and still hasn't been uploaded to.
+            now = self.clock.time_msec()
+            expired_time_ms = now - self.unused_expiration_time
+            if media_info.created_ts < expired_time_ms:
+                logger.info("Media %s has expired without being uploaded", media_id)
+                respond_404(request)
+                return None
+
+            if now >= wait_until:
+                break
+
+            await self.clock.sleep(0.5)
+
+        logger.info("Media %s has not yet been uploaded", media_id)
+        self.respond_not_yet_uploaded(request)
+        return None
+
     async def get_local_media(
-        self, request: SynapseRequest, media_id: str, name: Optional[str]
+        self,
+        request: SynapseRequest,
+        media_id: str,
+        name: Optional[str],
+        max_timeout_ms: int,
     ) -> None:
         """Responds to requests for local media, if exists, or returns 404.
 
@@ -240,23 +424,24 @@ class MediaRepository:
                 the file_id for local content.)
             name: Optional name that, if specified, will be used as
                 the filename in the Content-Disposition header of the response.
+            max_timeout_ms: the maximum number of milliseconds to wait for the
+                media to be uploaded.
 
         Returns:
             Resolves once a response has successfully been written to request
         """
-        media_info = await self.store.get_local_media(media_id)
-        if not media_info or media_info["quarantined_by"]:
-            respond_404(request)
+        media_info = await self.get_local_media_info(request, media_id, max_timeout_ms)
+        if not media_info:
             return
 
         self.mark_recently_accessed(None, media_id)
 
-        media_type = media_info["media_type"]
+        media_type = media_info.media_type
         if not media_type:
             media_type = "application/octet-stream"
-        media_length = media_info["media_length"]
-        upload_name = name if name else media_info["upload_name"]
-        url_cache = media_info["url_cache"]
+        media_length = media_info.media_length
+        upload_name = name if name else media_info.upload_name
+        url_cache = media_info.url_cache
 
         file_info = FileInfo(None, media_id, url_cache=bool(url_cache))
 
@@ -271,6 +456,7 @@ class MediaRepository:
         server_name: str,
         media_id: str,
         name: Optional[str],
+        max_timeout_ms: int,
     ) -> None:
         """Respond to requests for remote media.
 
@@ -280,6 +466,8 @@ class MediaRepository:
             media_id: The media ID of the content (as defined by the remote server).
             name: Optional name that, if specified, will be used as
                 the filename in the Content-Disposition header of the response.
+            max_timeout_ms: the maximum number of milliseconds to wait for the
+                media to be uploaded.
 
         Returns:
             Resolves once a response has successfully been written to request
@@ -305,27 +493,33 @@ class MediaRepository:
         key = (server_name, media_id)
         async with self.remote_media_linearizer.queue(key):
             responder, media_info = await self._get_remote_media_impl(
-                server_name, media_id
+                server_name, media_id, max_timeout_ms
             )
 
         # We deliberately stream the file outside the lock
-        if responder:
-            media_type = media_info["media_type"]
-            media_length = media_info["media_length"]
-            upload_name = name if name else media_info["upload_name"]
+        if responder and media_info:
+            upload_name = name if name else media_info.upload_name
             await respond_with_responder(
-                request, responder, media_type, media_length, upload_name
+                request,
+                responder,
+                media_info.media_type,
+                media_info.media_length,
+                upload_name,
             )
         else:
             respond_404(request)
 
-    async def get_remote_media_info(self, server_name: str, media_id: str) -> dict:
+    async def get_remote_media_info(
+        self, server_name: str, media_id: str, max_timeout_ms: int
+    ) -> RemoteMedia:
         """Gets the media info associated with the remote file, downloading
         if necessary.
 
         Args:
             server_name: Remote server_name where the media originated.
             media_id: The media ID of the content (as defined by the remote server).
+            max_timeout_ms: the maximum number of milliseconds to wait for the
+                media to be uploaded.
 
         Returns:
             The media info of the file
@@ -341,7 +535,7 @@ class MediaRepository:
         key = (server_name, media_id)
         async with self.remote_media_linearizer.queue(key):
             responder, media_info = await self._get_remote_media_impl(
-                server_name, media_id
+                server_name, media_id, max_timeout_ms
             )
 
         # Ensure we actually use the responder so that it releases resources
@@ -352,8 +546,8 @@ class MediaRepository:
         return media_info
 
     async def _get_remote_media_impl(
-        self, server_name: str, media_id: str
-    ) -> Tuple[Optional[Responder], dict]:
+        self, server_name: str, media_id: str, max_timeout_ms: int
+    ) -> Tuple[Optional[Responder], RemoteMedia]:
         """Looks for media in local cache, if not there then attempt to
         download from remote server.
 
@@ -361,6 +555,8 @@ class MediaRepository:
             server_name: Remote server_name where the media originated.
             media_id: The media ID of the content (as defined by the
                 remote server).
+            max_timeout_ms: the maximum number of milliseconds to wait for the
+                media to be uploaded.
 
         Returns:
             A tuple of responder and the media info of the file.
@@ -373,15 +569,17 @@ class MediaRepository:
 
         # If we have an entry in the DB, try and look for it
         if media_info:
-            file_id = media_info["filesystem_id"]
+            file_id = media_info.filesystem_id
             file_info = FileInfo(server_name, file_id)
 
-            if media_info["quarantined_by"]:
+            if media_info.quarantined_by:
                 logger.info("Media is quarantined")
                 raise NotFoundError()
 
-            if not media_info["media_type"]:
-                media_info["media_type"] = "application/octet-stream"
+            if not media_info.media_type:
+                media_info = attr.evolve(
+                    media_info, media_type="application/octet-stream"
+                )
 
             responder = await self.media_storage.fetch_media(file_info)
             if responder:
@@ -391,8 +589,7 @@ class MediaRepository:
 
         try:
             media_info = await self._download_remote_file(
-                server_name,
-                media_id,
+                server_name, media_id, max_timeout_ms
             )
         except SynapseError:
             raise
@@ -403,9 +600,9 @@ class MediaRepository:
             if not media_info:
                 raise e
 
-        file_id = media_info["filesystem_id"]
-        if not media_info["media_type"]:
-            media_info["media_type"] = "application/octet-stream"
+        file_id = media_info.filesystem_id
+        if not media_info.media_type:
+            media_info = attr.evolve(media_info, media_type="application/octet-stream")
         file_info = FileInfo(server_name, file_id)
 
         # We generate thumbnails even if another process downloaded the media
@@ -415,7 +612,7 @@ class MediaRepository:
         # otherwise they'll request thumbnails and get a 404 if they're not
         # ready yet.
         await self._generate_thumbnails(
-            server_name, media_id, file_id, media_info["media_type"]
+            server_name, media_id, file_id, media_info.media_type
         )
 
         responder = await self.media_storage.fetch_media(file_info)
@@ -425,7 +622,8 @@ class MediaRepository:
         self,
         server_name: str,
         media_id: str,
-    ) -> dict:
+        max_timeout_ms: int,
+    ) -> RemoteMedia:
         """Attempt to download the remote file from the given server name,
         using the given file_id as the local id.
 
@@ -434,7 +632,8 @@ class MediaRepository:
             media_id: The media ID of the content (as defined by the
                 remote server). This is different than the file_id, which is
                 locally generated.
-            file_id: Local file ID
+            max_timeout_ms: the maximum number of milliseconds to wait for the
+                media to be uploaded.
 
         Returns:
             The media info of the file.
@@ -458,7 +657,8 @@ class MediaRepository:
                         # tell the remote server to 404 if it doesn't
                         # recognise the server_name, to make sure we don't
                         # end up with a routing loop.
-                        "allow_remote": "false"
+                        "allow_remote": "false",
+                        "timeout_ms": str(max_timeout_ms),
                     },
                 )
             except RequestSendFailed as e:
@@ -518,7 +718,7 @@ class MediaRepository:
                 origin=server_name,
                 media_id=media_id,
                 media_type=media_type,
-                time_now_ms=self.clock.time_msec(),
+                time_now_ms=time_now_ms,
                 upload_name=upload_name,
                 media_length=length,
                 filesystem_id=file_id,
@@ -526,15 +726,17 @@ class MediaRepository:
 
         logger.info("Stored remote media in file %r", fname)
 
-        media_info = {
-            "media_type": media_type,
-            "media_length": length,
-            "upload_name": upload_name,
-            "created_ts": time_now_ms,
-            "filesystem_id": file_id,
-        }
-
-        return media_info
+        return RemoteMedia(
+            media_origin=server_name,
+            media_id=media_id,
+            media_type=media_type,
+            media_length=length,
+            upload_name=upload_name,
+            created_ts=time_now_ms,
+            filesystem_id=file_id,
+            last_access_ts=time_now_ms,
+            quarantined_by=None,
+        )
 
     def _get_thumbnail_requirements(
         self, media_type: str
diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py
index 9b5a3dd5f4..44aac21de6 100644
--- a/synapse/media/url_previewer.py
+++ b/synapse/media/url_previewer.py
@@ -240,15 +240,14 @@ class UrlPreviewer:
         cache_result = await self.store.get_url_cache(url, ts)
         if (
             cache_result
-            and cache_result["expires_ts"] > ts
-            and cache_result["response_code"] / 100 == 2
+            and cache_result.expires_ts > ts
+            and cache_result.response_code // 100 == 2
         ):
             # It may be stored as text in the database, not as bytes (such as
             # PostgreSQL). If so, encode it back before handing it on.
-            og = cache_result["og"]
-            if isinstance(og, str):
-                og = og.encode("utf8")
-            return og
+            if isinstance(cache_result.og, str):
+                return cache_result.og.encode("utf8")
+            return cache_result.og
 
         # If this URL can be accessed via an allowed oEmbed, use that instead.
         url_to_download = url
diff --git a/synapse/metrics/_reactor_metrics.py b/synapse/metrics/_reactor_metrics.py
index a2c6e6842d..dd486dd3e2 100644
--- a/synapse/metrics/_reactor_metrics.py
+++ b/synapse/metrics/_reactor_metrics.py
@@ -12,17 +12,45 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import select
+import logging
 import time
-from typing import Any, Iterable, List, Tuple
+from selectors import SelectSelector, _PollLikeSelector  # type: ignore[attr-defined]
+from typing import Any, Callable, Iterable
 
 from prometheus_client import Histogram, Metric
 from prometheus_client.core import REGISTRY, GaugeMetricFamily
 
-from twisted.internet import reactor
+from twisted.internet import reactor, selectreactor
+from twisted.internet.asyncioreactor import AsyncioSelectorReactor
 
 from synapse.metrics._types import Collector
 
+try:
+    from selectors import KqueueSelector
+except ImportError:
+
+    class KqueueSelector:  # type: ignore[no-redef]
+        pass
+
+
+try:
+    from twisted.internet.epollreactor import EPollReactor
+except ImportError:
+
+    class EPollReactor:  # type: ignore[no-redef]
+        pass
+
+
+try:
+    from twisted.internet.pollreactor import PollReactor
+except ImportError:
+
+    class PollReactor:  # type: ignore[no-redef]
+        pass
+
+
+logger = logging.getLogger(__name__)
+
 #
 # Twisted reactor metrics
 #
@@ -34,52 +62,100 @@ tick_time = Histogram(
 )
 
 
-class EpollWrapper:
-    """a wrapper for an epoll object which records the time between polls"""
+class CallWrapper:
+    """A wrapper for a callable which records the time between calls"""
 
-    def __init__(self, poller: "select.epoll"):  # type: ignore[name-defined]
+    def __init__(self, wrapped: Callable[..., Any]):
         self.last_polled = time.time()
-        self._poller = poller
+        self._wrapped = wrapped
 
-    def poll(self, *args, **kwargs) -> List[Tuple[int, int]]:  # type: ignore[no-untyped-def]
-        # record the time since poll() was last called. This gives a good proxy for
+    def __call__(self, *args, **kwargs) -> Any:  # type: ignore[no-untyped-def]
+        # record the time since this was last called. This gives a good proxy for
         # how long it takes to run everything in the reactor - ie, how long anything
         # waiting for the next tick will have to wait.
         tick_time.observe(time.time() - self.last_polled)
 
-        ret = self._poller.poll(*args, **kwargs)
+        ret = self._wrapped(*args, **kwargs)
 
         self.last_polled = time.time()
         return ret
 
+
+class ObjWrapper:
+    """A wrapper for an object which wraps a specified method in CallWrapper.
+
+    Other methods/attributes are passed to the original object.
+
+    This is necessary when the wrapped object does not allow the attribute to be
+    overwritten.
+    """
+
+    def __init__(self, wrapped: Any, method_name: str):
+        self._wrapped = wrapped
+        self._method_name = method_name
+        self._wrapped_method = CallWrapper(getattr(wrapped, method_name))
+
     def __getattr__(self, item: str) -> Any:
-        return getattr(self._poller, item)
+        if item == self._method_name:
+            return self._wrapped_method
+
+        return getattr(self._wrapped, item)
 
 
 class ReactorLastSeenMetric(Collector):
-    def __init__(self, epoll_wrapper: EpollWrapper):
-        self._epoll_wrapper = epoll_wrapper
+    def __init__(self, call_wrapper: CallWrapper):
+        self._call_wrapper = call_wrapper
 
     def collect(self) -> Iterable[Metric]:
         cm = GaugeMetricFamily(
             "python_twisted_reactor_last_seen",
             "Seconds since the Twisted reactor was last seen",
         )
-        cm.add_metric([], time.time() - self._epoll_wrapper.last_polled)
+        cm.add_metric([], time.time() - self._call_wrapper.last_polled)
         yield cm
 
 
+# Twisted has already select a reasonable reactor for us, so assumptions can be
+# made about the shape.
+wrapper = None
 try:
-    # if the reactor has a `_poller` attribute, which is an `epoll` object
-    # (ie, it's an EPollReactor), we wrap the `epoll` with a thing that will
-    # measure the time between ticks
-    from select import epoll  # type: ignore[attr-defined]
-
-    poller = reactor._poller  # type: ignore[attr-defined]
-except (AttributeError, ImportError):
-    pass
-else:
-    if isinstance(poller, epoll):
-        poller = EpollWrapper(poller)
-        reactor._poller = poller  # type: ignore[attr-defined]
-        REGISTRY.register(ReactorLastSeenMetric(poller))
+    if isinstance(reactor, (PollReactor, EPollReactor)):
+        reactor._poller = ObjWrapper(reactor._poller, "poll")  # type: ignore[attr-defined]
+        wrapper = reactor._poller._wrapped_method  # type: ignore[attr-defined]
+
+    elif isinstance(reactor, selectreactor.SelectReactor):
+        # Twisted uses a module-level _select function.
+        wrapper = selectreactor._select = CallWrapper(selectreactor._select)
+
+    elif isinstance(reactor, AsyncioSelectorReactor):
+        # For asyncio look at the underlying asyncio event loop.
+        asyncio_loop = reactor._asyncioEventloop  # A sub-class of BaseEventLoop,
+
+        # A sub-class of BaseSelector.
+        selector = asyncio_loop._selector  # type: ignore[attr-defined]
+
+        if isinstance(selector, SelectSelector):
+            wrapper = selector._select = CallWrapper(selector._select)  # type: ignore[attr-defined]
+
+        # poll, epoll, and /dev/poll.
+        elif isinstance(selector, _PollLikeSelector):
+            selector._selector = ObjWrapper(selector._selector, "poll")  # type: ignore[attr-defined]
+            wrapper = selector._selector._wrapped_method  # type: ignore[attr-defined]
+
+        elif isinstance(selector, KqueueSelector):
+            selector._selector = ObjWrapper(selector._selector, "control")  # type: ignore[attr-defined]
+            wrapper = selector._selector._wrapped_method  # type: ignore[attr-defined]
+
+        else:
+            # E.g. this does not support the (Windows-only) ProactorEventLoop.
+            logger.warning(
+                "Skipping configuring ReactorLastSeenMetric: unexpected asyncio loop selector: %r via %r",
+                selector,
+                asyncio_loop,
+            )
+except Exception as e:
+    logger.warning("Configuring ReactorLastSeenMetric failed: %r", e)
+
+
+if wrapper:
+    REGISTRY.register(ReactorLastSeenMetric(wrapper))
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 755c59274c..812144a128 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -1860,7 +1860,8 @@ class PublicRoomListManager:
         if not room:
             return False
 
-        return room.get("is_public", False)
+        # The first item is whether the room is public.
+        return room[0]
 
     async def add_room_to_public_room_list(self, room_id: str) -> None:
         """Publishes a room to the public room list.
diff --git a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py
index ecaeef3511..7419785aff 100644
--- a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py
+++ b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py
@@ -295,7 +295,8 @@ class ThirdPartyEventRulesModuleApiCallbacks:
                 raise
             except SynapseError as e:
                 # FIXME: Being able to throw SynapseErrors is relied upon by
-                # some modules. PR #10386 accidentally broke this ability.
+                # some modules. PR https://github.com/matrix-org/synapse/pull/10386
+                # accidentally broke this ability.
                 # That said, we aren't keen on exposing this implementation detail
                 # to modules and we should one day have a proper way to do what
                 # is wanted.
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 14784312dc..5934b1ef34 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -25,10 +25,13 @@ from typing import (
     Sequence,
     Tuple,
     Union,
+    cast,
 )
 
 from prometheus_client import Counter
 
+from twisted.internet.defer import Deferred
+
 from synapse.api.constants import (
     MAIN_TIMELINE,
     EventContentFields,
@@ -40,11 +43,15 @@ from synapse.api.room_versions import PushRuleRoomFlag
 from synapse.event_auth import auth_types_for_event, get_user_power_level
 from synapse.events import EventBase, relation_from_event
 from synapse.events.snapshot import EventContext
+from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.state import POWER_KEY
 from synapse.storage.databases.main.roommember import EventIdMembership
+from synapse.storage.roommember import ProfileInfo
 from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator
 from synapse.types import JsonValue
 from synapse.types.state import StateFilter
+from synapse.util import unwrapFirstError
+from synapse.util.async_helpers import gather_results
 from synapse.util.caches import register_cache
 from synapse.util.metrics import measure_func
 from synapse.visibility import filter_event_for_clients_with_state
@@ -342,15 +349,41 @@ class BulkPushRuleEvaluator:
         rules_by_user = await self._get_rules_for_event(event)
         actions_by_user: Dict[str, Collection[Union[Mapping, str]]] = {}
 
-        room_member_count = await self.store.get_number_joined_users_in_room(
-            event.room_id
-        )
-
+        # Gather a bunch of info in parallel.
+        #
+        # This has a lot of ignored types and casting due to the use of @cached
+        # decorated functions passed into run_in_background.
+        #
+        # See https://github.com/matrix-org/synapse/issues/16606
         (
-            power_levels,
-            sender_power_level,
-        ) = await self._get_power_levels_and_sender_level(
-            event, context, event_id_to_event
+            room_member_count,
+            (power_levels, sender_power_level),
+            related_events,
+            profiles,
+        ) = await make_deferred_yieldable(
+            cast(
+                "Deferred[Tuple[int, Tuple[dict, Optional[int]], Dict[str, Dict[str, JsonValue]], Mapping[str, ProfileInfo]]]",
+                gather_results(
+                    (
+                        run_in_background(  # type: ignore[call-arg]
+                            self.store.get_number_joined_users_in_room, event.room_id  # type: ignore[arg-type]
+                        ),
+                        run_in_background(
+                            self._get_power_levels_and_sender_level,
+                            event,
+                            context,
+                            event_id_to_event,
+                        ),
+                        run_in_background(self._related_events, event),
+                        run_in_background(  # type: ignore[call-arg]
+                            self.store.get_subset_users_in_room_with_profiles,
+                            event.room_id,  # type: ignore[arg-type]
+                            rules_by_user.keys(),  # type: ignore[arg-type]
+                        ),
+                    ),
+                    consumeErrors=True,
+                ).addErrback(unwrapFirstError),
+            )
         )
 
         # Find the event's thread ID.
@@ -366,8 +399,6 @@ class BulkPushRuleEvaluator:
                 # the parent is part of a thread.
                 thread_id = await self.store.get_thread_id(relation.parent_id)
 
-        related_events = await self._related_events(event)
-
         # It's possible that old room versions have non-integer power levels (floats or
         # strings; even the occasional `null`). For old rooms, we interpret these as if
         # they were integers. Do this here for the `@room` power level threshold.
@@ -400,11 +431,6 @@ class BulkPushRuleEvaluator:
             self.hs.config.experimental.msc1767_enabled,  # MSC3931 flag
         )
 
-        users = rules_by_user.keys()
-        profiles = await self.store.get_subset_users_in_room_with_profiles(
-            event.room_id, users
-        )
-
         for uid, rules in rules_by_user.items():
             if event.sender == uid:
                 continue
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index afd03137f0..c14a18ba2e 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -257,6 +257,11 @@ class ReplicationCommandHandler:
         if hs.config.redis.redis_enabled:
             self._notifier.add_lock_released_callback(self.on_lock_released)
 
+        # Marks if we should send POSITION commands for all streams ASAP. This
+        # is checked by the `ReplicationStreamer` which manages sending
+        # RDATA/POSITION commands
+        self._should_announce_positions = True
+
     def subscribe_to_channel(self, channel_name: str) -> None:
         """
         Indicates that we wish to subscribe to a Redis channel by name.
@@ -397,29 +402,23 @@ class ReplicationCommandHandler:
         return self._streams_to_replicate
 
     def on_REPLICATE(self, conn: IReplicationConnection, cmd: ReplicateCommand) -> None:
-        self.send_positions_to_connection(conn)
+        self.send_positions_to_connection()
 
-    def send_positions_to_connection(self, conn: IReplicationConnection) -> None:
+    def send_positions_to_connection(self) -> None:
         """Send current position of all streams this process is source of to
         the connection.
         """
 
-        # We respond with current position of all streams this instance
-        # replicates.
-        for stream in self.get_streams_to_replicate():
-            # Note that we use the current token as the prev token here (rather
-            # than stream.last_token), as we can't be sure that there have been
-            # no rows written between last token and the current token (since we
-            # might be racing with the replication sending bg process).
-            current_token = stream.current_token(self._instance_name)
-            self.send_command(
-                PositionCommand(
-                    stream.NAME,
-                    self._instance_name,
-                    current_token,
-                    current_token,
-                )
-            )
+        self._should_announce_positions = True
+        self._notifier.notify_replication()
+
+    def should_announce_positions(self) -> bool:
+        """Check if we should send POSITION commands for all streams ASAP."""
+        return self._should_announce_positions
+
+    def will_announce_positions(self) -> None:
+        """Mark that we're about to send POSITIONs out for all streams."""
+        self._should_announce_positions = False
 
     def on_USER_SYNC(
         self, conn: IReplicationConnection, cmd: UserSyncCommand
@@ -588,6 +587,21 @@ class ReplicationCommandHandler:
 
         logger.debug("Handling '%s %s'", cmd.NAME, cmd.to_line())
 
+        # Check if we can early discard this position. We can only do so for
+        # connected streams.
+        stream = self._streams[cmd.stream_name]
+        if stream.can_discard_position(
+            cmd.instance_name, cmd.prev_token, cmd.new_token
+        ) and self.is_stream_connected(conn, cmd.stream_name):
+            logger.debug(
+                "Discarding redundant POSITION %s/%s %s %s",
+                cmd.instance_name,
+                cmd.stream_name,
+                cmd.prev_token,
+                cmd.new_token,
+            )
+            return
+
         self._add_command_to_stream_queue(conn, cmd)
 
     async def _process_position(
@@ -599,6 +613,18 @@ class ReplicationCommandHandler:
         """
         stream = self._streams[stream_name]
 
+        if stream.can_discard_position(
+            cmd.instance_name, cmd.prev_token, cmd.new_token
+        ) and self.is_stream_connected(conn, cmd.stream_name):
+            logger.debug(
+                "Discarding redundant POSITION %s/%s %s %s",
+                cmd.instance_name,
+                cmd.stream_name,
+                cmd.prev_token,
+                cmd.new_token,
+            )
+            return
+
         # We're about to go and catch up with the stream, so remove from set
         # of connected streams.
         for streams in self._streams_by_connection.values():
@@ -626,8 +652,9 @@ class ReplicationCommandHandler:
             # for why this can happen.
 
             logger.info(
-                "Fetching replication rows for '%s' between %i and %i",
+                "Fetching replication rows for '%s' / %s between %i and %i",
                 stream_name,
+                cmd.instance_name,
                 current_token,
                 cmd.new_token,
             )
@@ -657,6 +684,13 @@ class ReplicationCommandHandler:
 
         self._streams_by_connection.setdefault(conn, set()).add(stream_name)
 
+    def is_stream_connected(
+        self, conn: IReplicationConnection, stream_name: str
+    ) -> bool:
+        """Return if stream has been successfully connected and is ready to
+        receive updates"""
+        return stream_name in self._streams_by_connection.get(conn, ())
+
     def on_REMOTE_SERVER_UP(
         self, conn: IReplicationConnection, cmd: RemoteServerUpCommand
     ) -> None:
diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py
index 7e96145b3b..1fa37bb888 100644
--- a/synapse/replication/tcp/redis.py
+++ b/synapse/replication/tcp/redis.py
@@ -141,7 +141,7 @@ class RedisSubscriber(SubscriberProtocol):
         # We send out our positions when there is a new connection in case the
         # other side missed updates. We do this for Redis connections as the
         # otherside won't know we've connected and so won't issue a REPLICATE.
-        self.synapse_handler.send_positions_to_connection(self)
+        self.synapse_handler.send_positions_to_connection()
 
     def messageReceived(self, pattern: str, channel: str, message: str) -> None:
         """Received a message from redis."""
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index 38abb5df54..d15828f2d3 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -123,7 +123,7 @@ class ReplicationStreamer:
 
         # We check up front to see if anything has actually changed, as we get
         # poked because of changes that happened on other instances.
-        if all(
+        if not self.command_handler.should_announce_positions() and all(
             stream.last_token == stream.current_token(self._instance_name)
             for stream in self.streams
         ):
@@ -158,6 +158,21 @@ class ReplicationStreamer:
                         all_streams = list(all_streams)
                         random.shuffle(all_streams)
 
+                    if self.command_handler.should_announce_positions():
+                        # We need to send out POSITIONs for all streams, usually
+                        # because a worker has reconnected.
+                        self.command_handler.will_announce_positions()
+
+                        for stream in all_streams:
+                            self.command_handler.send_command(
+                                PositionCommand(
+                                    stream.NAME,
+                                    self._instance_name,
+                                    stream.last_token,
+                                    stream.last_token,
+                                )
+                            )
+
                     for stream in all_streams:
                         if stream.last_token == stream.current_token(
                             self._instance_name
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 58a44029aa..cc34dfb322 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -144,6 +144,16 @@ class Stream:
         """
         raise NotImplementedError()
 
+    def can_discard_position(
+        self, instance_name: str, prev_token: int, new_token: int
+    ) -> bool:
+        """Whether or not a position command for this stream can be discarded.
+
+        Useful for streams that can never go backwards and where we already know
+        the stream ID for the instance has advanced.
+        """
+        return False
+
     def discard_updates_and_advance(self) -> None:
         """Called when the stream should advance but the updates would be discarded,
         e.g. when there are no currently connected workers.
@@ -221,6 +231,14 @@ class _StreamFromIdGen(Stream):
     def minimal_local_current_token(self) -> Token:
         return self._stream_id_gen.get_minimal_local_current_token()
 
+    def can_discard_position(
+        self, instance_name: str, prev_token: int, new_token: int
+    ) -> bool:
+        # These streams can't go backwards, so we know we can ignore any
+        # positions where the tokens are from before the current token.
+
+        return new_token <= self.current_token(instance_name)
+
 
 def current_token_without_instance(
     current_token: Callable[[], int]
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 9bd0d764f8..91edfd45d7 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -88,6 +88,7 @@ from synapse.rest.admin.users import (
     UserByThreePid,
     UserMembershipRestServlet,
     UserRegisterServlet,
+    UserReplaceMasterCrossSigningKeyRestServlet,
     UserRestServletV2,
     UsersRestServletV2,
     UserTokenRestServlet,
@@ -292,6 +293,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ListDestinationsRestServlet(hs).register(http_server)
     RoomMessagesRestServlet(hs).register(http_server)
     RoomTimestampToEventRestServlet(hs).register(http_server)
+    UserReplaceMasterCrossSigningKeyRestServlet(hs).register(http_server)
     UserByExternalId(hs).register(http_server)
     UserByThreePid(hs).register(http_server)
 
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index b7637dff0b..8cf5268854 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -17,6 +17,8 @@ import logging
 from http import HTTPStatus
 from typing import TYPE_CHECKING, Optional, Tuple
 
+import attr
+
 from synapse.api.constants import Direction
 from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.server import HttpServer
@@ -418,7 +420,7 @@ class UserMediaRestServlet(RestServlet):
             start, limit, user_id, order_by, direction
         )
 
-        ret = {"media": media, "total": total}
+        ret = {"media": [attr.asdict(m) for m in media], "total": total}
         if (start + limit) < total:
             ret["next_token"] = start + len(media)
 
@@ -477,7 +479,7 @@ class UserMediaRestServlet(RestServlet):
         )
 
         deleted_media, total = await self.media_repository.delete_local_media_ids(
-            [row["media_id"] for row in media]
+            [m.media_id for m in media]
         )
 
         return HTTPStatus.OK, {"deleted_media": deleted_media, "total": total}
diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py
index ffce92d45e..f3e06d3da3 100644
--- a/synapse/rest/admin/registration_tokens.py
+++ b/synapse/rest/admin/registration_tokens.py
@@ -77,7 +77,18 @@ class ListRegistrationTokensRestServlet(RestServlet):
         await assert_requester_is_admin(self.auth, request)
         valid = parse_boolean(request, "valid")
         token_list = await self.store.get_registration_tokens(valid)
-        return HTTPStatus.OK, {"registration_tokens": token_list}
+        return HTTPStatus.OK, {
+            "registration_tokens": [
+                {
+                    "token": t[0],
+                    "uses_allowed": t[1],
+                    "pending": t[2],
+                    "completed": t[3],
+                    "expiry_time": t[4],
+                }
+                for t in token_list
+            ]
+        }
 
 
 class NewRegistrationTokenRestServlet(RestServlet):
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index 0659f22a89..7e40bea8aa 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -16,6 +16,8 @@ from http import HTTPStatus
 from typing import TYPE_CHECKING, List, Optional, Tuple, cast
 from urllib import parse as urlparse
 
+import attr
+
 from synapse.api.constants import Direction, EventTypes, JoinRules, Membership
 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
 from synapse.api.filtering import Filter
@@ -306,10 +308,13 @@ class RoomRestServlet(RestServlet):
             raise NotFoundError("Room not found")
 
         members = await self.store.get_users_in_room(room_id)
-        ret["joined_local_devices"] = await self.store.count_devices_by_users(members)
-        ret["forgotten"] = await self.store.is_locally_forgotten_room(room_id)
+        result = attr.asdict(ret)
+        result["joined_local_devices"] = await self.store.count_devices_by_users(
+            members
+        )
+        result["forgotten"] = await self.store.is_locally_forgotten_room(room_id)
 
-        return HTTPStatus.OK, ret
+        return HTTPStatus.OK, result
 
     async def on_DELETE(
         self, request: SynapseRequest, room_id: str
@@ -408,8 +413,8 @@ class RoomMembersRestServlet(RestServlet):
     ) -> Tuple[int, JsonDict]:
         await assert_requester_is_admin(self.auth, request)
 
-        ret = await self.store.get_room(room_id)
-        if not ret:
+        room = await self.store.get_room(room_id)
+        if not room:
             raise NotFoundError("Room not found")
 
         members = await self.store.get_users_in_room(room_id)
@@ -437,8 +442,8 @@ class RoomStateRestServlet(RestServlet):
     ) -> Tuple[int, JsonDict]:
         await assert_requester_is_admin(self.auth, request)
 
-        ret = await self.store.get_room(room_id)
-        if not ret:
+        room = await self.store.get_room(room_id)
+        if not room:
             raise NotFoundError("Room not found")
 
         event_ids = await self._storage_controllers.state.get_current_state_ids(room_id)
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 7fe16130e7..9900498fbe 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -18,6 +18,8 @@ import secrets
 from http import HTTPStatus
 from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
 
+import attr
+
 from synapse.api.constants import Direction, UserTypes
 from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.servlet import (
@@ -161,11 +163,13 @@ class UsersRestServletV2(RestServlet):
         )
 
         # If support for MSC3866 is not enabled, don't show the approval flag.
+        filter = None
         if not self._msc3866_enabled:
-            for user in users:
-                del user["approved"]
 
-        ret = {"users": users, "total": total}
+            def _filter(a: attr.Attribute) -> bool:
+                return a.name != "approved"
+
+        ret = {"users": [attr.asdict(u, filter=filter) for u in users], "total": total}
         if (start + limit) < total:
             ret["next_token"] = str(start + len(users))
 
@@ -1266,6 +1270,46 @@ class AccountDataRestServlet(RestServlet):
         }
 
 
+class UserReplaceMasterCrossSigningKeyRestServlet(RestServlet):
+    """Allow a given user to replace their master cross-signing key without UIA.
+
+    This replacement is permitted for a limited period (currently 10 minutes).
+
+    While this is exposed via the admin API, this is intended for use by the
+    Matrix Authentication Service rather than server admins.
+    """
+
+    PATTERNS = admin_patterns(
+        "/users/(?P<user_id>[^/]*)/_allow_cross_signing_replacement_without_uia"
+    )
+    # Length of the window (in ms) during which the user's master key may be
+    # replaced without user-interactive auth.
+    REPLACEMENT_PERIOD_MS = 10 * 60 * 1000  # 10 minutes
+
+    def __init__(self, hs: "HomeServer"):
+        self._auth = hs.get_auth()
+        self._store = hs.get_datastores().main
+
+    async def on_POST(
+        self,
+        request: SynapseRequest,
+        user_id: str,
+    ) -> Tuple[int, JsonDict]:
+        """Mark `user_id`'s master cross-signing key as replaceable without UIA.
+
+        Requires the requester to be a server admin. Responds with the
+        timestamp (ms) before which the key may be replaced without UIA, or
+        raises NotFoundError if the user has no master cross-signing key.
+        """
+        await assert_requester_is_admin(self._auth, request)
+
+        # NOTE(review): the URL pattern always captures a user_id, so this
+        # presumably only guards against an internal routing error — confirm.
+        if user_id is None:
+            raise NotFoundError("User not found")
+
+        timestamp = (
+            await self._store.allow_master_cross_signing_key_replacement_without_uia(
+                user_id, self.REPLACEMENT_PERIOD_MS
+            )
+        )
+
+        # The store returns None when the user has no master key to replace.
+        if timestamp is None:
+            raise NotFoundError("User has no master cross-signing key")
+
+        return HTTPStatus.OK, {"updatable_without_uia_before_ms": timestamp}
+
+
 class UserByExternalId(RestServlet):
     """Find a user based on an external ID from an auth provider"""
 
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 641390cb30..0c0e82627d 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -299,19 +299,16 @@ class DeactivateAccountRestServlet(RestServlet):
 
         requester = await self.auth.get_user_by_req(request)
 
-        # allow ASes to deactivate their own users
-        if requester.app_service:
-            await self._deactivate_account_handler.deactivate_account(
-                requester.user.to_string(), body.erase, requester
+        # allow ASes to deactivate their own users:
+        # ASes don't need user-interactive auth
+        if not requester.app_service:
+            await self.auth_handler.validate_user_via_ui_auth(
+                requester,
+                request,
+                body.dict(exclude_unset=True),
+                "deactivate your account",
             )
-            return 200, {}
 
-        await self.auth_handler.validate_user_via_ui_auth(
-            requester,
-            request,
-            body.dict(exclude_unset=True),
-            "deactivate your account",
-        )
         result = await self._deactivate_account_handler.deactivate_account(
             requester.user.to_string(), body.erase, requester, id_server=body.id_server
         )
diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py
index 82944ca711..3534c3c259 100644
--- a/synapse/rest/client/directory.py
+++ b/synapse/rest/client/directory.py
@@ -147,7 +147,7 @@ class ClientDirectoryListServer(RestServlet):
         if room is None:
             raise NotFoundError("Unknown room")
 
-        return 200, {"visibility": "public" if room["is_public"] else "private"}
+        return 200, {"visibility": "public" if room[0] else "private"}
 
     class PutBody(RequestBodyModel):
         visibility: Literal["public", "private"] = "public"
diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py
index 70b8be1aa2..add8045439 100644
--- a/synapse/rest/client/keys.py
+++ b/synapse/rest/client/keys.py
@@ -376,9 +376,10 @@ class SigningKeyUploadServlet(RestServlet):
         user_id = requester.user.to_string()
         body = parse_json_object_from_request(request)
 
-        is_cross_signing_setup = (
-            await self.e2e_keys_handler.is_cross_signing_set_up_for_user(user_id)
-        )
+        (
+            is_cross_signing_setup,
+            master_key_updatable_without_uia,
+        ) = await self.e2e_keys_handler.check_cross_signing_setup(user_id)
 
         # Before MSC3967 we required UIA both when setting up cross signing for the
         # first time and when resetting the device signing key. With MSC3967 we only
@@ -386,9 +387,14 @@ class SigningKeyUploadServlet(RestServlet):
         # time. Because there is no UIA in MSC3861, for now we throw an error if the
         # user tries to reset the device signing key when MSC3861 is enabled, but allow
         # first-time setup.
+        #
+        # XXX: We now have a get-out clause by which MAS can temporarily mark the master
+        # key as replaceable. It should do its own equivalent of user interactive auth
+        # before doing so.
         if self.hs.config.experimental.msc3861.enabled:
-            # There is no way to reset the device signing key with MSC3861
-            if is_cross_signing_setup:
+            # The auth service has to explicitly mark the master key as replaceable
+            # without UIA to reset the device signing key with MSC3861.
+            if is_cross_signing_setup and not master_key_updatable_without_uia:
                 raise SynapseError(
                     HTTPStatus.NOT_IMPLEMENTED,
                     "Resetting cross signing keys is not yet supported with MSC3861",
diff --git a/synapse/rest/media/create_resource.py b/synapse/rest/media/create_resource.py
new file mode 100644
index 0000000000..994afdf13c
--- /dev/null
+++ b/synapse/rest/media/create_resource.py
@@ -0,0 +1,83 @@
+# Copyright 2023 Beeper Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import re
+from typing import TYPE_CHECKING
+
+from synapse.api.errors import LimitExceededError
+from synapse.api.ratelimiting import Ratelimiter
+from synapse.http.server import respond_with_json
+from synapse.http.servlet import RestServlet
+from synapse.http.site import SynapseRequest
+
+if TYPE_CHECKING:
+    from synapse.media.media_repository import MediaRepository
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class CreateResource(RestServlet):
+    """Reserve a new media ID ahead of the actual content upload.
+
+    POSTing here allocates a content URI immediately; the bytes are uploaded
+    separately. If no content arrives, the reservation expires at the
+    ``unused_expires_at`` timestamp returned in the response.
+    """
+
+    PATTERNS = [re.compile("/_matrix/media/v1/create")]
+
+    def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"):
+        super().__init__()
+
+        self.media_repo = media_repo
+        self.clock = hs.get_clock()
+        self.auth = hs.get_auth()
+        # Cap on how many unresolved (created-but-not-uploaded) media IDs a
+        # single user may hold at once.
+        self.max_pending_media_uploads = hs.config.media.max_pending_media_uploads
+
+        # A rate limiter for creating new media IDs.
+        self._create_media_rate_limiter = Ratelimiter(
+            store=hs.get_datastores().main,
+            clock=self.clock,
+            cfg=hs.config.ratelimiting.rc_media_create,
+        )
+
+    async def on_POST(self, request: SynapseRequest) -> None:
+        """Allocate a media ID for the authenticated user.
+
+        Raises LimitExceededError if the user has too many pending uploads;
+        the rate limiter may also reject the request outright.
+        """
+        requester = await self.auth.get_user_by_req(request)
+
+        # If the create media requests for the user are over the limit, drop them.
+        await self._create_media_rate_limiter.ratelimit(requester)
+
+        (
+            reached_pending_limit,
+            first_expiration_ts,
+        ) = await self.media_repo.reached_pending_media_limit(requester.user)
+        if reached_pending_limit:
+            # Tell the client when the earliest pending reservation lapses, so
+            # it knows how long to wait before retrying.
+            raise LimitExceededError(
+                limiter_name="max_pending_media_uploads",
+                retry_after_ms=first_expiration_ts - self.clock.time_msec(),
+            )
+
+        content_uri, unused_expires_at = await self.media_repo.create_media_id(
+            requester.user
+        )
+
+        logger.info(
+            "Created Media URI %r that if unused will expire at %d",
+            content_uri,
+            unused_expires_at,
+        )
+        respond_with_json(
+            request,
+            200,
+            {
+                "content_uri": content_uri,
+                "unused_expires_at": unused_expires_at,
+            },
+            send_cors=True,
+        )
diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py
index 65b9ff52fa..60cd87548c 100644
--- a/synapse/rest/media/download_resource.py
+++ b/synapse/rest/media/download_resource.py
@@ -17,9 +17,13 @@ import re
 from typing import TYPE_CHECKING, Optional
 
 from synapse.http.server import set_corp_headers, set_cors_headers
-from synapse.http.servlet import RestServlet, parse_boolean
+from synapse.http.servlet import RestServlet, parse_boolean, parse_integer
 from synapse.http.site import SynapseRequest
-from synapse.media._base import respond_404
+from synapse.media._base import (
+    DEFAULT_MAX_TIMEOUT_MS,
+    MAXIMUM_ALLOWED_MAX_TIMEOUT_MS,
+    respond_404,
+)
 from synapse.util.stringutils import parse_and_validate_server_name
 
 if TYPE_CHECKING:
@@ -65,12 +69,16 @@ class DownloadResource(RestServlet):
         )
         # Limited non-standard form of CSP for IE11
         request.setHeader(b"X-Content-Security-Policy", b"sandbox;")
-        request.setHeader(
-            b"Referrer-Policy",
-            b"no-referrer",
+        request.setHeader(b"Referrer-Policy", b"no-referrer")
+        max_timeout_ms = parse_integer(
+            request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
         )
+        max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
+
         if self._is_mine_server_name(server_name):
-            await self.media_repo.get_local_media(request, media_id, file_name)
+            await self.media_repo.get_local_media(
+                request, media_id, file_name, max_timeout_ms
+            )
         else:
             allow_remote = parse_boolean(request, "allow_remote", default=True)
             if not allow_remote:
@@ -83,5 +91,5 @@ class DownloadResource(RestServlet):
                 return
 
             await self.media_repo.get_remote_media(
-                request, server_name, media_id, file_name
+                request, server_name, media_id, file_name, max_timeout_ms
             )
diff --git a/synapse/rest/media/media_repository_resource.py b/synapse/rest/media/media_repository_resource.py
index 2089bb1029..ca65116b84 100644
--- a/synapse/rest/media/media_repository_resource.py
+++ b/synapse/rest/media/media_repository_resource.py
@@ -18,10 +18,11 @@ from synapse.config._base import ConfigError
 from synapse.http.server import HttpServer, JsonResource
 
 from .config_resource import MediaConfigResource
+from .create_resource import CreateResource
 from .download_resource import DownloadResource
 from .preview_url_resource import PreviewUrlResource
 from .thumbnail_resource import ThumbnailResource
-from .upload_resource import UploadResource
+from .upload_resource import AsyncUploadServlet, UploadServlet
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -91,8 +92,9 @@ class MediaRepositoryResource(JsonResource):
 
         # Note that many of these should not exist as v1 endpoints, but empirically
         # a lot of traffic still goes to them.
-
-        UploadResource(hs, media_repo).register(http_server)
+        CreateResource(hs, media_repo).register(http_server)
+        UploadServlet(hs, media_repo).register(http_server)
+        AsyncUploadServlet(hs, media_repo).register(http_server)
         DownloadResource(hs, media_repo).register(http_server)
         ThumbnailResource(hs, media_repo, media_repo.media_storage).register(
             http_server
diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py
index 85b6bdbe72..681f2a5a27 100644
--- a/synapse/rest/media/thumbnail_resource.py
+++ b/synapse/rest/media/thumbnail_resource.py
@@ -23,6 +23,8 @@ from synapse.http.server import respond_with_json, set_corp_headers, set_cors_he
 from synapse.http.servlet import RestServlet, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
 from synapse.media._base import (
+    DEFAULT_MAX_TIMEOUT_MS,
+    MAXIMUM_ALLOWED_MAX_TIMEOUT_MS,
     FileInfo,
     ThumbnailInfo,
     respond_404,
@@ -75,15 +77,19 @@ class ThumbnailResource(RestServlet):
         method = parse_string(request, "method", "scale")
         # TODO Parse the Accept header to get an prioritised list of thumbnail types.
         m_type = "image/png"
+        max_timeout_ms = parse_integer(
+            request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
+        )
+        max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
 
         if self._is_mine_server_name(server_name):
             if self.dynamic_thumbnails:
                 await self._select_or_generate_local_thumbnail(
-                    request, media_id, width, height, method, m_type
+                    request, media_id, width, height, method, m_type, max_timeout_ms
                 )
             else:
                 await self._respond_local_thumbnail(
-                    request, media_id, width, height, method, m_type
+                    request, media_id, width, height, method, m_type, max_timeout_ms
                 )
             self.media_repo.mark_recently_accessed(None, media_id)
         else:
@@ -95,14 +101,21 @@ class ThumbnailResource(RestServlet):
                 respond_404(request)
                 return
 
-            if self.dynamic_thumbnails:
-                await self._select_or_generate_remote_thumbnail(
-                    request, server_name, media_id, width, height, method, m_type
-                )
-            else:
-                await self._respond_remote_thumbnail(
-                    request, server_name, media_id, width, height, method, m_type
-                )
+            remote_resp_function = (
+                self._select_or_generate_remote_thumbnail
+                if self.dynamic_thumbnails
+                else self._respond_remote_thumbnail
+            )
+            await remote_resp_function(
+                request,
+                server_name,
+                media_id,
+                width,
+                height,
+                method,
+                m_type,
+                max_timeout_ms,
+            )
             self.media_repo.mark_recently_accessed(server_name, media_id)
 
     async def _respond_local_thumbnail(
@@ -113,15 +126,12 @@ class ThumbnailResource(RestServlet):
         height: int,
         method: str,
         m_type: str,
+        max_timeout_ms: int,
     ) -> None:
-        media_info = await self.store.get_local_media(media_id)
-
+        media_info = await self.media_repo.get_local_media_info(
+            request, media_id, max_timeout_ms
+        )
         if not media_info:
-            respond_404(request)
-            return
-        if media_info["quarantined_by"]:
-            logger.info("Media is quarantined")
-            respond_404(request)
             return
 
         thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
@@ -134,7 +144,7 @@ class ThumbnailResource(RestServlet):
             thumbnail_infos,
             media_id,
             media_id,
-            url_cache=bool(media_info["url_cache"]),
+            url_cache=bool(media_info.url_cache),
             server_name=None,
         )
 
@@ -146,15 +156,13 @@ class ThumbnailResource(RestServlet):
         desired_height: int,
         desired_method: str,
         desired_type: str,
+        max_timeout_ms: int,
     ) -> None:
-        media_info = await self.store.get_local_media(media_id)
+        media_info = await self.media_repo.get_local_media_info(
+            request, media_id, max_timeout_ms
+        )
 
         if not media_info:
-            respond_404(request)
-            return
-        if media_info["quarantined_by"]:
-            logger.info("Media is quarantined")
-            respond_404(request)
             return
 
         thumbnail_infos = await self.store.get_local_media_thumbnails(media_id)
@@ -168,7 +176,7 @@ class ThumbnailResource(RestServlet):
                 file_info = FileInfo(
                     server_name=None,
                     file_id=media_id,
-                    url_cache=media_info["url_cache"],
+                    url_cache=bool(media_info.url_cache),
                     thumbnail=info,
                 )
 
@@ -188,7 +196,7 @@ class ThumbnailResource(RestServlet):
             desired_height,
             desired_method,
             desired_type,
-            url_cache=bool(media_info["url_cache"]),
+            url_cache=bool(media_info.url_cache),
         )
 
         if file_path:
@@ -206,14 +214,20 @@ class ThumbnailResource(RestServlet):
         desired_height: int,
         desired_method: str,
         desired_type: str,
+        max_timeout_ms: int,
     ) -> None:
-        media_info = await self.media_repo.get_remote_media_info(server_name, media_id)
+        media_info = await self.media_repo.get_remote_media_info(
+            server_name, media_id, max_timeout_ms
+        )
+        if not media_info:
+            respond_404(request)
+            return
 
         thumbnail_infos = await self.store.get_remote_media_thumbnails(
             server_name, media_id
         )
 
-        file_id = media_info["filesystem_id"]
+        file_id = media_info.filesystem_id
 
         for info in thumbnail_infos:
             t_w = info.width == desired_width
@@ -224,7 +238,7 @@ class ThumbnailResource(RestServlet):
             if t_w and t_h and t_method and t_type:
                 file_info = FileInfo(
                     server_name=server_name,
-                    file_id=media_info["filesystem_id"],
+                    file_id=file_id,
                     thumbnail=info,
                 )
 
@@ -263,11 +277,16 @@ class ThumbnailResource(RestServlet):
         height: int,
         method: str,
         m_type: str,
+        max_timeout_ms: int,
     ) -> None:
         # TODO: Don't download the whole remote file
         # We should proxy the thumbnail from the remote server instead of
         # downloading the remote file and generating our own thumbnails.
-        media_info = await self.media_repo.get_remote_media_info(server_name, media_id)
+        media_info = await self.media_repo.get_remote_media_info(
+            server_name, media_id, max_timeout_ms
+        )
+        if not media_info:
+            return
 
         thumbnail_infos = await self.store.get_remote_media_thumbnails(
             server_name, media_id
@@ -280,7 +299,7 @@ class ThumbnailResource(RestServlet):
             m_type,
             thumbnail_infos,
             media_id,
-            media_info["filesystem_id"],
+            media_info.filesystem_id,
             url_cache=False,
             server_name=server_name,
         )
diff --git a/synapse/rest/media/upload_resource.py b/synapse/rest/media/upload_resource.py
index 949326d85d..62d3e228a8 100644
--- a/synapse/rest/media/upload_resource.py
+++ b/synapse/rest/media/upload_resource.py
@@ -15,7 +15,7 @@
 
 import logging
 import re
-from typing import IO, TYPE_CHECKING, Dict, List, Optional
+from typing import IO, TYPE_CHECKING, Dict, List, Optional, Tuple
 
 from synapse.api.errors import Codes, SynapseError
 from synapse.http.server import respond_with_json
@@ -29,23 +29,24 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger(__name__)
 
+# The name of the lock to use when uploading media.
+_UPLOAD_MEDIA_LOCK_NAME = "upload_media"
 
-class UploadResource(RestServlet):
-    PATTERNS = [re.compile("/_matrix/media/(r0|v3|v1)/upload")]
 
+class BaseUploadServlet(RestServlet):
     def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"):
         super().__init__()
 
         self.media_repo = media_repo
         self.filepaths = media_repo.filepaths
         self.store = hs.get_datastores().main
-        self.clock = hs.get_clock()
+        self.server_name = hs.hostname
         self.auth = hs.get_auth()
         self.max_upload_size = hs.config.media.max_upload_size
-        self.clock = hs.get_clock()
 
-    async def on_POST(self, request: SynapseRequest) -> None:
-        requester = await self.auth.get_user_by_req(request)
+    def _get_file_metadata(
+        self, request: SynapseRequest
+    ) -> Tuple[int, Optional[str], str]:
         raw_content_length = request.getHeader("Content-Length")
         if raw_content_length is None:
             raise SynapseError(msg="Request must specify a Content-Length", code=400)
@@ -88,6 +89,16 @@ class UploadResource(RestServlet):
         #     disposition = headers.getRawHeaders(b"Content-Disposition")[0]
         # TODO(markjh): parse content-dispostion
 
+        return content_length, upload_name, media_type
+
+
+class UploadServlet(BaseUploadServlet):
+    PATTERNS = [re.compile("/_matrix/media/(r0|v3|v1)/upload$")]
+
+    async def on_POST(self, request: SynapseRequest) -> None:
+        requester = await self.auth.get_user_by_req(request)
+        content_length, upload_name, media_type = self._get_file_metadata(request)
+
         try:
             content: IO = request.content  # type: ignore
             content_uri = await self.media_repo.create_content(
@@ -103,3 +114,53 @@ class UploadResource(RestServlet):
         respond_with_json(
             request, 200, {"content_uri": str(content_uri)}, send_cors=True
         )
+
+
+class AsyncUploadServlet(BaseUploadServlet):
+    """Upload content for a previously reserved media ID.
+
+    PUT /_matrix/media/v3/upload/{server_name}/{media_id} — the counterpart
+    to CreateResource: the media ID must belong to this homeserver, and a
+    lock is taken so the same ID cannot be written concurrently.
+    """
+
+    PATTERNS = [
+        re.compile(
+            "/_matrix/media/v3/upload/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$"
+        )
+    ]
+
+    async def on_PUT(
+        self, request: SynapseRequest, server_name: str, media_id: str
+    ) -> None:
+        """Store the request body as the content of `media_id`.
+
+        Raises SynapseError 404 for a non-local server name, 409 if the media
+        ID is locked (already being written), or 400 for spammy content.
+        """
+        requester = await self.auth.get_user_by_req(request)
+
+        # Only media IDs created on this homeserver can be populated here.
+        if server_name != self.server_name:
+            raise SynapseError(
+                404,
+                "Non-local server name specified",
+                errcode=Codes.NOT_FOUND,
+            )
+
+        # Serialise writes to this media ID; a concurrent PUT would otherwise
+        # race to overwrite the same content.
+        lock = await self.store.try_acquire_lock(_UPLOAD_MEDIA_LOCK_NAME, media_id)
+        if not lock:
+            raise SynapseError(
+                409,
+                "Media ID cannot be overwritten",
+                errcode=Codes.CANNOT_OVERWRITE_MEDIA,
+            )
+
+        async with lock:
+            # Checks e.g. that the reservation belongs to this user and is
+            # still pending — presumably raises on failure; confirm in
+            # MediaRepository.verify_can_upload.
+            await self.media_repo.verify_can_upload(media_id, requester.user)
+            content_length, upload_name, media_type = self._get_file_metadata(request)
+
+            try:
+                content: IO = request.content  # type: ignore
+                await self.media_repo.update_content(
+                    media_id,
+                    media_type,
+                    upload_name,
+                    content,
+                    content_length,
+                    requester.user,
+                )
+            except SpamMediaException:
+                # For uploading of media we want to respond with a 400, instead of
+                # the default 404, as that would just be confusing.
+                raise SynapseError(400, "Bad content")
+
+            logger.info("Uploaded content for media ID %r", media_id)
+            respond_with_json(request, 200, {}, send_cors=True)
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 12829d3d7d..62fbd05534 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -28,6 +28,7 @@ from typing import (
     Sequence,
     Tuple,
     Type,
+    cast,
 )
 
 import attr
@@ -48,7 +49,11 @@ else:
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
-    from synapse.storage.database import DatabasePool, LoggingTransaction
+    from synapse.storage.database import (
+        DatabasePool,
+        LoggingDatabaseConnection,
+        LoggingTransaction,
+    )
 
 logger = logging.getLogger(__name__)
 
@@ -488,14 +493,14 @@ class BackgroundUpdater:
             True if we have finished running all the background updates, otherwise False
         """
 
-        def get_background_updates_txn(txn: Cursor) -> List[Dict[str, Any]]:
+        def get_background_updates_txn(txn: Cursor) -> List[Tuple[str, Optional[str]]]:
             txn.execute(
                 """
                 SELECT update_name, depends_on FROM background_updates
                 ORDER BY ordering, update_name
                 """
             )
-            return self.db_pool.cursor_to_dict(txn)
+            return cast(List[Tuple[str, Optional[str]]], txn.fetchall())
 
         if not self._current_background_update:
             all_pending_updates = await self.db_pool.runInteraction(
@@ -507,14 +512,13 @@ class BackgroundUpdater:
                 return True
 
             # find the first update which isn't dependent on another one in the queue.
-            pending = {update["update_name"] for update in all_pending_updates}
-            for upd in all_pending_updates:
-                depends_on = upd["depends_on"]
+            pending = {update_name for update_name, depends_on in all_pending_updates}
+            for update_name, depends_on in all_pending_updates:
                 if not depends_on or depends_on not in pending:
                     break
                 logger.info(
                     "Not starting on bg update %s until %s is done",
-                    upd["update_name"],
+                    update_name,
                     depends_on,
                 )
             else:
@@ -524,7 +528,7 @@ class BackgroundUpdater:
                     "another: dependency cycle?"
                 )
 
-            self._current_background_update = upd["update_name"]
+            self._current_background_update = update_name
 
         # We have a background update to run, otherwise we would have returned
         # early.
@@ -746,10 +750,10 @@ class BackgroundUpdater:
                 The named index will be dropped upon completion of the new index.
         """
 
-        def create_index_psql(conn: Connection) -> None:
+        def create_index_psql(conn: "LoggingDatabaseConnection") -> None:
             conn.rollback()
             # postgres insists on autocommit for the index
-            conn.set_session(autocommit=True)  # type: ignore
+            conn.engine.attempt_to_set_autocommit(conn.conn, True)
 
             try:
                 c = conn.cursor()
@@ -793,9 +797,9 @@ class BackgroundUpdater:
                 undo_timeout_sql = f"SET statement_timeout = {default_timeout}"
                 conn.cursor().execute(undo_timeout_sql)
 
-                conn.set_session(autocommit=False)  # type: ignore
+                conn.engine.attempt_to_set_autocommit(conn.conn, False)
 
-        def create_index_sqlite(conn: Connection) -> None:
+        def create_index_sqlite(conn: "LoggingDatabaseConnection") -> None:
             # Sqlite doesn't support concurrent creation of indexes.
             #
             # We assume that sqlite doesn't give us invalid indices; however
@@ -825,7 +829,9 @@ class BackgroundUpdater:
                 c.execute(sql)
 
         if isinstance(self.db_pool.engine, engines.PostgresEngine):
-            runner: Optional[Callable[[Connection], None]] = create_index_psql
+            runner: Optional[
+                Callable[[LoggingDatabaseConnection], None]
+            ] = create_index_psql
         elif psql_only:
             runner = None
         else:
diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
index f39ae2d635..1529c86cc5 100644
--- a/synapse/storage/controllers/persist_events.py
+++ b/synapse/storage/controllers/persist_events.py
@@ -542,13 +542,15 @@ class EventsPersistenceStorageController:
         return await res.get_state(self._state_controller, StateFilter.all())
 
     async def _persist_event_batch(
-        self, _room_id: str, task: _PersistEventsTask
+        self, room_id: str, task: _PersistEventsTask
     ) -> Dict[str, str]:
         """Callback for the _event_persist_queue
 
         Calculates the change to current state and forward extremities, and
         persists the given events and with those updates.
 
+        Assumes that we are only persisting events for one room at a time.
+
         Returns:
             A dictionary of event ID to event ID we didn't persist as we already
             had another event persisted with the same TXN ID.
@@ -594,140 +596,23 @@ class EventsPersistenceStorageController:
             # We can't easily parallelize these since different chunks
             # might contain the same event. :(
 
-            # NB: Assumes that we are only persisting events for one room
-            # at a time.
-
-            # map room_id->set[event_ids] giving the new forward
-            # extremities in each room
-            new_forward_extremities: Dict[str, Set[str]] = {}
-
-            # map room_id->(to_delete, to_insert) where to_delete is a list
-            # of type/state keys to remove from current state, and to_insert
-            # is a map (type,key)->event_id giving the state delta in each
-            # room
-            state_delta_for_room: Dict[str, DeltaState] = {}
+            new_forward_extremities = None
+            state_delta_for_room = None
 
             if not backfilled:
                 with Measure(self._clock, "_calculate_state_and_extrem"):
-                    # Work out the new "current state" for each room.
+                    # Work out the new "current state" for the room.
                     # We do this by working out what the new extremities are and then
                     # calculating the state from that.
-                    events_by_room: Dict[str, List[Tuple[EventBase, EventContext]]] = {}
-                    for event, context in chunk:
-                        events_by_room.setdefault(event.room_id, []).append(
-                            (event, context)
-                        )
-
-                    for room_id, ev_ctx_rm in events_by_room.items():
-                        latest_event_ids = (
-                            await self.main_store.get_latest_event_ids_in_room(room_id)
-                        )
-                        new_latest_event_ids = await self._calculate_new_extremities(
-                            room_id, ev_ctx_rm, latest_event_ids
-                        )
-
-                        if new_latest_event_ids == latest_event_ids:
-                            # No change in extremities, so no change in state
-                            continue
-
-                        # there should always be at least one forward extremity.
-                        # (except during the initial persistence of the send_join
-                        # results, in which case there will be no existing
-                        # extremities, so we'll `continue` above and skip this bit.)
-                        assert new_latest_event_ids, "No forward extremities left!"
-
-                        new_forward_extremities[room_id] = new_latest_event_ids
-
-                        len_1 = (
-                            len(latest_event_ids) == 1
-                            and len(new_latest_event_ids) == 1
-                        )
-                        if len_1:
-                            all_single_prev_not_state = all(
-                                len(event.prev_event_ids()) == 1
-                                and not event.is_state()
-                                for event, ctx in ev_ctx_rm
-                            )
-                            # Don't bother calculating state if they're just
-                            # a long chain of single ancestor non-state events.
-                            if all_single_prev_not_state:
-                                continue
-
-                        state_delta_counter.inc()
-                        if len(new_latest_event_ids) == 1:
-                            state_delta_single_event_counter.inc()
-
-                            # This is a fairly handwavey check to see if we could
-                            # have guessed what the delta would have been when
-                            # processing one of these events.
-                            # What we're interested in is if the latest extremities
-                            # were the same when we created the event as they are
-                            # now. When this server creates a new event (as opposed
-                            # to receiving it over federation) it will use the
-                            # forward extremities as the prev_events, so we can
-                            # guess this by looking at the prev_events and checking
-                            # if they match the current forward extremities.
-                            for ev, _ in ev_ctx_rm:
-                                prev_event_ids = set(ev.prev_event_ids())
-                                if latest_event_ids == prev_event_ids:
-                                    state_delta_reuse_delta_counter.inc()
-                                    break
-
-                        logger.debug("Calculating state delta for room %s", room_id)
-                        with Measure(
-                            self._clock, "persist_events.get_new_state_after_events"
-                        ):
-                            res = await self._get_new_state_after_events(
-                                room_id,
-                                ev_ctx_rm,
-                                latest_event_ids,
-                                new_latest_event_ids,
-                            )
-                            current_state, delta_ids, new_latest_event_ids = res
-
-                            # there should always be at least one forward extremity.
-                            # (except during the initial persistence of the send_join
-                            # results, in which case there will be no existing
-                            # extremities, so we'll `continue` above and skip this bit.)
-                            assert new_latest_event_ids, "No forward extremities left!"
-
-                            new_forward_extremities[room_id] = new_latest_event_ids
-
-                        # If either are not None then there has been a change,
-                        # and we need to work out the delta (or use that
-                        # given)
-                        delta = None
-                        if delta_ids is not None:
-                            # If there is a delta we know that we've
-                            # only added or replaced state, never
-                            # removed keys entirely.
-                            delta = DeltaState([], delta_ids)
-                        elif current_state is not None:
-                            with Measure(
-                                self._clock, "persist_events.calculate_state_delta"
-                            ):
-                                delta = await self._calculate_state_delta(
-                                    room_id, current_state
-                                )
-
-                        if delta:
-                            # If we have a change of state then lets check
-                            # whether we're actually still a member of the room,
-                            # or if our last user left. If we're no longer in
-                            # the room then we delete the current state and
-                            # extremities.
-                            is_still_joined = await self._is_server_still_joined(
-                                room_id,
-                                ev_ctx_rm,
-                                delta,
-                            )
-                            if not is_still_joined:
-                                logger.info("Server no longer in room %s", room_id)
-                                delta.no_longer_in_room = True
-
-                            state_delta_for_room[room_id] = delta
+                    (
+                        new_forward_extremities,
+                        state_delta_for_room,
+                    ) = await self._calculate_new_forward_extremities_and_state_delta(
+                        room_id, chunk
+                    )
 
             await self.persist_events_store._persist_events_and_state_updates(
+                room_id,
                 chunk,
                 state_delta_for_room=state_delta_for_room,
                 new_forward_extremities=new_forward_extremities,
@@ -737,6 +622,117 @@ class EventsPersistenceStorageController:
 
         return replaced_events
 
+    async def _calculate_new_forward_extremities_and_state_delta(
+        self, room_id: str, ev_ctx_rm: List[Tuple[EventBase, EventContext]]
+    ) -> Tuple[Optional[Set[str]], Optional[DeltaState]]:
+        """Calculates the new forward extremities and state delta for a room
+        given events to persist.
+
+        Assumes that we are only persisting events for one room at a time.
+
+        Returns:
+            A tuple of:
+                A set of str giving the new forward extremities the room
+
+                The state delta for the room.
+        """
+
+        latest_event_ids = await self.main_store.get_latest_event_ids_in_room(room_id)
+        new_latest_event_ids = await self._calculate_new_extremities(
+            room_id, ev_ctx_rm, latest_event_ids
+        )
+
+        if new_latest_event_ids == latest_event_ids:
+            # No change in extremities, so no change in state
+            return (None, None)
+
+        # there should always be at least one forward extremity.
+        # (except during the initial persistence of the send_join
+        # results, in which case there will be no existing
+        # extremities, so we'll `continue` above and skip this bit.)
+        assert new_latest_event_ids, "No forward extremities left!"
+
+        new_forward_extremities = new_latest_event_ids
+
+        len_1 = len(latest_event_ids) == 1 and len(new_latest_event_ids) == 1
+        if len_1:
+            all_single_prev_not_state = all(
+                len(event.prev_event_ids()) == 1 and not event.is_state()
+                for event, ctx in ev_ctx_rm
+            )
+            # Don't bother calculating state if they're just
+            # a long chain of single ancestor non-state events.
+            if all_single_prev_not_state:
+                return (new_forward_extremities, None)
+
+        state_delta_counter.inc()
+        if len(new_latest_event_ids) == 1:
+            state_delta_single_event_counter.inc()
+
+            # This is a fairly handwavey check to see if we could
+            # have guessed what the delta would have been when
+            # processing one of these events.
+            # What we're interested in is if the latest extremities
+            # were the same when we created the event as they are
+            # now. When this server creates a new event (as opposed
+            # to receiving it over federation) it will use the
+            # forward extremities as the prev_events, so we can
+            # guess this by looking at the prev_events and checking
+            # if they match the current forward extremities.
+            for ev, _ in ev_ctx_rm:
+                prev_event_ids = set(ev.prev_event_ids())
+                if latest_event_ids == prev_event_ids:
+                    state_delta_reuse_delta_counter.inc()
+                    break
+
+        logger.debug("Calculating state delta for room %s", room_id)
+        with Measure(self._clock, "persist_events.get_new_state_after_events"):
+            res = await self._get_new_state_after_events(
+                room_id,
+                ev_ctx_rm,
+                latest_event_ids,
+                new_latest_event_ids,
+            )
+            current_state, delta_ids, new_latest_event_ids = res
+
+            # there should always be at least one forward extremity.
+            # (except during the initial persistence of the send_join
+            # results, in which case there will be no existing
+            # extremities, so we'll `continue` above and skip this bit.)
+            assert new_latest_event_ids, "No forward extremities left!"
+
+            new_forward_extremities = new_latest_event_ids
+
+        # If either are not None then there has been a change,
+        # and we need to work out the delta (or use that
+        # given)
+        delta = None
+        if delta_ids is not None:
+            # If there is a delta we know that we've
+            # only added or replaced state, never
+            # removed keys entirely.
+            delta = DeltaState([], delta_ids)
+        elif current_state is not None:
+            with Measure(self._clock, "persist_events.calculate_state_delta"):
+                delta = await self._calculate_state_delta(room_id, current_state)
+
+        if delta:
+            # If we have a change of state then lets check
+            # whether we're actually still a member of the room,
+            # or if our last user left. If we're no longer in
+            # the room then we delete the current state and
+            # extremities.
+            is_still_joined = await self._is_server_still_joined(
+                room_id,
+                ev_ctx_rm,
+                delta,
+            )
+            if not is_still_joined:
+                logger.info("Server no longer in room %s", room_id)
+                delta.no_longer_in_room = True
+
+        return (new_forward_extremities, delta)
+
     async def _calculate_new_extremities(
         self,
         room_id: str,
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index a4e7048368..eb34de4df5 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -18,7 +18,6 @@ import logging
 import time
 import types
 from collections import defaultdict
-from sys import intern
 from time import monotonic as monotonic_time
 from typing import (
     TYPE_CHECKING,
@@ -1042,20 +1041,6 @@ class DatabasePool:
             self._db_pool.runWithConnection(inner_func, *args, **kwargs)
         )
 
-    @staticmethod
-    def cursor_to_dict(cursor: Cursor) -> List[Dict[str, Any]]:
-        """Converts a SQL cursor into an list of dicts.
-
-        Args:
-            cursor: The DBAPI cursor which has executed a query.
-        Returns:
-            A list of dicts where the key is the column header.
-        """
-        assert cursor.description is not None, "cursor.description was None"
-        col_headers = [intern(str(column[0])) for column in cursor.description]
-        results = [dict(zip(col_headers, row)) for row in cursor]
-        return results
-
     async def execute(self, desc: str, query: str, *args: Any) -> List[Tuple[Any, ...]]:
         """Runs a single query for a result set.
 
@@ -1131,8 +1116,8 @@ class DatabasePool:
     def simple_insert_many_txn(
         txn: LoggingTransaction,
         table: str,
-        keys: Collection[str],
-        values: Iterable[Iterable[Any]],
+        keys: Sequence[str],
+        values: Collection[Iterable[Any]],
     ) -> None:
         """Executes an INSERT query on the named table.
 
@@ -1145,6 +1130,9 @@ class DatabasePool:
             keys: list of column names
             values: for each row, a list of values in the same order as `keys`
         """
+        # If there's nothing to insert, then skip executing the query.
+        if not values:
+            return
 
         if isinstance(txn.database_engine, PostgresEngine):
             # We use `execute_values` as it can be a lot faster than `execute_batch`,
@@ -1416,12 +1404,12 @@ class DatabasePool:
             allvalues.update(values)
             latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)
 
-        sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) %s DO %s" % (
+        sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) %sDO %s" % (
             table,
             ", ".join(k for k in allvalues),
             ", ".join("?" for _ in allvalues),
             ", ".join(k for k in keyvalues),
-            f"WHERE {where_clause}" if where_clause else "",
+            f"WHERE {where_clause} " if where_clause else "",
             latter,
         )
         txn.execute(sql, list(allvalues.values()))
@@ -1470,7 +1458,7 @@ class DatabasePool:
         key_names: Collection[str],
         key_values: Collection[Iterable[Any]],
         value_names: Collection[str],
-        value_values: Iterable[Iterable[Any]],
+        value_values: Collection[Iterable[Any]],
     ) -> None:
         """
         Upsert, many times.
@@ -1483,6 +1471,19 @@ class DatabasePool:
             value_values: A list of each row's value column values.
                 Ignored if value_names is empty.
         """
+        # If there's nothing to upsert, then skip executing the query.
+        if not key_values:
+            return
+
+        # No value columns, therefore make a blank list so that the following
+        # zip() works correctly.
+        if not value_names:
+            value_values = [() for x in range(len(key_values))]
+        elif len(value_values) != len(key_values):
+            raise ValueError(
+                f"{len(key_values)} key rows and {len(value_values)} value rows: should be the same number."
+            )
+
         if table not in self._unsafe_to_upsert_tables:
             return self.simple_upsert_many_txn_native_upsert(
                 txn, table, key_names, key_values, value_names, value_values
@@ -1517,10 +1518,6 @@ class DatabasePool:
             value_values: A list of each row's value column values.
                 Ignored if value_names is empty.
         """
-        # No value columns, therefore make a blank list so that the following
-        # zip() works correctly.
-        if not value_names:
-            value_values = [() for x in range(len(key_values))]
 
         # Lock the table just once, to prevent it being done once per row.
         # Note that, according to Postgres' documentation, once obtained,
@@ -1558,10 +1555,7 @@ class DatabasePool:
         allnames.extend(value_names)
 
         if not value_names:
-            # No value columns, therefore make a blank list so that the
-            # following zip() works correctly.
             latter = "NOTHING"
-            value_values = [() for x in range(len(key_values))]
         else:
             latter = "UPDATE SET " + ", ".join(
                 k + "=EXCLUDED." + k for k in value_names
@@ -1603,7 +1597,7 @@ class DatabasePool:
         retcols: Collection[str],
         allow_none: Literal[False] = False,
         desc: str = "simple_select_one",
-    ) -> Dict[str, Any]:
+    ) -> Tuple[Any, ...]:
         ...
 
     @overload
@@ -1614,7 +1608,7 @@ class DatabasePool:
         retcols: Collection[str],
         allow_none: Literal[True] = True,
         desc: str = "simple_select_one",
-    ) -> Optional[Dict[str, Any]]:
+    ) -> Optional[Tuple[Any, ...]]:
         ...
 
     async def simple_select_one(
@@ -1624,7 +1618,7 @@ class DatabasePool:
         retcols: Collection[str],
         allow_none: bool = False,
         desc: str = "simple_select_one",
-    ) -> Optional[Dict[str, Any]]:
+    ) -> Optional[Tuple[Any, ...]]:
         """Executes a SELECT query on the named table, which is expected to
         return a single row, returning multiple columns from it.
 
@@ -1925,6 +1919,7 @@ class DatabasePool:
         Returns:
             The results as a list of tuples.
         """
+        # If there's nothing to select, then skip executing the query.
         if not iterable:
             return []
 
@@ -2059,13 +2054,13 @@ class DatabasePool:
             raise ValueError(
                 f"{len(key_values)} key rows and {len(value_values)} value rows: should be the same number."
             )
+        # If there is nothing to update, then skip executing the query.
+        if not key_values:
+            return
 
         # List of tuples of (value values, then key values)
         # (This matches the order needed for the query)
-        args = [tuple(x) + tuple(y) for x, y in zip(value_values, key_values)]
-
-        for ks, vs in zip(key_values, value_values):
-            args.append(tuple(vs) + tuple(ks))
+        args = [tuple(vv) + tuple(kv) for vv, kv in zip(value_values, key_values)]
 
         # 'col1 = ?, col2 = ?, ...'
         set_clause = ", ".join(f"{n} = ?" for n in value_names)
@@ -2077,9 +2072,7 @@ class DatabasePool:
             where_clause = ""
 
         # UPDATE mytable SET col1 = ?, col2 = ? WHERE col3 = ? AND col4 = ?
-        sql = f"""
-            UPDATE {table} SET {set_clause} {where_clause}
-        """
+        sql = f"UPDATE {table} SET {set_clause} {where_clause}"
 
         txn.execute_batch(sql, args)
 
@@ -2134,7 +2127,7 @@ class DatabasePool:
         keyvalues: Dict[str, Any],
         retcols: Collection[str],
         allow_none: bool = False,
-    ) -> Optional[Dict[str, Any]]:
+    ) -> Optional[Tuple[Any, ...]]:
         select_sql = "SELECT %s FROM %s" % (", ".join(retcols), table)
 
         if keyvalues:
@@ -2152,7 +2145,7 @@ class DatabasePool:
         if txn.rowcount > 1:
             raise StoreError(500, "More than one row matched (%s)" % (table,))
 
-        return dict(zip(retcols, row))
+        return row
 
     async def simple_delete_one(
         self, table: str, keyvalues: Dict[str, Any], desc: str = "simple_delete_one"
@@ -2295,11 +2288,10 @@ class DatabasePool:
         Returns:
             Number rows deleted
         """
+        # If there's nothing to delete, then skip executing the query.
         if not values:
             return 0
 
-        sql = "DELETE FROM %s" % table
-
         clause, values = make_in_list_sql_clause(txn.database_engine, column, values)
         clauses = [clause]
 
@@ -2307,8 +2299,7 @@ class DatabasePool:
             clauses.append("%s = ?" % (key,))
             values.append(value)
 
-        if clauses:
-            sql = "%s WHERE %s" % (sql, " AND ".join(clauses))
+        sql = "DELETE FROM %s WHERE %s" % (table, " AND ".join(clauses))
         txn.execute(sql, values)
 
         return txn.rowcount
diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py
index 7aa24ccf21..b57e260fe0 100644
--- a/synapse/storage/databases/__init__.py
+++ b/synapse/storage/databases/__init__.py
@@ -45,7 +45,7 @@ class Databases(Generic[DataStoreT]):
     """
 
     databases: List[DatabasePool]
-    main: "DataStore"  # FIXME: #11165: actually an instance of `main_store_class`
+    main: "DataStore"  # FIXME: https://github.com/matrix-org/synapse/issues/11165: actually an instance of `main_store_class`
     state: StateGroupDataStore
     persist_events: Optional[PersistEventsStore]
 
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 840d725114..89f4077351 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -17,6 +17,8 @@
 import logging
 from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast
 
+import attr
+
 from synapse.api.constants import Direction
 from synapse.config.homeserver import HomeServerConfig
 from synapse.storage._base import make_in_list_sql_clause
@@ -28,7 +30,7 @@ from synapse.storage.database import (
 from synapse.storage.databases.main.stats import UserSortOrder
 from synapse.storage.engines import BaseDatabaseEngine
 from synapse.storage.types import Cursor
-from synapse.types import JsonDict, get_domain_from_id
+from synapse.types import get_domain_from_id
 
 from .account_data import AccountDataStore
 from .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore
@@ -82,6 +84,25 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class UserPaginateResponse:
+    """This is very similar to UserInfo, but not quite the same."""
+
+    name: str
+    user_type: Optional[str]
+    is_guest: bool
+    admin: bool
+    deactivated: bool
+    shadow_banned: bool
+    displayname: Optional[str]
+    avatar_url: Optional[str]
+    creation_ts: Optional[int]
+    approved: bool
+    erased: bool
+    last_seen_ts: int
+    locked: bool
+
+
 class DataStore(
     EventsBackgroundUpdatesStore,
     ExperimentalFeaturesStore,
@@ -156,7 +177,7 @@ class DataStore(
         approved: bool = True,
         not_user_types: Optional[List[str]] = None,
         locked: bool = False,
-    ) -> Tuple[List[JsonDict], int]:
+    ) -> Tuple[List[UserPaginateResponse], int]:
         """Function to retrieve a paginated list of users from
         users list. This will return a json list of users and the
         total number of users matching the filter criteria.
@@ -182,7 +203,7 @@ class DataStore(
 
         def get_users_paginate_txn(
             txn: LoggingTransaction,
-        ) -> Tuple[List[JsonDict], int]:
+        ) -> Tuple[List[UserPaginateResponse], int]:
             filters = []
             args: list = []
 
@@ -282,13 +303,24 @@ class DataStore(
             """
             args += [limit, start]
             txn.execute(sql, args)
-            users = self.db_pool.cursor_to_dict(txn)
-
-            # some of those boolean values are returned as integers when we're on SQLite
-            columns_to_boolify = ["erased"]
-            for user in users:
-                for column in columns_to_boolify:
-                    user[column] = bool(user[column])
+            users = [
+                UserPaginateResponse(
+                    name=row[0],
+                    user_type=row[1],
+                    is_guest=bool(row[2]),
+                    admin=bool(row[3]),
+                    deactivated=bool(row[4]),
+                    shadow_banned=bool(row[5]),
+                    displayname=row[6],
+                    avatar_url=row[7],
+                    creation_ts=row[8],
+                    approved=bool(row[9]),
+                    erased=bool(row[10]),
+                    last_seen_ts=row[11],
+                    locked=bool(row[12]),
+                )
+                for row in txn
+            ]
 
             return users, count
 
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index d7482a1f4e..07f9b65af3 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -747,8 +747,16 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
         )
 
         # Invalidate the cache for any ignored users which were added or removed.
-        for ignored_user_id in previously_ignored_users ^ currently_ignored_users:
-            self._invalidate_cache_and_stream(txn, self.ignored_by, (ignored_user_id,))
+        self._invalidate_cache_and_stream_bulk(
+            txn,
+            self.ignored_by,
+            [
+                (ignored_user_id,)
+                for ignored_user_id in (
+                    previously_ignored_users ^ currently_ignored_users
+                )
+            ],
+        )
         self._invalidate_cache_and_stream(txn, self.ignored_users, (user_id,))
 
     async def remove_account_data_for_user(
@@ -824,10 +832,14 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
                 )
 
                 # Invalidate the cache for ignored users which were removed.
-                for ignored_user_id in previously_ignored_users:
-                    self._invalidate_cache_and_stream(
-                        txn, self.ignored_by, (ignored_user_id,)
-                    )
+                self._invalidate_cache_and_stream_bulk(
+                    txn,
+                    self.ignored_by,
+                    [
+                        (ignored_user_id,)
+                        for ignored_user_id in previously_ignored_users
+                    ],
+                )
 
                 # Invalidate for this user the cache tracking ignored users.
                 self._invalidate_cache_and_stream(txn, self.ignored_users, (user_id,))
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index 4d0470ffd9..d7232f566b 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -483,6 +483,30 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         txn.call_after(cache_func.invalidate, keys)
         self._send_invalidation_to_replication(txn, cache_func.__name__, keys)
 
+    def _invalidate_cache_and_stream_bulk(
+        self,
+        txn: LoggingTransaction,
+        cache_func: CachedFunction,
+        key_tuples: Collection[Tuple[Any, ...]],
+    ) -> None:
+        """A bulk version of _invalidate_cache_and_stream.
+
+        Locally invalidate every key-tuple in `key_tuples`, then emit invalidations
+        for each key-tuple over replication.
+
+        This implementation is more efficient than a loop which repeatedly calls the
+        non-bulk version.
+        """
+        if not key_tuples:
+            return
+
+        for keys in key_tuples:
+            txn.call_after(cache_func.invalidate, keys)
+
+        self._send_invalidation_to_replication_bulk(
+            txn, cache_func.__name__, key_tuples
+        )
+
     def _invalidate_all_cache_and_stream(
         self, txn: LoggingTransaction, cache_func: CachedFunction
     ) -> None:
@@ -564,10 +588,6 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         if isinstance(self.database_engine, PostgresEngine):
             assert self._cache_id_gen is not None
 
-            # get_next() returns a context manager which is designed to wrap
-            # the transaction. However, we want to only get an ID when we want
-            # to use it, here, so we need to call __enter__ manually, and have
-            # __exit__ called after the transaction finishes.
             stream_id = self._cache_id_gen.get_next_txn(txn)
             txn.call_after(self.hs.get_notifier().on_new_replication_data)
 
@@ -586,6 +606,53 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
                 },
             )
 
+    def _send_invalidation_to_replication_bulk(
+        self,
+        txn: LoggingTransaction,
+        cache_name: str,
+        key_tuples: Collection[Tuple[Any, ...]],
+    ) -> None:
+        """Announce the invalidation of multiple (but not all) cache entries.
+
+        This is more efficient than repeated calls to the non-bulk version. It should
+        NOT be used to invalidate the entire cache: use
+        `_send_invalidation_to_replication` with keys=None.
+
+        Note that this does *not* invalidate the cache locally.
+
+        Args:
+            txn
+            cache_name
+            key_tuples: Key-tuples to invalidate. Assumed to be non-empty.
+        """
+        if isinstance(self.database_engine, PostgresEngine):
+            assert self._cache_id_gen is not None
+
+            stream_ids = self._cache_id_gen.get_next_mult_txn(txn, len(key_tuples))
+            ts = self._clock.time_msec()
+            txn.call_after(self.hs.get_notifier().on_new_replication_data)
+            self.db_pool.simple_insert_many_txn(
+                txn,
+                table="cache_invalidation_stream_by_instance",
+                keys=(
+                    "stream_id",
+                    "instance_name",
+                    "cache_func",
+                    "keys",
+                    "invalidation_ts",
+                ),
+                values=[
+                    # We convert key_tuples to a list here because psycopg2 serialises
+                    # lists as pq arrays, but serialises tuples as "composite types".
+                    # (We need an array because the `keys` column has type `text[]`.)
+                    # See:
+                    #     https://www.psycopg.org/docs/usage.html#adapt-list
+                    #     https://www.psycopg.org/docs/usage.html#adapt-tuple
+                    (stream_id, self._instance_name, cache_name, list(key_tuple), ts)
+                    for stream_id, key_tuple in zip(stream_ids, key_tuples)
+                ],
+            )
+
     def get_cache_stream_token_for_writer(self, instance_name: str) -> int:
         if self._cache_id_gen:
             return self._cache_id_gen.get_current_token_for_writer(instance_name)
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 3e7425d4a6..02dddd1da4 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -450,14 +450,12 @@ class DeviceInboxWorkerStore(SQLBaseStore):
         user_id: str,
         device_id: Optional[str],
         up_to_stream_id: int,
-        limit: Optional[int] = None,
     ) -> int:
         """
         Args:
             user_id: The recipient user_id.
             device_id: The recipient device_id.
             up_to_stream_id: Where to delete messages up to.
-            limit: maximum number of messages to delete
 
         Returns:
             The number of messages deleted.
@@ -478,32 +476,22 @@ class DeviceInboxWorkerStore(SQLBaseStore):
                 log_kv({"message": "No changes in cache since last check"})
                 return 0
 
-        def delete_messages_for_device_txn(txn: LoggingTransaction) -> int:
-            limit_statement = "" if limit is None else f"LIMIT {limit}"
-            sql = f"""
-                DELETE FROM device_inbox WHERE user_id = ? AND device_id = ? AND stream_id <= (
-                  SELECT MAX(stream_id) FROM (
-                    SELECT stream_id FROM device_inbox
-                    WHERE user_id = ? AND device_id = ? AND stream_id <= ?
-                    ORDER BY stream_id
-                    {limit_statement}
-                  ) AS q1
-                )
-                """
-            txn.execute(sql, (user_id, device_id, user_id, device_id, up_to_stream_id))
-            return txn.rowcount
-
-        count = await self.db_pool.runInteraction(
-            "delete_messages_for_device", delete_messages_for_device_txn
-        )
+        from_stream_id = None
+        count = 0
+        while True:
+            from_stream_id, loop_count = await self.delete_messages_for_device_between(
+                user_id,
+                device_id,
+                from_stream_id=from_stream_id,
+                to_stream_id=up_to_stream_id,
+                limit=1000,
+            )
+            count += loop_count
+            if from_stream_id is None:
+                break
 
         log_kv({"message": f"deleted {count} messages for device", "count": count})
 
-        # In this case we don't know if we hit the limit or the delete is complete
-        # so let's not update the cache.
-        if count == limit:
-            return count
-
         # Update the cache, ensuring that we only ever increase the value
         updated_last_deleted_stream_id = self._last_device_delete_cache.get(
             (user_id, device_id), 0
@@ -515,6 +503,74 @@ class DeviceInboxWorkerStore(SQLBaseStore):
         return count
 
     @trace
+    async def delete_messages_for_device_between(
+        self,
+        user_id: str,
+        device_id: Optional[str],
+        from_stream_id: Optional[int],
+        to_stream_id: int,
+        limit: int,
+    ) -> Tuple[Optional[int], int]:
+        """Delete N device messages between the stream IDs, returning the
+        highest stream ID deleted (or None if all messages in the range have
+        been deleted) and the number of messages deleted.
+
+        This is more efficient than `delete_messages_for_device` when calling in
+        a loop to batch delete messages.
+        """
+
+        # Keeping track of a lower bound of stream ID where we've deleted
+        # everything below makes the queries much faster. Otherwise, every time
+        # we scan for rows to delete we'd re-scan across all the rows that have
+        # previously deleted (until the next table VACUUM).
+
+        if from_stream_id is None:
+            # Minimum device stream ID is 1.
+            from_stream_id = 0
+
+        def delete_messages_for_device_between_txn(
+            txn: LoggingTransaction,
+        ) -> Tuple[Optional[int], int]:
+            txn.execute(
+                """
+                SELECT MAX(stream_id) FROM (
+                    SELECT stream_id FROM device_inbox
+                    WHERE user_id = ? AND device_id = ?
+                        AND ? < stream_id AND stream_id <= ?
+                    ORDER BY stream_id
+                    LIMIT ?
+                ) AS d
+                """,
+                (user_id, device_id, from_stream_id, to_stream_id, limit),
+            )
+            row = txn.fetchone()
+            if row is None or row[0] is None:
+                return None, 0
+
+            (max_stream_id,) = row
+
+            txn.execute(
+                """
+                DELETE FROM device_inbox
+                WHERE user_id = ? AND device_id = ?
+                AND ? < stream_id AND stream_id <= ?
+                """,
+                (user_id, device_id, from_stream_id, max_stream_id),
+            )
+
+            num_deleted = txn.rowcount
+            if num_deleted < limit:
+                return None, num_deleted
+
+            return max_stream_id, num_deleted
+
+        return await self.db_pool.runInteraction(
+            "delete_messages_for_device_between",
+            delete_messages_for_device_between_txn,
+            db_autocommit=True,  # We don't need to run in a transaction
+        )
+
+    @trace
     async def get_new_device_msgs_for_remote(
         self, destination: str, last_stream_id: int, current_stream_id: int, limit: int
     ) -> Tuple[List[JsonDict], int]:
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 49edbb9e06..775abbac79 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -255,33 +255,16 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
             A dict containing the device information, or `None` if the device does not
             exist.
         """
-        return await self.db_pool.simple_select_one(
-            table="devices",
-            keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
-            retcols=("user_id", "device_id", "display_name"),
-            desc="get_device",
-            allow_none=True,
-        )
-
-    async def get_device_opt(
-        self, user_id: str, device_id: str
-    ) -> Optional[Dict[str, Any]]:
-        """Retrieve a device. Only returns devices that are not marked as
-        hidden.
-
-        Args:
-            user_id: The ID of the user which owns the device
-            device_id: The ID of the device to retrieve
-        Returns:
-            A dict containing the device information, or None if the device does not exist.
-        """
-        return await self.db_pool.simple_select_one(
+        row = await self.db_pool.simple_select_one(
             table="devices",
             keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
             retcols=("user_id", "device_id", "display_name"),
             desc="get_device",
             allow_none=True,
         )
+        if row is None:
+            return None
+        return {"user_id": row[0], "device_id": row[1], "display_name": row[2]}
 
     async def get_devices_by_user(
         self, user_id: str
@@ -703,7 +686,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
             key_names=("destination", "user_id"),
             key_values=[(destination, user_id) for user_id, _ in rows],
             value_names=("stream_id",),
-            value_values=((stream_id,) for _, stream_id in rows),
+            value_values=[(stream_id,) for _, stream_id in rows],
         )
 
         # Delete all sent outbound pokes
@@ -1221,9 +1204,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
             retcols=["device_id", "device_data"],
             allow_none=True,
         )
-        return (
-            (row["device_id"], json_decoder.decode(row["device_data"])) if row else None
-        )
+        return (row[0], json_decoder.decode(row[1])) if row else None
 
     def _store_dehydrated_device_txn(
         self,
@@ -1620,7 +1601,6 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
         #
         # For each duplicate, we delete all the existing rows and put one back.
 
-        KEY_COLS = ["stream_id", "destination", "user_id", "device_id"]
         last_row = progress.get(
             "last_row",
             {"stream_id": 0, "destination": "", "user_id": "", "device_id": ""},
@@ -1628,44 +1608,62 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
 
         def _txn(txn: LoggingTransaction) -> int:
             clause, args = make_tuple_comparison_clause(
-                [(x, last_row[x]) for x in KEY_COLS]
+                [
+                    ("stream_id", last_row["stream_id"]),
+                    ("destination", last_row["destination"]),
+                    ("user_id", last_row["user_id"]),
+                    ("device_id", last_row["device_id"]),
+                ]
             )
-            sql = """
+            sql = f"""
                 SELECT stream_id, destination, user_id, device_id, MAX(ts) AS ts
                 FROM device_lists_outbound_pokes
-                WHERE %s
-                GROUP BY %s
+                WHERE {clause}
+                GROUP BY stream_id, destination, user_id, device_id
                 HAVING count(*) > 1
-                ORDER BY %s
+                ORDER BY stream_id, destination, user_id, device_id
                 LIMIT ?
-                """ % (
-                clause,  # WHERE
-                ",".join(KEY_COLS),  # GROUP BY
-                ",".join(KEY_COLS),  # ORDER BY
-            )
+                """
             txn.execute(sql, args + [batch_size])
-            rows = self.db_pool.cursor_to_dict(txn)
+            rows = txn.fetchall()
 
-            row = None
-            for row in rows:
+            stream_id, destination, user_id, device_id = None, None, None, None
+            for stream_id, destination, user_id, device_id, _ in rows:
                 self.db_pool.simple_delete_txn(
                     txn,
                     "device_lists_outbound_pokes",
-                    {x: row[x] for x in KEY_COLS},
+                    {
+                        "stream_id": stream_id,
+                        "destination": destination,
+                        "user_id": user_id,
+                        "device_id": device_id,
+                    },
                 )
 
-                row["sent"] = False
                 self.db_pool.simple_insert_txn(
                     txn,
                     "device_lists_outbound_pokes",
-                    row,
+                    {
+                        "stream_id": stream_id,
+                        "destination": destination,
+                        "user_id": user_id,
+                        "device_id": device_id,
+                        "sent": False,
+                    },
                 )
 
-            if row:
+            if rows:
                 self.db_pool.updates._background_update_progress_txn(
                     txn,
                     BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES,
-                    {"last_row": row},
+                    {
+                        "last_row": {
+                            "stream_id": stream_id,
+                            "destination": destination,
+                            "user_id": user_id,
+                            "device_id": device_id,
+                        }
+                    },
                 )
 
             return len(rows)
@@ -2309,13 +2307,15 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         `FALSE` have not been converted.
         """
 
-        row = await self.db_pool.simple_select_one(
-            table="device_lists_changes_converted_stream_position",
-            keyvalues={},
-            retcols=["stream_id", "room_id"],
-            desc="get_device_change_last_converted_pos",
+        return cast(
+            Tuple[int, str],
+            await self.db_pool.simple_select_one(
+                table="device_lists_changes_converted_stream_position",
+                keyvalues={},
+                retcols=["stream_id", "room_id"],
+                desc="get_device_change_last_converted_pos",
+            ),
         )
-        return row["stream_id"], row["room_id"]
 
     async def set_device_change_last_converted_pos(
         self,
diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py
index ad904a26a6..fae23c3407 100644
--- a/synapse/storage/databases/main/e2e_room_keys.py
+++ b/synapse/storage/databases/main/e2e_room_keys.py
@@ -506,19 +506,26 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore):
                     # it isn't there.
                     raise StoreError(404, "No backup with that version exists")
 
-            result = self.db_pool.simple_select_one_txn(
-                txn,
-                table="e2e_room_keys_versions",
-                keyvalues={"user_id": user_id, "version": this_version, "deleted": 0},
-                retcols=("version", "algorithm", "auth_data", "etag"),
-                allow_none=False,
+            row = cast(
+                Tuple[int, str, str, Optional[int]],
+                self.db_pool.simple_select_one_txn(
+                    txn,
+                    table="e2e_room_keys_versions",
+                    keyvalues={
+                        "user_id": user_id,
+                        "version": this_version,
+                        "deleted": 0,
+                    },
+                    retcols=("version", "algorithm", "auth_data", "etag"),
+                    allow_none=False,
+                ),
             )
-            assert result is not None  # see comment on `simple_select_one_txn`
-            result["auth_data"] = db_to_json(result["auth_data"])
-            result["version"] = str(result["version"])
-            if result["etag"] is None:
-                result["etag"] = 0
-            return result
+            return {
+                "auth_data": db_to_json(row[2]),
+                "version": str(row[0]),
+                "algorithm": row[1],
+                "etag": 0 if row[3] is None else row[3],
+            }
 
         return await self.db_pool.runInteraction(
             "get_e2e_room_keys_version_info", _get_e2e_room_keys_version_info_txn
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 4f96ac25c7..9e98729330 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -1237,13 +1237,11 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
         for user_id, device_id, algorithm, key_id, key_json in claimed_keys:
             device_results = results.setdefault(user_id, {}).setdefault(device_id, {})
             device_results[f"{algorithm}:{key_id}"] = json_decoder.decode(key_json)
-
-            if (user_id, device_id) in seen_user_device:
-                continue
             seen_user_device.add((user_id, device_id))
-            self._invalidate_cache_and_stream(
-                txn, self.get_e2e_unused_fallback_key_types, (user_id, device_id)
-            )
+
+        self._invalidate_cache_and_stream_bulk(
+            txn, self.get_e2e_unused_fallback_key_types, seen_user_device
+        )
 
         return results
 
@@ -1268,9 +1266,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
             if row is None:
                 continue
 
-            key_id = row["key_id"]
-            key_json = row["key_json"]
-            used = row["used"]
+            key_id, key_json, used = row
 
             # Mark fallback key as used if not already.
             if not used and mark_as_used:
@@ -1376,17 +1372,62 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
             List[Tuple[str, str, str, str, str]], txn.execute_values(sql, query_list)
         )
 
-        seen_user_device: Set[Tuple[str, str]] = set()
-        for user_id, device_id, _, _, _ in otk_rows:
-            if (user_id, device_id) in seen_user_device:
-                continue
-            seen_user_device.add((user_id, device_id))
-            self._invalidate_cache_and_stream(
-                txn, self.count_e2e_one_time_keys, (user_id, device_id)
-            )
+        seen_user_device = {
+            (user_id, device_id) for user_id, device_id, _, _, _ in otk_rows
+        }
+        self._invalidate_cache_and_stream_bulk(
+            txn,
+            self.count_e2e_one_time_keys,
+            seen_user_device,
+        )
 
         return otk_rows
 
+    async def get_master_cross_signing_key_updatable_before(
+        self, user_id: str
+    ) -> Tuple[bool, Optional[int]]:
+        """Get time before which a master cross-signing key may be replaced without UIA.
+
+        (UIA means "User-Interactive Auth".)
+
+        There are three cases to distinguish:
+         (1) No master cross-signing key.
+         (2) The key exists, but there is no replace-without-UI timestamp in the DB.
+         (3) The key exists, and has such a timestamp recorded.
+
+        Returns: a 2-tuple of:
+          - a boolean: is there a master cross-signing key already?
+          - an optional timestamp, directly taken from the DB.
+
+        In terms of the cases above, these are:
+         (1) (False, None).
+         (2) (True, None).
+         (3) (True, <timestamp in ms>).
+
+        """
+
+        def impl(txn: LoggingTransaction) -> Tuple[bool, Optional[int]]:
+            # We want to distinguish between three cases:
+            txn.execute(
+                """
+                SELECT updatable_without_uia_before_ms
+                FROM e2e_cross_signing_keys
+                WHERE user_id = ? AND keytype = 'master'
+                ORDER BY stream_id DESC
+                LIMIT 1
+            """,
+                (user_id,),
+            )
+            row = cast(Optional[Tuple[Optional[int]]], txn.fetchone())
+            if row is None:
+                return False, None
+            return True, row[0]
+
+        return await self.db_pool.runInteraction(
+            "e2e_cross_signing_keys",
+            impl,
+        )
+
 
 class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
     def __init__(
@@ -1634,3 +1675,42 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
             ],
             desc="add_e2e_signing_key",
         )
+
+    async def allow_master_cross_signing_key_replacement_without_uia(
+        self, user_id: str, duration_ms: int
+    ) -> Optional[int]:
+        """Mark this user's latest master key as being replaceable without UIA.
+
+        Said replacement will only be permitted for a short time after calling this
+        function. That time period is controlled by the duration argument.
+
+        Returns:
+            None, if there is no such key.
+            Otherwise, the timestamp before which replacement is allowed without UIA.
+        """
+        timestamp = self._clock.time_msec() + duration_ms
+
+        def impl(txn: LoggingTransaction) -> Optional[int]:
+            txn.execute(
+                """
+                UPDATE e2e_cross_signing_keys
+                SET updatable_without_uia_before_ms = ?
+                WHERE stream_id = (
+                    SELECT stream_id
+                    FROM e2e_cross_signing_keys
+                    WHERE user_id = ? AND keytype = 'master'
+                    ORDER BY stream_id DESC
+                    LIMIT 1
+                )
+            """,
+                (timestamp, user_id),
+            )
+            if txn.rowcount == 0:
+                return None
+
+            return timestamp
+
+        return await self.db_pool.runInteraction(
+            "allow_master_cross_signing_key_replacement_without_uia",
+            impl,
+        )
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index f1b0991503..7e992ca4a2 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -193,7 +193,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
         # Check if we have indexed the room so we can use the chain cover
         # algorithm.
         room = await self.get_room(room_id)  # type: ignore[attr-defined]
-        if room["has_auth_chain_index"]:
+        # If the room has an auth chain index.
+        if room[1]:
             try:
                 return await self.db_pool.runInteraction(
                     "get_auth_chain_ids_chains",
@@ -411,7 +412,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
         # Check if we have indexed the room so we can use the chain cover
         # algorithm.
         room = await self.get_room(room_id)  # type: ignore[attr-defined]
-        if room["has_auth_chain_index"]:
+        # If the room has an auth chain index.
+        if room[1]:
             try:
                 return await self.db_pool.runInteraction(
                     "get_auth_chain_difference_chains",
@@ -1437,24 +1439,18 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
             )
 
             if event_lookup_result is not None:
+                event_type, depth, stream_ordering = event_lookup_result
                 logger.debug(
                     "_get_backfill_events(room_id=%s): seed_event_id=%s depth=%s stream_ordering=%s type=%s",
                     room_id,
                     seed_event_id,
-                    event_lookup_result["depth"],
-                    event_lookup_result["stream_ordering"],
-                    event_lookup_result["type"],
+                    depth,
+                    stream_ordering,
+                    event_type,
                 )
 
-                if event_lookup_result["depth"]:
-                    queue.put(
-                        (
-                            -event_lookup_result["depth"],
-                            -event_lookup_result["stream_ordering"],
-                            seed_event_id,
-                            event_lookup_result["type"],
-                        )
-                    )
+                if depth:
+                    queue.put((-depth, -stream_ordering, seed_event_id, event_type))
 
         while not queue.empty() and len(event_id_results) < limit:
             try:
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 3c1492e3ad..5207cc0f4e 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -79,7 +79,7 @@ class DeltaState:
     Attributes:
         to_delete: List of type/state_keys to delete from current state
         to_insert: Map of state to upsert into current state
-        no_longer_in_room: The server is not longer in the room, so the room
+        no_longer_in_room: The server is no longer in the room, so the room
             should e.g. be removed from `current_state_events` table.
     """
 
@@ -131,22 +131,25 @@ class PersistEventsStore:
     @trace
     async def _persist_events_and_state_updates(
         self,
+        room_id: str,
         events_and_contexts: List[Tuple[EventBase, EventContext]],
         *,
-        state_delta_for_room: Dict[str, DeltaState],
-        new_forward_extremities: Dict[str, Set[str]],
+        state_delta_for_room: Optional[DeltaState],
+        new_forward_extremities: Optional[Set[str]],
         use_negative_stream_ordering: bool = False,
         inhibit_local_membership_updates: bool = False,
     ) -> None:
         """Persist a set of events alongside updates to the current state and
-        forward extremities tables.
+        forward extremities tables.
+
+        Assumes that we are only persisting events for one room at a time.
 
         Args:
+            room_id:
             events_and_contexts:
-            state_delta_for_room: Map from room_id to the delta to apply to
-                room state
-            new_forward_extremities: Map from room_id to set of event IDs
-                that are the new forward extremities of the room.
+            state_delta_for_room: The delta to apply to the room state
+            new_forward_extremities: A set of event IDs that are the new forward
+                extremities of the room.
             use_negative_stream_ordering: Whether to start stream_ordering on
                 the negative side and decrement. This should be set as True
                 for backfilled events because backfilled events get a negative
@@ -196,6 +199,7 @@ class PersistEventsStore:
             await self.db_pool.runInteraction(
                 "persist_events",
                 self._persist_events_txn,
+                room_id=room_id,
                 events_and_contexts=events_and_contexts,
                 inhibit_local_membership_updates=inhibit_local_membership_updates,
                 state_delta_for_room=state_delta_for_room,
@@ -221,9 +225,9 @@ class PersistEventsStore:
 
                 event_counter.labels(event.type, origin_type, origin_entity).inc()
 
-            for room_id, latest_event_ids in new_forward_extremities.items():
+            if new_forward_extremities:
                 self.store.get_latest_event_ids_in_room.prefill(
-                    (room_id,), frozenset(latest_event_ids)
+                    (room_id,), frozenset(new_forward_extremities)
                 )
 
     async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]:
@@ -336,10 +340,11 @@ class PersistEventsStore:
         self,
         txn: LoggingTransaction,
         *,
+        room_id: str,
         events_and_contexts: List[Tuple[EventBase, EventContext]],
         inhibit_local_membership_updates: bool,
-        state_delta_for_room: Dict[str, DeltaState],
-        new_forward_extremities: Dict[str, Set[str]],
+        state_delta_for_room: Optional[DeltaState],
+        new_forward_extremities: Optional[Set[str]],
     ) -> None:
         """Insert some number of room events into the necessary database tables.
 
@@ -347,8 +352,11 @@ class PersistEventsStore:
         and the rejections table. Things reading from those table will need to check
         whether the event was rejected.
 
+        Assumes that we are only persisting events for one room at a time.
+
         Args:
             txn
+            room_id: The room the events are from
             events_and_contexts: events to persist
             inhibit_local_membership_updates: Stop the local_current_membership
                 from being updated by these events. This should be set to True
@@ -357,10 +365,9 @@ class PersistEventsStore:
             delete_existing True to purge existing table rows for the events
                 from the database. This is useful when retrying due to
                 IntegrityError.
-            state_delta_for_room: The current-state delta for each room.
-            new_forward_extremities: The new forward extremities for each room.
-                For each room, a list of the event ids which are the forward
-                extremities.
+            state_delta_for_room: The current-state delta for the room.
+            new_forward_extremities: The new forward extremities for the room:
+                a set of the event ids which are the forward extremities.
 
         Raises:
             PartialStateConflictError: if attempting to persist a partial state event in
@@ -376,14 +383,13 @@ class PersistEventsStore:
         #
         # Annoyingly SQLite doesn't support row level locking.
         if isinstance(self.database_engine, PostgresEngine):
-            for room_id in {e.room_id for e, _ in events_and_contexts}:
-                txn.execute(
-                    "SELECT room_version FROM rooms WHERE room_id = ? FOR SHARE",
-                    (room_id,),
-                )
-                row = txn.fetchone()
-                if row is None:
-                    raise Exception(f"Room does not exist {room_id}")
+            txn.execute(
+                "SELECT room_version FROM rooms WHERE room_id = ? FOR SHARE",
+                (room_id,),
+            )
+            row = txn.fetchone()
+            if row is None:
+                raise Exception(f"Room does not exist {room_id}")
 
         # stream orderings should have been assigned by now
         assert min_stream_order
@@ -419,7 +425,9 @@ class PersistEventsStore:
             events_and_contexts
         )
 
-        self._update_room_depths_txn(txn, events_and_contexts=events_and_contexts)
+        self._update_room_depths_txn(
+            txn, room_id, events_and_contexts=events_and_contexts
+        )
 
         # _update_outliers_txn filters out any events which have already been
         # persisted, and returns the filtered list.
@@ -432,11 +440,13 @@ class PersistEventsStore:
 
         self._store_event_txn(txn, events_and_contexts=events_and_contexts)
 
-        self._update_forward_extremities_txn(
-            txn,
-            new_forward_extremities=new_forward_extremities,
-            max_stream_order=max_stream_order,
-        )
+        if new_forward_extremities:
+            self._update_forward_extremities_txn(
+                txn,
+                room_id,
+                new_forward_extremities=new_forward_extremities,
+                max_stream_order=max_stream_order,
+            )
 
         self._persist_transaction_ids_txn(txn, events_and_contexts)
 
@@ -464,7 +474,10 @@ class PersistEventsStore:
         # We call this last as it assumes we've inserted the events into
         # room_memberships, where applicable.
         # NB: This function invalidates all state related caches
-        self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)
+        if state_delta_for_room:
+            self._update_current_state_txn(
+                txn, room_id, state_delta_for_room, min_stream_order
+            )
 
     def _persist_event_auth_chain_txn(
         self,
@@ -1026,74 +1039,75 @@ class PersistEventsStore:
             await self.db_pool.runInteraction(
                 "update_current_state",
                 self._update_current_state_txn,
-                state_delta_by_room={room_id: state_delta},
+                room_id,
+                delta_state=state_delta,
                 stream_id=stream_ordering,
             )
 
     def _update_current_state_txn(
         self,
         txn: LoggingTransaction,
-        state_delta_by_room: Dict[str, DeltaState],
+        room_id: str,
+        delta_state: DeltaState,
         stream_id: int,
     ) -> None:
-        for room_id, delta_state in state_delta_by_room.items():
-            to_delete = delta_state.to_delete
-            to_insert = delta_state.to_insert
-
-            # Figure out the changes of membership to invalidate the
-            # `get_rooms_for_user` cache.
-            # We find out which membership events we may have deleted
-            # and which we have added, then we invalidate the caches for all
-            # those users.
-            members_changed = {
-                state_key
-                for ev_type, state_key in itertools.chain(to_delete, to_insert)
-                if ev_type == EventTypes.Member
-            }
+        to_delete = delta_state.to_delete
+        to_insert = delta_state.to_insert
+
+        # Figure out the changes of membership to invalidate the
+        # `get_rooms_for_user` cache.
+        # We find out which membership events we may have deleted
+        # and which we have added, then we invalidate the caches for all
+        # those users.
+        members_changed = {
+            state_key
+            for ev_type, state_key in itertools.chain(to_delete, to_insert)
+            if ev_type == EventTypes.Member
+        }
 
-            if delta_state.no_longer_in_room:
-                # Server is no longer in the room so we delete the room from
-                # current_state_events, being careful we've already updated the
-                # rooms.room_version column (which gets populated in a
-                # background task).
-                self._upsert_room_version_txn(txn, room_id)
+        if delta_state.no_longer_in_room:
+            # Server is no longer in the room so we delete the room from
+            # current_state_events, being careful we've already updated the
+            # rooms.room_version column (which gets populated in a
+            # background task).
+            self._upsert_room_version_txn(txn, room_id)
 
-                # Before deleting we populate the current_state_delta_stream
-                # so that async background tasks get told what happened.
-                sql = """
+            # Before deleting we populate the current_state_delta_stream
+            # so that async background tasks get told what happened.
+            sql = """
                     INSERT INTO current_state_delta_stream
                         (stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id)
                     SELECT ?, ?, room_id, type, state_key, null, event_id
                         FROM current_state_events
                         WHERE room_id = ?
                 """
-                txn.execute(sql, (stream_id, self._instance_name, room_id))
+            txn.execute(sql, (stream_id, self._instance_name, room_id))
 
-                # We also want to invalidate the membership caches for users
-                # that were in the room.
-                users_in_room = self.store.get_users_in_room_txn(txn, room_id)
-                members_changed.update(users_in_room)
+            # We also want to invalidate the membership caches for users
+            # that were in the room.
+            users_in_room = self.store.get_users_in_room_txn(txn, room_id)
+            members_changed.update(users_in_room)
 
-                self.db_pool.simple_delete_txn(
-                    txn,
-                    table="current_state_events",
-                    keyvalues={"room_id": room_id},
-                )
-            else:
-                # We're still in the room, so we update the current state as normal.
+            self.db_pool.simple_delete_txn(
+                txn,
+                table="current_state_events",
+                keyvalues={"room_id": room_id},
+            )
+        else:
+            # We're still in the room, so we update the current state as normal.
 
-                # First we add entries to the current_state_delta_stream. We
-                # do this before updating the current_state_events table so
-                # that we can use it to calculate the `prev_event_id`. (This
-                # allows us to not have to pull out the existing state
-                # unnecessarily).
-                #
-                # The stream_id for the update is chosen to be the minimum of the stream_ids
-                # for the batch of the events that we are persisting; that means we do not
-                # end up in a situation where workers see events before the
-                # current_state_delta updates.
-                #
-                sql = """
+            # First we add entries to the current_state_delta_stream. We
+            # do this before updating the current_state_events table so
+            # that we can use it to calculate the `prev_event_id`. (This
+            # allows us to not have to pull out the existing state
+            # unnecessarily).
+            #
+            # The stream_id for the update is chosen to be the minimum of the stream_ids
+            # for the batch of the events that we are persisting; that means we do not
+            # end up in a situation where workers see events before the
+            # current_state_delta updates.
+            #
+            sql = """
                     INSERT INTO current_state_delta_stream
                     (stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id)
                     SELECT ?, ?, ?, ?, ?, ?, (
@@ -1101,39 +1115,39 @@ class PersistEventsStore:
                         WHERE room_id = ? AND type = ? AND state_key = ?
                     )
                 """
-                txn.execute_batch(
-                    sql,
+            txn.execute_batch(
+                sql,
+                (
                     (
-                        (
-                            stream_id,
-                            self._instance_name,
-                            room_id,
-                            etype,
-                            state_key,
-                            to_insert.get((etype, state_key)),
-                            room_id,
-                            etype,
-                            state_key,
-                        )
-                        for etype, state_key in itertools.chain(to_delete, to_insert)
-                    ),
-                )
-                # Now we actually update the current_state_events table
+                        stream_id,
+                        self._instance_name,
+                        room_id,
+                        etype,
+                        state_key,
+                        to_insert.get((etype, state_key)),
+                        room_id,
+                        etype,
+                        state_key,
+                    )
+                    for etype, state_key in itertools.chain(to_delete, to_insert)
+                ),
+            )
+            # Now we actually update the current_state_events table
 
-                txn.execute_batch(
-                    "DELETE FROM current_state_events"
-                    " WHERE room_id = ? AND type = ? AND state_key = ?",
-                    (
-                        (room_id, etype, state_key)
-                        for etype, state_key in itertools.chain(to_delete, to_insert)
-                    ),
-                )
+            txn.execute_batch(
+                "DELETE FROM current_state_events"
+                " WHERE room_id = ? AND type = ? AND state_key = ?",
+                (
+                    (room_id, etype, state_key)
+                    for etype, state_key in itertools.chain(to_delete, to_insert)
+                ),
+            )
 
-                # We include the membership in the current state table, hence we do
-                # a lookup when we insert. This assumes that all events have already
-                # been inserted into room_memberships.
-                txn.execute_batch(
-                    """INSERT INTO current_state_events
+            # We include the membership in the current state table, hence we do
+            # a lookup when we insert. This assumes that all events have already
+            # been inserted into room_memberships.
+            txn.execute_batch(
+                """INSERT INTO current_state_events
                         (room_id, type, state_key, event_id, membership, event_stream_ordering)
                     VALUES (
                         ?, ?, ?, ?,
@@ -1141,34 +1155,34 @@ class PersistEventsStore:
                         (SELECT stream_ordering FROM events WHERE event_id = ?)
                     )
                     """,
-                    [
-                        (room_id, key[0], key[1], ev_id, ev_id, ev_id)
-                        for key, ev_id in to_insert.items()
-                    ],
-                )
+                [
+                    (room_id, key[0], key[1], ev_id, ev_id, ev_id)
+                    for key, ev_id in to_insert.items()
+                ],
+            )
 
-            # We now update `local_current_membership`. We do this regardless
-            # of whether we're still in the room or not to handle the case where
-            # e.g. we just got banned (where we need to record that fact here).
-
-            # Note: Do we really want to delete rows here (that we do not
-            # subsequently reinsert below)? While technically correct it means
-            # we have no record of the fact the user *was* a member of the
-            # room but got, say, state reset out of it.
-            if to_delete or to_insert:
-                txn.execute_batch(
-                    "DELETE FROM local_current_membership"
-                    " WHERE room_id = ? AND user_id = ?",
-                    (
-                        (room_id, state_key)
-                        for etype, state_key in itertools.chain(to_delete, to_insert)
-                        if etype == EventTypes.Member and self.is_mine_id(state_key)
-                    ),
-                )
+        # We now update `local_current_membership`. We do this regardless
+        # of whether we're still in the room or not to handle the case where
+        # e.g. we just got banned (where we need to record that fact here).
+
+        # Note: Do we really want to delete rows here (that we do not
+        # subsequently reinsert below)? While technically correct it means
+        # we have no record of the fact the user *was* a member of the
+        # room but got, say, state reset out of it.
+        if to_delete or to_insert:
+            txn.execute_batch(
+                "DELETE FROM local_current_membership"
+                " WHERE room_id = ? AND user_id = ?",
+                (
+                    (room_id, state_key)
+                    for etype, state_key in itertools.chain(to_delete, to_insert)
+                    if etype == EventTypes.Member and self.is_mine_id(state_key)
+                ),
+            )
 
-            if to_insert:
-                txn.execute_batch(
-                    """INSERT INTO local_current_membership
+        if to_insert:
+            txn.execute_batch(
+                """INSERT INTO local_current_membership
                         (room_id, user_id, event_id, membership, event_stream_ordering)
                     VALUES (
                         ?, ?, ?,
@@ -1176,29 +1190,27 @@ class PersistEventsStore:
                         (SELECT stream_ordering FROM events WHERE event_id = ?)
                     )
                     """,
-                    [
-                        (room_id, key[1], ev_id, ev_id, ev_id)
-                        for key, ev_id in to_insert.items()
-                        if key[0] == EventTypes.Member and self.is_mine_id(key[1])
-                    ],
-                )
-
-            txn.call_after(
-                self.store._curr_state_delta_stream_cache.entity_has_changed,
-                room_id,
-                stream_id,
+                [
+                    (room_id, key[1], ev_id, ev_id, ev_id)
+                    for key, ev_id in to_insert.items()
+                    if key[0] == EventTypes.Member and self.is_mine_id(key[1])
+                ],
             )
 
-            # Invalidate the various caches
-            self.store._invalidate_state_caches_and_stream(
-                txn, room_id, members_changed
-            )
+        txn.call_after(
+            self.store._curr_state_delta_stream_cache.entity_has_changed,
+            room_id,
+            stream_id,
+        )
 
-            # Check if any of the remote membership changes requires us to
-            # unsubscribe from their device lists.
-            self.store.handle_potentially_left_users_txn(
-                txn, {m for m in members_changed if not self.hs.is_mine_id(m)}
-            )
+        # Invalidate the various caches
+        self.store._invalidate_state_caches_and_stream(txn, room_id, members_changed)
+
+        # Check if any of the remote membership changes requires us to
+        # unsubscribe from their device lists.
+        self.store.handle_potentially_left_users_txn(
+            txn, {m for m in members_changed if not self.hs.is_mine_id(m)}
+        )
 
     def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str) -> None:
         """Update the room version in the database based off current state
@@ -1232,23 +1244,19 @@ class PersistEventsStore:
     def _update_forward_extremities_txn(
         self,
         txn: LoggingTransaction,
-        new_forward_extremities: Dict[str, Set[str]],
+        room_id: str,
+        new_forward_extremities: Set[str],
         max_stream_order: int,
     ) -> None:
-        for room_id in new_forward_extremities.keys():
-            self.db_pool.simple_delete_txn(
-                txn, table="event_forward_extremities", keyvalues={"room_id": room_id}
-            )
+        self.db_pool.simple_delete_txn(
+            txn, table="event_forward_extremities", keyvalues={"room_id": room_id}
+        )
 
         self.db_pool.simple_insert_many_txn(
             txn,
             table="event_forward_extremities",
             keys=("event_id", "room_id"),
-            values=[
-                (ev_id, room_id)
-                for room_id, new_extrem in new_forward_extremities.items()
-                for ev_id in new_extrem
-            ],
+            values=[(ev_id, room_id) for ev_id in new_forward_extremities],
         )
         # We now insert into stream_ordering_to_exterm a mapping from room_id,
         # new stream_ordering to new forward extremeties in the room.
@@ -1260,8 +1268,7 @@ class PersistEventsStore:
             keys=("room_id", "event_id", "stream_ordering"),
             values=[
                 (room_id, event_id, max_stream_order)
-                for room_id, new_extrem in new_forward_extremities.items()
-                for event_id in new_extrem
+                for event_id in new_forward_extremities
             ],
         )
 
@@ -1298,36 +1305,45 @@ class PersistEventsStore:
     def _update_room_depths_txn(
         self,
         txn: LoggingTransaction,
+        room_id: str,
         events_and_contexts: List[Tuple[EventBase, EventContext]],
     ) -> None:
         """Update min_depth for each room
 
         Args:
             txn: db connection
+            room_id: The room ID
             events_and_contexts: events we are persisting
         """
-        depth_updates: Dict[str, int] = {}
+        stream_ordering: Optional[int] = None
+        depth_update = 0
         for event, context in events_and_contexts:
-            # Then update the `stream_ordering` position to mark the latest
-            # event as the front of the room. This should not be done for
-            # backfilled events because backfilled events have negative
-            # stream_ordering and happened in the past so we know that we don't
-            # need to update the stream_ordering tip/front for the room.
+            # Don't update the stream ordering for backfilled events because
+            # backfilled events have negative stream_ordering and happened in the
+            # past, so we know that we don't need to update the stream_ordering
+            # tip/front for the room.
             assert event.internal_metadata.stream_ordering is not None
             if event.internal_metadata.stream_ordering >= 0:
-                txn.call_after(
-                    self.store._events_stream_cache.entity_has_changed,
-                    event.room_id,
-                    event.internal_metadata.stream_ordering,
-                )
+                if stream_ordering is None:
+                    stream_ordering = event.internal_metadata.stream_ordering
+                else:
+                    stream_ordering = max(
+                        stream_ordering, event.internal_metadata.stream_ordering
+                    )
 
             if not event.internal_metadata.is_outlier() and not context.rejected:
-                depth_updates[event.room_id] = max(
-                    event.depth, depth_updates.get(event.room_id, event.depth)
-                )
+                depth_update = max(event.depth, depth_update)
 
-        for room_id, depth in depth_updates.items():
-            self._update_min_depth_for_room_txn(txn, room_id, depth)
+        # Then update the `stream_ordering` position to mark the latest event as
+        # the front of the room.
+        if stream_ordering is not None:
+            txn.call_after(
+                self.store._events_stream_cache.entity_has_changed,
+                room_id,
+                stream_ordering,
+            )
+
+        self._update_min_depth_for_room_txn(txn, room_id, depth_update)
 
     def _update_outliers_txn(
         self,
@@ -1350,13 +1366,19 @@ class PersistEventsStore:
             PartialStateConflictError: if attempting to persist a partial state event in
                 a room that has been un-partial stated.
         """
-        txn.execute(
-            "SELECT event_id, outlier FROM events WHERE event_id in (%s)"
-            % (",".join(["?"] * len(events_and_contexts)),),
-            [event.event_id for event, _ in events_and_contexts],
+        rows = cast(
+            List[Tuple[str, bool]],
+            self.db_pool.simple_select_many_txn(
+                txn,
+                "events",
+                "event_id",
+                [event.event_id for event, _ in events_and_contexts],
+                keyvalues={},
+                retcols=("event_id", "outlier"),
+            ),
         )
 
-        have_persisted = dict(cast(Iterable[Tuple[str, bool]], txn))
+        have_persisted = dict(rows)
 
         logger.debug(
             "_update_outliers_txn: events=%s have_persisted=%s",
@@ -1454,7 +1476,7 @@ class PersistEventsStore:
             txn,
             table="event_json",
             keys=("event_id", "room_id", "internal_metadata", "json", "format_version"),
-            values=(
+            values=[
                 (
                     event.event_id,
                     event.room_id,
@@ -1463,7 +1485,7 @@ class PersistEventsStore:
                     event.format_version,
                 )
                 for event, _ in events_and_contexts
-            ),
+            ],
         )
 
         self.db_pool.simple_insert_many_txn(
@@ -1486,7 +1508,7 @@ class PersistEventsStore:
                 "state_key",
                 "rejection_reason",
             ),
-            values=(
+            values=[
                 (
                     self._instance_name,
                     event.internal_metadata.stream_ordering,
@@ -1505,7 +1527,7 @@ class PersistEventsStore:
                     context.rejected,
                 )
                 for event, context in events_and_contexts
-            ),
+            ],
         )
 
         # If we're persisting an unredacted event we go and ensure
@@ -1528,11 +1550,11 @@ class PersistEventsStore:
             txn,
             table="state_events",
             keys=("event_id", "room_id", "type", "state_key"),
-            values=(
+            values=[
                 (event.event_id, event.room_id, event.type, event.state_key)
                 for event, _ in events_and_contexts
                 if event.is_state()
-            ),
+            ],
         )
 
     def _store_rejected_events_txn(
@@ -1912,8 +1934,7 @@ class PersistEventsStore:
         if row is None:
             return
 
-        redacted_relates_to = row["relates_to_id"]
-        rel_type = row["relation_type"]
+        redacted_relates_to, rel_type = row
         self.db_pool.simple_delete_txn(
             txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
         )
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 0061805150..0c91f19c8e 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -425,7 +425,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
         """Background update to clean out extremities that should have been
         deleted previously.
 
-        Mainly used to deal with the aftermath of #5269.
+        Mainly used to deal with the aftermath of https://github.com/matrix-org/synapse/issues/5269.
         """
 
         # This works by first copying all existing forward extremities into the
@@ -558,7 +558,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
             )
 
             logger.info(
-                "Deleted %d forward extremities of %d checked, to clean up #5269",
+                "Deleted %d forward extremities of %d checked, to clean up matrix-org/synapse#5269",
                 deleted,
                 len(original_set),
             )
@@ -1222,14 +1222,13 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
                 )
 
                 # Iterate the parent IDs and invalidate caches.
-                for parent_id in {r[1] for r in relations_to_insert}:
-                    cache_tuple = (parent_id,)
-                    self._invalidate_cache_and_stream(  # type: ignore[attr-defined]
-                        txn, self.get_relations_for_event, cache_tuple  # type: ignore[attr-defined]
-                    )
-                    self._invalidate_cache_and_stream(  # type: ignore[attr-defined]
-                        txn, self.get_thread_summary, cache_tuple  # type: ignore[attr-defined]
-                    )
+                cache_tuples = {(r[1],) for r in relations_to_insert}
+                self._invalidate_cache_and_stream_bulk(  # type: ignore[attr-defined]
+                    txn, self.get_relations_for_event, cache_tuples  # type: ignore[attr-defined]
+                )
+                self._invalidate_cache_and_stream_bulk(  # type: ignore[attr-defined]
+                    txn, self.get_thread_summary, cache_tuples  # type: ignore[attr-defined]
+                )
 
             if results:
                 latest_event_id = results[-1][0]
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 5bf864c1fb..4125059061 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -1312,7 +1312,8 @@ class EventsWorkerStore(SQLBaseStore):
             room_version: Optional[RoomVersion]
             if not room_version_id:
                 # this should only happen for out-of-band membership events which
-                # arrived before #6983 landed. For all other events, we should have
+                # arrived before https://github.com/matrix-org/synapse/issues/6983
+                # landed. For all other events, we should have
                 # an entry in the 'rooms' table.
                 #
                 # However, the 'out_of_band_membership' flag is unreliable for older
@@ -1323,7 +1324,8 @@ class EventsWorkerStore(SQLBaseStore):
                         "Room %s for event %s is unknown" % (d["room_id"], event_id)
                     )
 
-                # so, assuming this is an out-of-band-invite that arrived before #6983
+                # so, assuming this is an out-of-band-invite that arrived before
+                # https://github.com/matrix-org/synapse/issues/6983
                 # landed, we know that the room version must be v5 or earlier (because
                 # v6 hadn't been invented at that point, so invites from such rooms
                 # would have been rejected.)
@@ -1998,7 +2000,7 @@ class EventsWorkerStore(SQLBaseStore):
         if not res:
             raise SynapseError(404, "Could not find event %s" % (event_id,))
 
-        return int(res["topological_ordering"]), int(res["stream_ordering"])
+        return int(res[0]), int(res[1])
 
     async def get_next_event_to_expire(self) -> Optional[Tuple[str, int]]:
         """Retrieve the entry with the lowest expiry timestamp in the event_expiry
diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py
index ce88772f9e..c700872fdc 100644
--- a/synapse/storage/databases/main/keys.py
+++ b/synapse/storage/databases/main/keys.py
@@ -107,13 +107,16 @@ class KeyStore(CacheInvalidationWorkerStore):
             # invalidate takes a tuple corresponding to the params of
             # _get_server_keys_json. _get_server_keys_json only takes one
             # param, which is itself the 2-tuple (server_name, key_id).
-            for key_id in verify_keys:
-                self._invalidate_cache_and_stream(
-                    txn, self._get_server_keys_json, ((server_name, key_id),)
-                )
-                self._invalidate_cache_and_stream(
-                    txn, self.get_server_key_json_for_remote, (server_name, key_id)
-                )
+            self._invalidate_cache_and_stream_bulk(
+                txn,
+                self._get_server_keys_json,
+                [((server_name, key_id),) for key_id in verify_keys],
+            )
+            self._invalidate_cache_and_stream_bulk(
+                txn,
+                self.get_server_key_json_for_remote,
+                [(server_name, key_id) for key_id in verify_keys],
+            )
 
         await self.db_pool.runInteraction(
             "store_server_keys_response", store_server_keys_response_txn
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py
index aeb3db596c..149135b8b5 100644
--- a/synapse/storage/databases/main/media_repository.py
+++ b/synapse/storage/databases/main/media_repository.py
@@ -15,9 +15,7 @@
 from enum import Enum
 from typing import (
     TYPE_CHECKING,
-    Any,
     Collection,
-    Dict,
     Iterable,
     List,
     Optional,
@@ -26,6 +24,8 @@ from typing import (
     cast,
 )
 
+import attr
+
 from synapse.api.constants import Direction
 from synapse.logging.opentracing import trace
 from synapse.media._base import ThumbnailInfo
@@ -45,6 +45,40 @@ BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2 = (
 )
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class LocalMedia:
+    media_id: str
+    media_type: str
+    media_length: Optional[int]
+    upload_name: str
+    created_ts: int
+    url_cache: Optional[str]
+    last_access_ts: int
+    quarantined_by: Optional[str]
+    safe_from_quarantine: bool
+    user_id: Optional[str]
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class RemoteMedia:
+    media_origin: str
+    media_id: str
+    media_type: str
+    media_length: int
+    upload_name: Optional[str]
+    filesystem_id: str
+    created_ts: int
+    last_access_ts: int
+    quarantined_by: Optional[str]
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class UrlCache:
+    response_code: int
+    expires_ts: int
+    og: Union[str, bytes]
+
+
 class MediaSortOrder(Enum):
     """
     Enum to define the sorting method used when returning media with
@@ -116,6 +150,13 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore):
             self._drop_media_index_without_method,
         )
 
+        if hs.config.media.can_load_media_repo:
+            self.unused_expiration_time: Optional[
+                int
+            ] = hs.config.media.unused_expiration_time
+        else:
+            self.unused_expiration_time = None
+
     async def _drop_media_index_without_method(
         self, progress: JsonDict, batch_size: int
     ) -> int:
@@ -151,13 +192,13 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         super().__init__(database, db_conn, hs)
         self.server_name: str = hs.hostname
 
-    async def get_local_media(self, media_id: str) -> Optional[Dict[str, Any]]:
+    async def get_local_media(self, media_id: str) -> Optional[LocalMedia]:
         """Get the metadata for a local piece of media
 
         Returns:
             None if the media_id doesn't exist.
         """
-        return await self.db_pool.simple_select_one(
+        row = await self.db_pool.simple_select_one(
             "local_media_repository",
             {"media_id": media_id},
             (
@@ -167,11 +208,27 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                 "created_ts",
                 "quarantined_by",
                 "url_cache",
+                "last_access_ts",
                 "safe_from_quarantine",
+                "user_id",
             ),
             allow_none=True,
             desc="get_local_media",
         )
+        if row is None:
+            return None
+        return LocalMedia(
+            media_id=media_id,
+            media_type=row[0],
+            media_length=row[1],
+            upload_name=row[2],
+            created_ts=row[3],
+            quarantined_by=row[4],
+            url_cache=row[5],
+            last_access_ts=row[6],
+            safe_from_quarantine=row[7],
+            user_id=row[8],
+        )
 
     async def get_local_media_by_user_paginate(
         self,
@@ -180,7 +237,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         user_id: str,
         order_by: str = MediaSortOrder.CREATED_TS.value,
         direction: Direction = Direction.FORWARDS,
-    ) -> Tuple[List[Dict[str, Any]], int]:
+    ) -> Tuple[List[LocalMedia], int]:
         """Get a paginated list of metadata for a local piece of media
         which an user_id has uploaded
 
@@ -197,7 +254,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
 
         def get_local_media_by_user_paginate_txn(
             txn: LoggingTransaction,
-        ) -> Tuple[List[Dict[str, Any]], int]:
+        ) -> Tuple[List[LocalMedia], int]:
             # Set ordering
             order_by_column = MediaSortOrder(order_by).value
 
@@ -217,14 +274,16 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
 
             sql = """
                 SELECT
-                    "media_id",
-                    "media_type",
-                    "media_length",
-                    "upload_name",
-                    "created_ts",
-                    "last_access_ts",
-                    "quarantined_by",
-                    "safe_from_quarantine"
+                    media_id,
+                    media_type,
+                    media_length,
+                    upload_name,
+                    created_ts,
+                    url_cache,
+                    last_access_ts,
+                    quarantined_by,
+                    safe_from_quarantine,
+                    user_id
                 FROM local_media_repository
                 WHERE user_id = ?
                 ORDER BY {order_by_column} {order}, media_id ASC
@@ -236,7 +295,21 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
 
             args += [limit, start]
             txn.execute(sql, args)
-            media = self.db_pool.cursor_to_dict(txn)
+            media = [
+                LocalMedia(
+                    media_id=row[0],
+                    media_type=row[1],
+                    media_length=row[2],
+                    upload_name=row[3],
+                    created_ts=row[4],
+                    url_cache=row[5],
+                    last_access_ts=row[6],
+                    quarantined_by=row[7],
+                    safe_from_quarantine=bool(row[8]),
+                    user_id=row[9],
+                )
+                for row in txn
+            ]
             return media, count
 
         return await self.db_pool.runInteraction(
@@ -331,6 +404,23 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         )
 
     @trace
+    async def store_local_media_id(
+        self,
+        media_id: str,
+        time_now_ms: int,
+        user_id: UserID,
+    ) -> None:
+        await self.db_pool.simple_insert(
+            "local_media_repository",
+            {
+                "media_id": media_id,
+                "created_ts": time_now_ms,
+                "user_id": user_id.to_string(),
+            },
+            desc="store_local_media_id",
+        )
+
+    @trace
     async def store_local_media(
         self,
         media_id: str,
@@ -355,6 +445,30 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             desc="store_local_media",
         )
 
+    async def update_local_media(
+        self,
+        media_id: str,
+        media_type: str,
+        upload_name: Optional[str],
+        media_length: int,
+        user_id: UserID,
+        url_cache: Optional[str] = None,
+    ) -> None:
+        await self.db_pool.simple_update_one(
+            "local_media_repository",
+            keyvalues={
+                "user_id": user_id.to_string(),
+                "media_id": media_id,
+            },
+            updatevalues={
+                "media_type": media_type,
+                "upload_name": upload_name,
+                "media_length": media_length,
+                "url_cache": url_cache,
+            },
+            desc="update_local_media",
+        )
+
     async def mark_local_media_as_safe(self, media_id: str, safe: bool = True) -> None:
         """Mark a local media as safe or unsafe from quarantining."""
         await self.db_pool.simple_update_one(
@@ -364,51 +478,72 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             desc="mark_local_media_as_safe",
         )
 
-    async def get_url_cache(self, url: str, ts: int) -> Optional[Dict[str, Any]]:
+    async def count_pending_media(self, user_id: UserID) -> Tuple[int, int]:
+        """Count the number of pending media for a user.
+
+        Returns:
+            A tuple of two integers: the total pending media requests and the earliest
+            expiration timestamp.
+        """
+
+        def get_pending_media_txn(txn: LoggingTransaction) -> Tuple[int, int]:
+            sql = """
+            SELECT COUNT(*), MIN(created_ts)
+            FROM local_media_repository
+            WHERE user_id = ?
+                AND created_ts > ?
+                AND media_length IS NULL
+            """
+            assert self.unused_expiration_time is not None
+            txn.execute(
+                sql,
+                (
+                    user_id.to_string(),
+                    self._clock.time_msec() - self.unused_expiration_time,
+                ),
+            )
+            row = txn.fetchone()
+            if not row:
+                return 0, 0
+            return row[0], (row[1] + self.unused_expiration_time if row[1] else 0)
+
+        return await self.db_pool.runInteraction(
+            "get_pending_media", get_pending_media_txn
+        )
+
+    async def get_url_cache(self, url: str, ts: int) -> Optional[UrlCache]:
         """Get the media_id and ts for a cached URL as of the given timestamp
         Returns:
             None if the URL isn't cached.
         """
 
-        def get_url_cache_txn(txn: LoggingTransaction) -> Optional[Dict[str, Any]]:
+        def get_url_cache_txn(txn: LoggingTransaction) -> Optional[UrlCache]:
             # get the most recently cached result (relative to the given ts)
-            sql = (
-                "SELECT response_code, etag, expires_ts, og, media_id, download_ts"
-                " FROM local_media_repository_url_cache"
-                " WHERE url = ? AND download_ts <= ?"
-                " ORDER BY download_ts DESC LIMIT 1"
-            )
+            sql = """
+                SELECT response_code, expires_ts, og
+                FROM local_media_repository_url_cache
+                WHERE url = ? AND download_ts <= ?
+                ORDER BY download_ts DESC LIMIT 1
+            """
             txn.execute(sql, (url, ts))
             row = txn.fetchone()
 
             if not row:
                 # ...or if we've requested a timestamp older than the oldest
                 # copy in the cache, return the oldest copy (if any)
-                sql = (
-                    "SELECT response_code, etag, expires_ts, og, media_id, download_ts"
-                    " FROM local_media_repository_url_cache"
-                    " WHERE url = ? AND download_ts > ?"
-                    " ORDER BY download_ts ASC LIMIT 1"
-                )
+                sql = """
+                    SELECT response_code, expires_ts, og
+                    FROM local_media_repository_url_cache
+                    WHERE url = ? AND download_ts > ?
+                    ORDER BY download_ts ASC LIMIT 1
+                """
                 txn.execute(sql, (url, ts))
                 row = txn.fetchone()
 
             if not row:
                 return None
 
-            return dict(
-                zip(
-                    (
-                        "response_code",
-                        "etag",
-                        "expires_ts",
-                        "og",
-                        "media_id",
-                        "download_ts",
-                    ),
-                    row,
-                )
-            )
+            return UrlCache(response_code=row[0], expires_ts=row[1], og=row[2])
 
         return await self.db_pool.runInteraction("get_url_cache", get_url_cache_txn)
 
@@ -418,7 +553,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         response_code: int,
         etag: Optional[str],
         expires_ts: int,
-        og: Optional[str],
+        og: str,
         media_id: str,
         download_ts: int,
     ) -> None:
@@ -484,8 +619,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
 
     async def get_cached_remote_media(
         self, origin: str, media_id: str
-    ) -> Optional[Dict[str, Any]]:
-        return await self.db_pool.simple_select_one(
+    ) -> Optional[RemoteMedia]:
+        row = await self.db_pool.simple_select_one(
             "remote_media_cache",
             {"media_origin": origin, "media_id": media_id},
             (
@@ -494,11 +629,25 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                 "upload_name",
                 "created_ts",
                 "filesystem_id",
+                "last_access_ts",
                 "quarantined_by",
             ),
             allow_none=True,
             desc="get_cached_remote_media",
         )
+        if row is None:
+            return row
+        return RemoteMedia(
+            media_origin=origin,
+            media_id=media_id,
+            media_type=row[0],
+            media_length=row[1],
+            upload_name=row[2],
+            created_ts=row[3],
+            filesystem_id=row[4],
+            last_access_ts=row[5],
+            quarantined_by=row[6],
+        )
 
     async def store_cached_remote_media(
         self,
@@ -597,10 +746,10 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         t_width: int,
         t_height: int,
         t_type: str,
-    ) -> Optional[Dict[str, Any]]:
+    ) -> Optional[ThumbnailInfo]:
         """Fetch the thumbnail info of given width, height and type."""
 
-        return await self.db_pool.simple_select_one(
+        row = await self.db_pool.simple_select_one(
             table="remote_media_cache_thumbnails",
             keyvalues={
                 "media_origin": origin,
@@ -615,11 +764,15 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                 "thumbnail_method",
                 "thumbnail_type",
                 "thumbnail_length",
-                "filesystem_id",
             ),
             allow_none=True,
             desc="get_remote_media_thumbnail",
         )
+        if row is None:
+            return None
+        return ThumbnailInfo(
+            width=row[0], height=row[1], method=row[2], type=row[3], length=row[4]
+        )
 
     @trace
     async def store_remote_media_thumbnail(
diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
index 4b1061e6d7..2911e53310 100644
--- a/synapse/storage/databases/main/monthly_active_users.py
+++ b/synapse/storage/databases/main/monthly_active_users.py
@@ -317,7 +317,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
             if user_id:
                 is_support = self.is_support_user_txn(txn, user_id)
                 if not is_support:
-                    # We do this manually here to avoid hitting #6791
+                    # We do this manually here to avoid hitting https://github.com/matrix-org/synapse/issues/6791
                     self.db_pool.simple_upsert_txn(
                         txn,
                         table="monthly_active_users",
diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py
index 3b444d2d07..0198bb09d2 100644
--- a/synapse/storage/databases/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -363,10 +363,11 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
                 # for their user ID.
                 value_values=[(presence_stream_id,) for _ in user_ids],
             )
-            for user_id in user_ids:
-                self._invalidate_cache_and_stream(
-                    txn, self._get_full_presence_stream_token_for_user, (user_id,)
-                )
+            self._invalidate_cache_and_stream_bulk(
+                txn,
+                self._get_full_presence_stream_token_for_user,
+                [(user_id,) for user_id in user_ids],
+            )
 
         return await self.db_pool.runInteraction(
             "add_users_to_send_full_presence_to", _add_users_to_send_full_presence_to
diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py
index 3ba9cc8853..7ed111f632 100644
--- a/synapse/storage/databases/main/profile.py
+++ b/synapse/storage/databases/main/profile.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import TYPE_CHECKING, Optional
 
-from synapse.api.errors import StoreError
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.database import (
     DatabasePool,
@@ -138,23 +137,18 @@ class ProfileWorkerStore(SQLBaseStore):
         return 50
 
     async def get_profileinfo(self, user_id: UserID) -> ProfileInfo:
-        try:
-            profile = await self.db_pool.simple_select_one(
-                table="profiles",
-                keyvalues={"full_user_id": user_id.to_string()},
-                retcols=("displayname", "avatar_url"),
-                desc="get_profileinfo",
-            )
-        except StoreError as e:
-            if e.code == 404:
-                # no match
-                return ProfileInfo(None, None)
-            else:
-                raise
-
-        return ProfileInfo(
-            avatar_url=profile["avatar_url"], display_name=profile["displayname"]
+        profile = await self.db_pool.simple_select_one(
+            table="profiles",
+            keyvalues={"full_user_id": user_id.to_string()},
+            retcols=("displayname", "avatar_url"),
+            desc="get_profileinfo",
+            allow_none=True,
         )
+        if profile is None:
+            # no match
+            return ProfileInfo(None, None)
+
+        return ProfileInfo(avatar_url=profile[1], display_name=profile[0])
 
     async def get_profile_displayname(self, user_id: UserID) -> Optional[str]:
         return await self.db_pool.simple_select_one_onecol(
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index 1e11bf2706..1a5b5731bb 100644
--- a/synapse/storage/databases/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -295,19 +295,28 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
         # so make sure to keep this actually last.
         txn.execute("DROP TABLE events_to_purge")
 
-        for event_id, should_delete in event_rows:
-            self._invalidate_cache_and_stream(
-                txn, self._get_state_group_for_event, (event_id,)
-            )
+        self._invalidate_cache_and_stream_bulk(
+            txn,
+            self._get_state_group_for_event,
+            [(event_id,) for event_id, _ in event_rows],
+        )
 
-            # XXX: This is racy, since have_seen_events could be called between the
-            #    transaction completing and the invalidation running. On the other hand,
-            #    that's no different to calling `have_seen_events` just before the
-            #    event is deleted from the database.
+        # XXX: This is racy, since have_seen_events could be called between the
+        #    transaction completing and the invalidation running. On the other hand,
+        #    that's no different to calling `have_seen_events` just before the
+        #    event is deleted from the database.
+        self._invalidate_cache_and_stream_bulk(
+            txn,
+            self.have_seen_event,
+            [
+                (room_id, event_id)
+                for event_id, should_delete in event_rows
+                if should_delete
+            ],
+        )
+
+        for event_id, should_delete in event_rows:
             if should_delete:
-                self._invalidate_cache_and_stream(
-                    txn, self.have_seen_event, (room_id, event_id)
-                )
                 self.invalidate_get_event_cache_after_txn(txn, event_id)
 
         logger.info("[purge] done")
@@ -485,7 +494,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
         #  - room_tags_revisions
         #       The problem with these is that they are largeish and there is no room_id
         #       index on them. In any case we should be clearing out 'stream' tables
-        #       periodically anyway (#5888)
+        #       periodically anyway (https://github.com/matrix-org/synapse/issues/5888)
 
         self._invalidate_caches_for_room_and_stream(txn, room_id)
 
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index 22025eca56..cf622e195c 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -28,8 +28,11 @@ from typing import (
     cast,
 )
 
+from twisted.internet import defer
+
 from synapse.api.errors import StoreError
 from synapse.config.homeserver import ExperimentalConfig
+from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.replication.tcp.streams import PushRulesStream
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.database import (
@@ -51,7 +54,8 @@ from synapse.storage.util.id_generators import (
 )
 from synapse.synapse_rust.push import FilteredPushRules, PushRule, PushRules
 from synapse.types import JsonDict
-from synapse.util import json_encoder
+from synapse.util import json_encoder, unwrapFirstError
+from synapse.util.async_helpers import gather_results
 from synapse.util.caches.descriptors import cached, cachedList
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
@@ -249,23 +253,33 @@ class PushRulesWorkerStore(
             user_id: [] for user_id in user_ids
         }
 
-        rows = cast(
-            List[Tuple[str, str, int, int, str, str]],
-            await self.db_pool.simple_select_many_batch(
-                table="push_rules",
-                column="user_name",
-                iterable=user_ids,
-                retcols=(
-                    "user_name",
-                    "rule_id",
-                    "priority_class",
-                    "priority",
-                    "conditions",
-                    "actions",
+        # gatherResults loses all type information.
+        rows, enabled_map_by_user = await make_deferred_yieldable(
+            gather_results(
+                (
+                    cast(
+                        "defer.Deferred[List[Tuple[str, str, int, int, str, str]]]",
+                        run_in_background(
+                            self.db_pool.simple_select_many_batch,
+                            table="push_rules",
+                            column="user_name",
+                            iterable=user_ids,
+                            retcols=(
+                                "user_name",
+                                "rule_id",
+                                "priority_class",
+                                "priority",
+                                "conditions",
+                                "actions",
+                            ),
+                            desc="bulk_get_push_rules",
+                            batch_size=1000,
+                        ),
+                    ),
+                    run_in_background(self.bulk_get_push_rules_enabled, user_ids),
                 ),
-                desc="bulk_get_push_rules",
-                batch_size=1000,
-            ),
+                consumeErrors=True,
+            ).addErrback(unwrapFirstError)
         )
 
         # Sort by highest priority_class, then highest priority.
@@ -276,8 +290,6 @@ class PushRulesWorkerStore(
                 (rule_id, priority_class, conditions, actions)
             )
 
-        enabled_map_by_user = await self.bulk_get_push_rules_enabled(user_ids)
-
         results: Dict[str, FilteredPushRules] = {}
 
         for user_id, rules in raw_rules.items():
@@ -437,27 +449,28 @@ class PushRuleStore(PushRulesWorkerStore):
         before: str,
         after: str,
     ) -> None:
-        # Lock the table since otherwise we'll have annoying races between the
-        # SELECT here and the UPSERT below.
-        self.database_engine.lock_table(txn, "push_rules")
-
         relative_to_rule = before or after
 
-        res = self.db_pool.simple_select_one_txn(
-            txn,
-            table="push_rules",
-            keyvalues={"user_name": user_id, "rule_id": relative_to_rule},
-            retcols=["priority_class", "priority"],
-            allow_none=True,
-        )
+        sql = """
+            SELECT priority, priority_class FROM push_rules
+            WHERE user_name = ? AND rule_id = ?
+        """
 
-        if not res:
+        if isinstance(self.database_engine, PostgresEngine):
+            sql += " FOR UPDATE"
+        else:
+            # Annoyingly SQLite doesn't support row level locking, so lock the whole table
+            self.database_engine.lock_table(txn, "push_rules")
+
+        txn.execute(sql, (user_id, relative_to_rule))
+        row = txn.fetchone()
+
+        if row is None:
             raise RuleNotFoundException(
                 "before/after rule not found: %s" % (relative_to_rule,)
             )
 
-        base_priority_class = res["priority_class"]
-        base_rule_priority = res["priority"]
+        base_rule_priority, base_priority_class = row
 
         if base_priority_class != priority_class:
             raise InconsistentRuleException(
@@ -505,9 +518,18 @@ class PushRuleStore(PushRulesWorkerStore):
         conditions_json: str,
         actions_json: str,
     ) -> None:
-        # Lock the table since otherwise we'll have annoying races between the
-        # SELECT here and the UPSERT below.
-        self.database_engine.lock_table(txn, "push_rules")
+        if isinstance(self.database_engine, PostgresEngine):
+            # Postgres doesn't do FOR UPDATE on aggregate functions, so select the rows first
+            # then re-select the count/max below.
+            sql = """
+                SELECT * FROM push_rules
+                WHERE user_name = ? and priority_class = ?
+                FOR UPDATE
+            """
+            txn.execute(sql, (user_id, priority_class))
+        else:
+            # Annoyingly SQLite doesn't support row level locking, so lock the whole table
+            self.database_engine.lock_table(txn, "push_rules")
 
         # find the highest priority rule in that class
         sql = (
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 56e8eb16a8..3484ce9ef9 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -701,8 +701,8 @@ class ReceiptsWorkerStore(SQLBaseStore):
             allow_none=True,
         )
 
-        stream_ordering = int(res["stream_ordering"]) if res else None
-        rx_ts = res["received_ts"] if res else 0
+        stream_ordering = int(res[0]) if res else None
+        rx_ts = res[1] if res else 0
 
         # We don't want to clobber receipts for more recent events, so we
         # have to compare orderings of existing receipts
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index e09ab21593..2c3f30e2eb 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -425,17 +425,14 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
                     account timestamp as milliseconds since the epoch. None if the account
                     has not been renewed using the current token yet.
         """
-        ret_dict = await self.db_pool.simple_select_one(
-            table="account_validity",
-            keyvalues={"renewal_token": renewal_token},
-            retcols=["user_id", "expiration_ts_ms", "token_used_ts_ms"],
-            desc="get_user_from_renewal_token",
-        )
-
-        return (
-            ret_dict["user_id"],
-            ret_dict["expiration_ts_ms"],
-            ret_dict["token_used_ts_ms"],
+        return cast(
+            Tuple[str, int, Optional[int]],
+            await self.db_pool.simple_select_one(
+                table="account_validity",
+                keyvalues={"renewal_token": renewal_token},
+                retcols=["user_id", "expiration_ts_ms", "token_used_ts_ms"],
+                desc="get_user_from_renewal_token",
+            ),
         )
 
     async def get_renewal_token_for_user(self, user_id: str) -> str:
@@ -564,16 +561,15 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
                 updatevalues={"shadow_banned": shadow_banned},
             )
             # In order for this to apply immediately, clear the cache for this user.
-            tokens = self.db_pool.simple_select_onecol_txn(
+            tokens = self.db_pool.simple_select_list_txn(
                 txn,
                 table="access_tokens",
                 keyvalues={"user_id": user_id},
-                retcol="token",
+                retcols=("token",),
+            )
+            self._invalidate_cache_and_stream_bulk(
+                txn, self.get_user_by_access_token, tokens
             )
-            for token in tokens:
-                self._invalidate_cache_and_stream(
-                    txn, self.get_user_by_access_token, (token,)
-                )
             self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
 
         await self.db_pool.runInteraction("set_shadow_banned", set_shadow_banned_txn)
@@ -989,16 +985,13 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
         Returns:
             user id, or None if no user id/threepid mapping exists
         """
-        ret = self.db_pool.simple_select_one_txn(
+        return self.db_pool.simple_select_one_onecol_txn(
             txn,
             "user_threepids",
             {"medium": medium, "address": address},
-            ["user_id"],
+            "user_id",
             True,
         )
-        if ret:
-            return ret["user_id"]
-        return None
 
     async def user_add_threepid(
         self,
@@ -1435,16 +1428,15 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
         if res is None:
             return False
 
+        uses_allowed, pending, completed, expiry_time = res
+
         # Check if the token has expired
         now = self._clock.time_msec()
-        if res["expiry_time"] and res["expiry_time"] < now:
+        if expiry_time and expiry_time < now:
             return False
 
         # Check if the token has been used up
-        if (
-            res["uses_allowed"]
-            and res["pending"] + res["completed"] >= res["uses_allowed"]
-        ):
+        if uses_allowed and pending + completed >= uses_allowed:
             return False
 
         # Otherwise, the token is valid
@@ -1490,8 +1482,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
             # Override type because the return type is only optional if
             # allow_none is True, and we don't want mypy throwing errors
             # about None not being indexable.
-            res = cast(
-                Dict[str, Any],
+            pending, completed = cast(
+                Tuple[int, int],
                 self.db_pool.simple_select_one_txn(
                     txn,
                     "registration_tokens",
@@ -1506,8 +1498,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
                 "registration_tokens",
                 keyvalues={"token": token},
                 updatevalues={
-                    "completed": res["completed"] + 1,
-                    "pending": res["pending"] - 1,
+                    "completed": completed + 1,
+                    "pending": pending - 1,
                 },
             )
 
@@ -1517,7 +1509,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
 
     async def get_registration_tokens(
         self, valid: Optional[bool] = None
-    ) -> List[Dict[str, Any]]:
+    ) -> List[Tuple[str, Optional[int], int, int, Optional[int]]]:
         """List all registration tokens. Used by the admin API.
 
         Args:
@@ -1526,34 +1518,48 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
               Default is None: return all tokens regardless of validity.
 
         Returns:
-            A list of dicts, each containing details of a token.
+            A list of tuples containing:
+                * The token
+                * The number of users allowed (or None)
+                * Whether it is pending
+                * Whether it has been completed
+                * An expiry time (or None if no expiry)
         """
 
         def select_registration_tokens_txn(
             txn: LoggingTransaction, now: int, valid: Optional[bool]
-        ) -> List[Dict[str, Any]]:
+        ) -> List[Tuple[str, Optional[int], int, int, Optional[int]]]:
             if valid is None:
                 # Return all tokens regardless of validity
-                txn.execute("SELECT * FROM registration_tokens")
+                txn.execute(
+                    """
+                    SELECT token, uses_allowed, pending, completed, expiry_time
+                    FROM registration_tokens
+                    """
+                )
 
             elif valid:
                 # Select valid tokens only
-                sql = (
-                    "SELECT * FROM registration_tokens WHERE "
-                    "(uses_allowed > pending + completed OR uses_allowed IS NULL) "
-                    "AND (expiry_time > ? OR expiry_time IS NULL)"
-                )
+                sql = """
+                SELECT token, uses_allowed, pending, completed, expiry_time
+                FROM registration_tokens
+                WHERE (uses_allowed > pending + completed OR uses_allowed IS NULL)
+                    AND (expiry_time > ? OR expiry_time IS NULL)
+                """
                 txn.execute(sql, [now])
 
             else:
                 # Select invalid tokens only
-                sql = (
-                    "SELECT * FROM registration_tokens WHERE "
-                    "uses_allowed <= pending + completed OR expiry_time <= ?"
-                )
+                sql = """
+                SELECT token, uses_allowed, pending, completed, expiry_time
+                FROM registration_tokens
+                WHERE uses_allowed <= pending + completed OR expiry_time <= ?
+                """
                 txn.execute(sql, [now])
 
-            return self.db_pool.cursor_to_dict(txn)
+            return cast(
+                List[Tuple[str, Optional[int], int, int, Optional[int]]], txn.fetchall()
+            )
 
         return await self.db_pool.runInteraction(
             "select_registration_tokens",
@@ -1571,13 +1577,22 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
         Returns:
             A dict, or None if token doesn't exist.
         """
-        return await self.db_pool.simple_select_one(
+        row = await self.db_pool.simple_select_one(
             "registration_tokens",
             keyvalues={"token": token},
             retcols=["token", "uses_allowed", "pending", "completed", "expiry_time"],
             allow_none=True,
             desc="get_one_registration_token",
         )
+        if row is None:
+            return None
+        return {
+            "token": row[0],
+            "uses_allowed": row[1],
+            "pending": row[2],
+            "completed": row[3],
+            "expiry_time": row[4],
+        }
 
     async def generate_registration_token(
         self, length: int, chars: str
@@ -1700,7 +1715,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
                 return None
 
             # Get all info about the token so it can be sent in the response
-            return self.db_pool.simple_select_one_txn(
+            result = self.db_pool.simple_select_one_txn(
                 txn,
                 "registration_tokens",
                 keyvalues={"token": token},
@@ -1714,6 +1729,17 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
                 allow_none=True,
             )
 
+            if result is None:
+                return result
+
+            return {
+                "token": result[0],
+                "uses_allowed": result[1],
+                "pending": result[2],
+                "completed": result[3],
+                "expiry_time": result[4],
+            }
+
         return await self.db_pool.runInteraction(
             "update_registration_token", _update_registration_token_txn
         )
@@ -1925,11 +1951,13 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
             keyvalues={"token": token},
             updatevalues={"used_ts": ts},
         )
-        user_id = values["user_id"]
-        expiry_ts = values["expiry_ts"]
-        used_ts = values["used_ts"]
-        auth_provider_id = values["auth_provider_id"]
-        auth_provider_session_id = values["auth_provider_session_id"]
+        (
+            user_id,
+            expiry_ts,
+            used_ts,
+            auth_provider_id,
+            auth_provider_session_id,
+        ) = values
 
         # Token was already used
         if used_ts is not None:
@@ -2654,10 +2682,11 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
             )
             tokens_and_devices = [(r[0], r[1], r[2]) for r in txn]
 
-            for token, _, _ in tokens_and_devices:
-                self._invalidate_cache_and_stream(
-                    txn, self.get_user_by_access_token, (token,)
-                )
+            self._invalidate_cache_and_stream_bulk(
+                txn,
+                self.get_user_by_access_token,
+                [(token,) for token, _, _ in tokens_and_devices],
+            )
 
             txn.execute("DELETE FROM access_tokens WHERE %s" % where_clause, values)
 
@@ -2742,12 +2771,11 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
                     # reason, the next check is on the client secret, which is NOT NULL,
                     # so we don't have to worry about the client secret matching by
                     # accident.
-                    row = {"client_secret": None, "validated_at": None}
+                    row = None, None
                 else:
                     raise ThreepidValidationError("Unknown session_id")
 
-            retrieved_client_secret = row["client_secret"]
-            validated_at = row["validated_at"]
+            retrieved_client_secret, validated_at = row
 
             row = self.db_pool.simple_select_one_txn(
                 txn,
@@ -2761,8 +2789,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
                 raise ThreepidValidationError(
                     "Validation token not found or has expired"
                 )
-            expires = row["expires"]
-            next_link = row["next_link"]
+            expires, next_link = row
 
             if retrieved_client_secret != client_secret:
                 raise ThreepidValidationError(
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 3e8fcf1975..ef26d5d9d3 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -78,6 +78,31 @@ class RatelimitOverride:
     burst_count: int
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class LargestRoomStats:
+    room_id: str
+    name: Optional[str]
+    canonical_alias: Optional[str]
+    joined_members: int
+    join_rules: Optional[str]
+    guest_access: Optional[str]
+    history_visibility: Optional[str]
+    state_events: int
+    avatar: Optional[str]
+    topic: Optional[str]
+    room_type: Optional[str]
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class RoomStats(LargestRoomStats):
+    joined_local_members: int
+    version: Optional[str]
+    creator: Optional[str]
+    encryption: Optional[str]
+    federatable: bool
+    public: bool
+
+
 class RoomSortOrder(Enum):
     """
     Enum to define the sorting method used when returning rooms with get_rooms_paginate
@@ -188,23 +213,33 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
             logger.error("store_room with room_id=%s failed: %s", room_id, e)
             raise StoreError(500, "Problem creating room.")
 
-    async def get_room(self, room_id: str) -> Optional[Dict[str, Any]]:
+    async def get_room(self, room_id: str) -> Optional[Tuple[bool, bool]]:
         """Retrieve a room.
 
         Args:
             room_id: The ID of the room to retrieve.
         Returns:
-            A dict containing the room information, or None if the room is unknown.
+            A tuple containing the room information:
+                * True if the room is public
+                * True if the room has an auth chain index
+
+            or None if the room is unknown.
         """
-        return await self.db_pool.simple_select_one(
-            table="rooms",
-            keyvalues={"room_id": room_id},
-            retcols=("room_id", "is_public", "creator", "has_auth_chain_index"),
-            desc="get_room",
-            allow_none=True,
+        row = cast(
+            Optional[Tuple[Optional[Union[int, bool]], Optional[Union[int, bool]]]],
+            await self.db_pool.simple_select_one(
+                table="rooms",
+                keyvalues={"room_id": room_id},
+                retcols=("is_public", "has_auth_chain_index"),
+                desc="get_room",
+                allow_none=True,
+            ),
         )
+        if row is None:
+            return row
+        return bool(row[0]), bool(row[1])
 
-    async def get_room_with_stats(self, room_id: str) -> Optional[Dict[str, Any]]:
+    async def get_room_with_stats(self, room_id: str) -> Optional[RoomStats]:
         """Retrieve room with statistics.
 
         Args:
@@ -215,7 +250,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
 
         def get_room_with_stats_txn(
             txn: LoggingTransaction, room_id: str
-        ) -> Optional[Dict[str, Any]]:
+        ) -> Optional[RoomStats]:
             sql = """
                 SELECT room_id, state.name, state.canonical_alias, curr.joined_members,
                   curr.local_users_in_room AS joined_local_members, rooms.room_version AS version,
@@ -229,15 +264,28 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
                 WHERE room_id = ?
                 """
             txn.execute(sql, [room_id])
-            # Catch error if sql returns empty result to return "None" instead of an error
-            try:
-                res = self.db_pool.cursor_to_dict(txn)[0]
-            except IndexError:
+            row = txn.fetchone()
+            if not row:
                 return None
-
-            res["federatable"] = bool(res["federatable"])
-            res["public"] = bool(res["public"])
-            return res
+            return RoomStats(
+                room_id=row[0],
+                name=row[1],
+                canonical_alias=row[2],
+                joined_members=row[3],
+                joined_local_members=row[4],
+                version=row[5],
+                creator=row[6],
+                encryption=row[7],
+                federatable=bool(row[8]),
+                public=bool(row[9]),
+                join_rules=row[10],
+                guest_access=row[11],
+                history_visibility=row[12],
+                state_events=row[13],
+                avatar=row[14],
+                topic=row[15],
+                room_type=row[16],
+            )
 
         return await self.db_pool.runInteraction(
             "get_room_with_stats", get_room_with_stats_txn, room_id
@@ -368,7 +416,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         bounds: Optional[Tuple[int, str]],
         forwards: bool,
         ignore_non_federatable: bool = False,
-    ) -> List[Dict[str, Any]]:
+    ) -> List[LargestRoomStats]:
         """Gets the largest public rooms (where largest is in terms of joined
         members, as tracked in the statistics table).
 
@@ -505,20 +553,34 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
 
         def _get_largest_public_rooms_txn(
             txn: LoggingTransaction,
-        ) -> List[Dict[str, Any]]:
+        ) -> List[LargestRoomStats]:
             txn.execute(sql, query_args)
 
-            results = self.db_pool.cursor_to_dict(txn)
+            results = [
+                LargestRoomStats(
+                    room_id=r[0],
+                    name=r[1],
+                    canonical_alias=r[3],
+                    joined_members=r[4],
+                    join_rules=r[8],
+                    guest_access=r[7],
+                    history_visibility=r[6],
+                    state_events=0,
+                    avatar=r[5],
+                    topic=r[2],
+                    room_type=r[9],
+                )
+                for r in txn
+            ]
 
             if not forwards:
                 results.reverse()
 
             return results
 
-        ret_val = await self.db_pool.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_largest_public_rooms", _get_largest_public_rooms_txn
         )
-        return ret_val
 
     @cached(max_entries=10000)
     async def is_room_blocked(self, room_id: str) -> Optional[bool]:
@@ -742,10 +804,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         )
 
         if row:
-            return RatelimitOverride(
-                messages_per_second=row["messages_per_second"],
-                burst_count=row["burst_count"],
-            )
+            return RatelimitOverride(messages_per_second=row[0], burst_count=row[1])
         else:
             return None
 
@@ -1319,13 +1378,15 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         join.
         """
 
-        result = await self.db_pool.simple_select_one(
-            table="partial_state_rooms",
-            keyvalues={"room_id": room_id},
-            retcols=("join_event_id", "device_lists_stream_id"),
-            desc="get_join_event_id_for_partial_state",
+        return cast(
+            Tuple[str, int],
+            await self.db_pool.simple_select_one(
+                table="partial_state_rooms",
+                keyvalues={"room_id": room_id},
+                retcols=("join_event_id", "device_lists_stream_id"),
+                desc="get_join_event_id_for_partial_state",
+            ),
         )
-        return result["join_event_id"], result["device_lists_stream_id"]
 
     def get_un_partial_stated_rooms_token(self, instance_name: str) -> int:
         return self._un_partial_stated_rooms_stream_id_gen.get_current_token_for_writer(
@@ -2216,7 +2277,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
             txn,
             table="partial_state_rooms_servers",
             keys=("room_id", "server_name"),
-            values=((room_id, s) for s in servers),
+            values=[(room_id, s) for s in servers],
         )
         self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
         self._invalidate_cache_and_stream(
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 1ed7f2d0ef..60d4a9ef30 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -559,17 +559,20 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
                 "non-local user %s" % (user_id,),
             )
 
-        results_dict = await self.db_pool.simple_select_one(
-            "local_current_membership",
-            {"room_id": room_id, "user_id": user_id},
-            ("membership", "event_id"),
-            allow_none=True,
-            desc="get_local_current_membership_for_user_in_room",
+        results = cast(
+            Optional[Tuple[str, str]],
+            await self.db_pool.simple_select_one(
+                "local_current_membership",
+                {"room_id": room_id, "user_id": user_id},
+                ("membership", "event_id"),
+                allow_none=True,
+                desc="get_local_current_membership_for_user_in_room",
+            ),
         )
-        if not results_dict:
+        if not results:
             return None, None
 
-        return results_dict.get("membership"), results_dict.get("event_id")
+        return results
 
     @cached(max_entries=500000, iterable=True)
     async def get_rooms_for_user_with_stream_ordering(
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index dbde9130c6..e25d86818b 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -106,7 +106,7 @@ class SearchWorkerStore(SQLBaseStore):
                 txn,
                 table="event_search",
                 keys=("event_id", "room_id", "key", "value"),
-                values=(
+                values=[
                     (
                         entry.event_id,
                         entry.room_id,
@@ -114,7 +114,7 @@ class SearchWorkerStore(SQLBaseStore):
                         _clean_value_for_search(entry.value),
                     )
                     for entry in entries
-                ),
+                ],
             )
 
         else:
@@ -275,7 +275,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
 
             # we have to set autocommit, because postgres refuses to
             # CREATE INDEX CONCURRENTLY without it.
-            conn.set_session(autocommit=True)
+            conn.engine.attempt_to_set_autocommit(conn.conn, True)
 
             try:
                 c = conn.cursor()
@@ -301,7 +301,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
                 # we should now be able to delete the GIST index.
                 c.execute("DROP INDEX IF EXISTS event_search_fts_idx_gist")
             finally:
-                conn.set_session(autocommit=False)
+                conn.engine.attempt_to_set_autocommit(conn.conn, False)
 
         if isinstance(self.database_engine, PostgresEngine):
             await self.db_pool.runWithConnection(create_index)
@@ -323,7 +323,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
 
             def create_index(conn: LoggingDatabaseConnection) -> None:
                 conn.rollback()
-                conn.set_session(autocommit=True)
+                conn.engine.attempt_to_set_autocommit(conn.conn, True)
                 c = conn.cursor()
 
                 # We create with NULLS FIRST so that when we search *backwards*
@@ -340,7 +340,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
                     ON event_search(origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)
                     """
                 )
-                conn.set_session(autocommit=False)
+                conn.engine.attempt_to_set_autocommit(conn.conn, False)
 
             await self.db_pool.runWithConnection(create_index)
 
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 2225f8272d..563c275a2c 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -1014,9 +1014,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             desc="get_position_for_event",
         )
 
-        return PersistedEventPosition(
-            row["instance_name"] or "master", row["stream_ordering"]
-        )
+        return PersistedEventPosition(row[1] or "master", row[0])
 
     async def get_topological_token_for_event(self, event_id: str) -> RoomStreamToken:
         """The stream token for an event
@@ -1033,9 +1031,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             retcols=("stream_ordering", "topological_ordering"),
             desc="get_topological_token_for_event",
         )
-        return RoomStreamToken(
-            topological=row["topological_ordering"], stream=row["stream_ordering"]
-        )
+        return RoomStreamToken(topological=row[1], stream=row[0])
 
     async def get_current_topological_token(self, room_id: str, stream_key: int) -> int:
         """Gets the topological token in a room after or at the given stream
@@ -1180,26 +1176,24 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             dict
         """
 
-        results = self.db_pool.simple_select_one_txn(
-            txn,
-            "events",
-            keyvalues={"event_id": event_id, "room_id": room_id},
-            retcols=["stream_ordering", "topological_ordering"],
+        stream_ordering, topological_ordering = cast(
+            Tuple[int, int],
+            self.db_pool.simple_select_one_txn(
+                txn,
+                "events",
+                keyvalues={"event_id": event_id, "room_id": room_id},
+                retcols=["stream_ordering", "topological_ordering"],
+            ),
         )
 
-        # This cannot happen as `allow_none=False`.
-        assert results is not None
-
         # Paginating backwards includes the event at the token, but paginating
         # forward doesn't.
         before_token = RoomStreamToken(
-            topological=results["topological_ordering"] - 1,
-            stream=results["stream_ordering"],
+            topological=topological_ordering - 1, stream=stream_ordering
         )
 
         after_token = RoomStreamToken(
-            topological=results["topological_ordering"],
-            stream=results["stream_ordering"],
+            topological=topological_ordering, stream=stream_ordering
         )
 
         rows, start_token = self._paginate_room_events_txn(
diff --git a/synapse/storage/databases/main/task_scheduler.py b/synapse/storage/databases/main/task_scheduler.py
index 5555b53575..64543b4d61 100644
--- a/synapse/storage/databases/main/task_scheduler.py
+++ b/synapse/storage/databases/main/task_scheduler.py
@@ -183,39 +183,27 @@ class TaskSchedulerWorkerStore(SQLBaseStore):
 
         Returns: the task if available, `None` otherwise
         """
-        row = await self.db_pool.simple_select_one(
-            table="scheduled_tasks",
-            keyvalues={"id": id},
-            retcols=(
-                "id",
-                "action",
-                "status",
-                "timestamp",
-                "resource_id",
-                "params",
-                "result",
-                "error",
+        row = cast(
+            Optional[ScheduledTaskRow],
+            await self.db_pool.simple_select_one(
+                table="scheduled_tasks",
+                keyvalues={"id": id},
+                retcols=(
+                    "id",
+                    "action",
+                    "status",
+                    "timestamp",
+                    "resource_id",
+                    "params",
+                    "result",
+                    "error",
+                ),
+                allow_none=True,
+                desc="get_scheduled_task",
             ),
-            allow_none=True,
-            desc="get_scheduled_task",
         )
 
-        return (
-            TaskSchedulerWorkerStore._convert_row_to_task(
-                (
-                    row["id"],
-                    row["action"],
-                    row["status"],
-                    row["timestamp"],
-                    row["resource_id"],
-                    row["params"],
-                    row["result"],
-                    row["error"],
-                )
-            )
-            if row
-            else None
-        )
+        return TaskSchedulerWorkerStore._convert_row_to_task(row) if row else None
 
     async def delete_scheduled_task(self, id: str) -> None:
         """Delete a specific task from its id.
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index fecddb4144..2d341affaa 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -118,19 +118,13 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
             txn,
             table="received_transactions",
             keyvalues={"transaction_id": transaction_id, "origin": origin},
-            retcols=(
-                "transaction_id",
-                "origin",
-                "ts",
-                "response_code",
-                "response_json",
-                "has_been_referenced",
-            ),
+            retcols=("response_code", "response_json"),
             allow_none=True,
         )
 
-        if result and result["response_code"]:
-            return result["response_code"], db_to_json(result["response_json"])
+        # If the result exists and the response code is non-0.
+        if result and result[0]:
+            return result[0], db_to_json(result[1])
 
         else:
             return None
@@ -200,8 +194,10 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
 
         # check we have a row and retry_last_ts is not null or zero
         # (retry_last_ts can't be negative)
-        if result and result["retry_last_ts"]:
-            return DestinationRetryTimings(**result)
+        if result and result[1]:
+            return DestinationRetryTimings(
+                failure_ts=result[0], retry_last_ts=result[1], retry_interval=result[2]
+            )
         else:
             return None
 
diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py
index 8ab7c42c4a..5b164fed8e 100644
--- a/synapse/storage/databases/main/ui_auth.py
+++ b/synapse/storage/databases/main/ui_auth.py
@@ -122,9 +122,13 @@ class UIAuthWorkerStore(SQLBaseStore):
             desc="get_ui_auth_session",
         )
 
-        result["clientdict"] = db_to_json(result["clientdict"])
-
-        return UIAuthSessionData(session_id, **result)
+        return UIAuthSessionData(
+            session_id,
+            clientdict=db_to_json(result[0]),
+            uri=result[1],
+            method=result[2],
+            description=result[3],
+        )
 
     async def mark_ui_auth_stage_complete(
         self,
@@ -231,18 +235,15 @@ class UIAuthWorkerStore(SQLBaseStore):
         self, txn: LoggingTransaction, session_id: str, key: str, value: Any
     ) -> None:
         # Get the current value.
-        result = cast(
-            Dict[str, Any],
-            self.db_pool.simple_select_one_txn(
-                txn,
-                table="ui_auth_sessions",
-                keyvalues={"session_id": session_id},
-                retcols=("serverdict",),
-            ),
+        result = self.db_pool.simple_select_one_onecol_txn(
+            txn,
+            table="ui_auth_sessions",
+            keyvalues={"session_id": session_id},
+            retcol="serverdict",
         )
 
         # Update it and add it back to the database.
-        serverdict = db_to_json(result["serverdict"])
+        serverdict = db_to_json(result)
         serverdict[key] = value
 
         self.db_pool.simple_update_one_txn(
@@ -265,14 +266,14 @@ class UIAuthWorkerStore(SQLBaseStore):
         Raises:
             StoreError if the session cannot be found.
         """
-        result = await self.db_pool.simple_select_one(
+        result = await self.db_pool.simple_select_one_onecol(
             table="ui_auth_sessions",
             keyvalues={"session_id": session_id},
-            retcols=("serverdict",),
+            retcol="serverdict",
             desc="get_ui_auth_session_data",
         )
 
-        serverdict = db_to_json(result["serverdict"])
+        serverdict = db_to_json(result)
 
         return serverdict.get(key, default)
 
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index a9f5d68b63..1a38f3d785 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -20,7 +20,6 @@ from typing import (
     Collection,
     Iterable,
     List,
-    Mapping,
     Optional,
     Sequence,
     Set,
@@ -833,13 +832,25 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             "delete_all_from_user_dir", _delete_all_from_user_dir_txn
         )
 
-    async def _get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]:
-        return await self.db_pool.simple_select_one(
-            table="user_directory",
-            keyvalues={"user_id": user_id},
-            retcols=("display_name", "avatar_url"),
-            allow_none=True,
-            desc="get_user_in_directory",
+    async def _get_user_in_directory(
+        self, user_id: str
+    ) -> Optional[Tuple[Optional[str], Optional[str]]]:
+        """
+        Fetch the user information in the user directory.
+
+        Returns:
+            None if the user is unknown, otherwise a tuple of display name and
+            avatar URL (both of which may be None).
+        """
+        return cast(
+            Optional[Tuple[Optional[str], Optional[str]]],
+            await self.db_pool.simple_select_one(
+                table="user_directory",
+                keyvalues={"user_id": user_id},
+                retcols=("display_name", "avatar_url"),
+                allow_none=True,
+                desc="get_user_in_directory",
+            ),
         )
 
     async def update_user_directory_stream_pos(self, stream_id: Optional[int]) -> None:
diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
index 0f9c550b27..2c3151526d 100644
--- a/synapse/storage/databases/state/bg_updates.py
+++ b/synapse/storage/databases/state/bg_updates.py
@@ -492,7 +492,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
             conn.rollback()
             if isinstance(self.database_engine, PostgresEngine):
                 # postgres insists on autocommit for the index
-                conn.set_session(autocommit=True)
+                conn.engine.attempt_to_set_autocommit(conn.conn, True)
                 try:
                     txn = conn.cursor()
                     txn.execute(
@@ -501,7 +501,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
                     )
                     txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
                 finally:
-                    conn.set_session(autocommit=False)
+                    conn.engine.attempt_to_set_autocommit(conn.conn, False)
             else:
                 txn = conn.cursor()
                 txn.execute(
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 6309363217..ec4c4041b7 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -38,7 +38,8 @@ class PostgresEngine(
         super().__init__(psycopg2, database_config)
         psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
 
-        # Disables passing `bytes` to txn.execute, c.f. #6186. If you do
+        # Disables passing `bytes` to txn.execute, c.f.
+        # https://github.com/matrix-org/synapse/issues/6186. If you do
         # actually want to use bytes than wrap it in `bytearray`.
         def _disable_bytes_adapter(_: bytes) -> NoReturn:
             raise Exception("Passing bytes to DB is disabled.")
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 158b528dce..03e5a0f55d 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -109,7 +109,8 @@ Changes in SCHEMA_VERSION = 78
 
 Changes in SCHEMA_VERSION = 79
     - Add tables to handle in DB read-write locks.
-    - Add some mitigations for a painful race between foreground and background updates, cf #15677.
+    - Add some mitigations for a painful race between foreground and background updates, cf
+      https://github.com/matrix-org/synapse/issues/15677.
 
 Changes in SCHEMA_VERSION = 80
     - The event_txn_id_device_id is always written to for new events.
diff --git a/synapse/storage/schema/main/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/main/delta/54/delete_forward_extremities.sql
index b062ec840c..f713e42aa0 100644
--- a/synapse/storage/schema/main/delta/54/delete_forward_extremities.sql
+++ b/synapse/storage/schema/main/delta/54/delete_forward_extremities.sql
@@ -14,7 +14,7 @@
  */
 
 -- Start a background job to cleanup extremities that were incorrectly added
--- by bug #5269.
+-- by bug https://github.com/matrix-org/synapse/issues/5269.
 INSERT INTO background_updates (update_name, progress_json) VALUES
   ('delete_soft_failed_extremities', '{}');
 
diff --git a/synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql b/synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql
index aeb17813d3..246c3359f7 100644
--- a/synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql
+++ b/synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql
@@ -13,6 +13,7 @@
  * limitations under the License.
  */
 
--- Now that #6232 is a thing, we can remove old rooms from the directory.
+-- Now that https://github.com/matrix-org/synapse/pull/6232 is a thing, we can
+-- remove old rooms from the directory.
 INSERT INTO background_updates (update_name, progress_json) VALUES
   ('remove_tombstoned_rooms_from_directory', '{}');
diff --git a/synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql b/synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql
index aed79635b2..31a61defa7 100644
--- a/synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql
+++ b/synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql
@@ -13,7 +13,8 @@
  * limitations under the License.
  */
 
--- Clean up left over rows from bug #11833, which was fixed in #12770.
+-- Clean up left over rows from bug https://github.com/matrix-org/synapse/issues/11833,
+-- which was fixed in https://github.com/matrix-org/synapse/pull/12770.
 DELETE FROM federation_inbound_events_staging WHERE room_id not in (
     SELECT room_id FROM rooms
 );
diff --git a/synapse/storage/schema/main/delta/83/05_cross_signing_key_update_grant.sql b/synapse/storage/schema/main/delta/83/05_cross_signing_key_update_grant.sql
new file mode 100644
index 0000000000..b74bdd71fa
--- /dev/null
+++ b/synapse/storage/schema/main/delta/83/05_cross_signing_key_update_grant.sql
@@ -0,0 +1,15 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ALTER TABLE e2e_cross_signing_keys ADD COLUMN updatable_without_uia_before_ms bigint DEFAULT NULL;
\ No newline at end of file
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 9c3eafb562..bd3c81827f 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -650,8 +650,8 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
 
         next_id = self._load_next_id_txn(txn)
 
-        txn.call_after(self._mark_id_as_finished, next_id)
-        txn.call_on_exception(self._mark_id_as_finished, next_id)
+        txn.call_after(self._mark_ids_as_finished, [next_id])
+        txn.call_on_exception(self._mark_ids_as_finished, [next_id])
         txn.call_after(self._notifier.notify_replication)
 
         # Update the `stream_positions` table with newly updated stream
@@ -671,14 +671,50 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
 
         return self._return_factor * next_id
 
-    def _mark_id_as_finished(self, next_id: int) -> None:
-        """The ID has finished being processed so we should advance the
+    def get_next_mult_txn(self, txn: LoggingTransaction, n: int) -> List[int]:
+        """
+        Usage:
+
+            stream_ids = stream_id_gen.get_next_mult_txn(txn, 5)
+            # ... persist events ...
+        """
+
+        # If we have a list of instances that are allowed to write to this
+        # stream, make sure we're in it.
+        if self._writers and self._instance_name not in self._writers:
+            raise Exception("Tried to allocate stream ID on non-writer")
+
+        next_ids = self._load_next_mult_id_txn(txn, n)
+
+        txn.call_after(self._mark_ids_as_finished, next_ids)
+        txn.call_on_exception(self._mark_ids_as_finished, next_ids)
+        txn.call_after(self._notifier.notify_replication)
+
+        # Update the `stream_positions` table with newly updated stream
+        # ID (unless self._writers is not set in which case we don't
+        # bother, as nothing will read it).
+        #
+        # We only do this on the success path so that the persisted current
+        # position points to a persisted row with the correct instance name.
+        if self._writers:
+            txn.call_after(
+                run_as_background_process,
+                "MultiWriterIdGenerator._update_table",
+                self._db.runInteraction,
+                "MultiWriterIdGenerator._update_table",
+                self._update_stream_positions_table_txn,
+            )
+
+        return [self._return_factor * next_id for next_id in next_ids]
+
+    def _mark_ids_as_finished(self, next_ids: List[int]) -> None:
+        """These IDs have finished being processed so we should advance the
         current position if possible.
         """
 
         with self._lock:
-            self._unfinished_ids.discard(next_id)
-            self._finished_ids.add(next_id)
+            self._unfinished_ids.difference_update(next_ids)
+            self._finished_ids.update(next_ids)
 
             new_cur: Optional[int] = None
 
@@ -727,7 +763,10 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
                     curr, new_cur, self._max_position_of_local_instance
                 )
 
-            self._add_persisted_position(next_id)
+            # TODO Can we call this for just the last position or somehow batch
+            # _add_persisted_position.
+            for next_id in next_ids:
+                self._add_persisted_position(next_id)
 
     def get_current_token(self) -> int:
         return self.get_persisted_upto_position()
@@ -933,8 +972,7 @@ class _MultiWriterCtxManager:
         exc: Optional[BaseException],
         tb: Optional[TracebackType],
     ) -> bool:
-        for i in self.stream_ids:
-            self.id_gen._mark_id_as_finished(i)
+        self.id_gen._mark_ids_as_finished(self.stream_ids)
 
         self.notifier.notify_replication()
 
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 9f3b8741c1..8d9df352b2 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -93,7 +93,7 @@ class Clock:
 
     _reactor: IReactorTime = attr.ib()
 
-    @defer.inlineCallbacks  # type: ignore[arg-type]  # Issue in Twisted's type annotations
+    @defer.inlineCallbacks
     def sleep(self, seconds: float) -> "Generator[Deferred[float], Any, Any]":
         d: defer.Deferred[float] = defer.Deferred()
         with context.PreserveLoggingContext():
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 0cbeb0c365..8a55e4e41d 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -345,6 +345,7 @@ async def yieldable_gather_results_delaying_cancellation(
 T1 = TypeVar("T1")
 T2 = TypeVar("T2")
 T3 = TypeVar("T3")
+T4 = TypeVar("T4")
 
 
 @overload
@@ -380,6 +381,19 @@ def gather_results(
     ...
 
 
+@overload
+def gather_results(
+    deferredList: Tuple[
+        "defer.Deferred[T1]",
+        "defer.Deferred[T2]",
+        "defer.Deferred[T3]",
+        "defer.Deferred[T4]",
+    ],
+    consumeErrors: bool = ...,
+) -> "defer.Deferred[Tuple[T1, T2, T3, T4]]":
+    ...
+
+
 def gather_results(  # type: ignore[misc]
     deferredList: Tuple["defer.Deferred[T1]", ...],
     consumeErrors: bool = False,
diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py
index f7cead9e12..6f008734a0 100644
--- a/synapse/util/check_dependencies.py
+++ b/synapse/util/check_dependencies.py
@@ -189,7 +189,8 @@ def check_requirements(extra: Optional[str] = None) -> None:
                 errors.append(_not_installed(requirement, extra))
         else:
             if dist.version is None:
-                # This shouldn't happen---it suggests a borked virtualenv. (See #12223)
+                # This shouldn't happen---it suggests a borked virtualenv. (See
+                # https://github.com/matrix-org/synapse/issues/12223)
                 # Try to give a vaguely helpful error message anyway.
                 # Type-ignore: the annotations don't reflect reality: see
                 #     https://github.com/python/typeshed/issues/7513
diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py
index a0efb96d3b..f4c0194af0 100644
--- a/synapse/util/iterutils.py
+++ b/synapse/util/iterutils.py
@@ -135,3 +135,54 @@ def sorted_topologically(
                 degree_map[edge] -= 1
                 if degree_map[edge] == 0:
                     heapq.heappush(zero_degree, edge)
+
+
+def sorted_topologically_batched(
+    nodes: Iterable[T],
+    graph: Mapping[T, Collection[T]],
+) -> Generator[Collection[T], None, None]:
+    r"""Walk the graph topologically, returning batches of nodes where all nodes
+    that references it have been previously returned.
+
+    For example, given the following graph:
+
+         A
+        / \
+       B   C
+        \ /
+         D
+
+    This function will return: `[[A], [B, C], [D]]`.
+
+    This function is useful for e.g. batch persisting events in an auth chain,
+    where we can only persist an event if all its auth events have already been
+    persisted.
+    """
+
+    degree_map = {node: 0 for node in nodes}
+    reverse_graph: Dict[T, Set[T]] = {}
+
+    for node, edges in graph.items():
+        if node not in degree_map:
+            continue
+
+        for edge in set(edges):
+            if edge in degree_map:
+                degree_map[node] += 1
+
+            reverse_graph.setdefault(edge, set()).add(node)
+        reverse_graph.setdefault(node, set())
+
+    zero_degree = [node for node, degree in degree_map.items() if degree == 0]
+
+    while zero_degree:
+        new_zero_degree = []
+        for node in zero_degree:
+            for edge in reverse_graph.get(node, []):
+                if edge in degree_map:
+                    degree_map[edge] -= 1
+                    if degree_map[edge] == 0:
+                        new_zero_degree.append(edge)
+
+        yield zero_degree
+        zero_degree = new_zero_degree
diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py
index caf13b3474..8c2df233d3 100644
--- a/synapse/util/task_scheduler.py
+++ b/synapse/util/task_scheduler.py
@@ -71,7 +71,7 @@ class TaskScheduler:
     # Time before a complete or failed task is deleted from the DB
     KEEP_TASKS_FOR_MS = 7 * 24 * 60 * 60 * 1000  # 1 week
     # Maximum number of tasks that can run at the same time
-    MAX_CONCURRENT_RUNNING_TASKS = 10
+    MAX_CONCURRENT_RUNNING_TASKS = 5
     # Time from the last task update after which we will log a warning
     LAST_UPDATE_BEFORE_WARNING_MS = 24 * 60 * 60 * 1000  # 24hrs
 
@@ -193,7 +193,7 @@ class TaskScheduler:
         result: Optional[JsonMapping] = None,
         error: Optional[str] = None,
     ) -> bool:
-        """Update some task associated values. This is exposed publically so it can
+        """Update some task associated values. This is exposed publicly so it can
         be used inside task functions, mainly to update the result and be able to
         resume a task at a specific step after a restart of synapse.
 
diff --git a/sytest-blacklist b/sytest-blacklist
index d5fa36cec7..9ec0cecfd4 100644
--- a/sytest-blacklist
+++ b/sytest-blacklist
@@ -29,5 +29,5 @@ We can't peek into rooms with joined history_visibility
 Local users can peek by room alias
 Peeked rooms only turn up in the sync for the device who peeked them
 
-# Validation needs to be added to Synapse: #10554
+# Validation needs to be added to Synapse: https://github.com/matrix-org/synapse/issues/10554
 Rejects invalid device keys
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index caf04b54cb..2c970fc827 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -478,7 +478,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
         # expect two edus, in one or two transactions. We don't know what order the
         # devices will be updated.
         self.assertEqual(len(self.edus), 2)
-        stream_id = None  # FIXME: there is a discontinuity in the stream IDs: see #7142
+        stream_id = None  # FIXME: there is a discontinuity in the stream IDs: see https://github.com/matrix-org/synapse/issues/7142
         for edu in self.edus:
             self.assertEqual(edu["edu_type"], EduTypes.DEVICE_LIST_UPDATE)
             c = edu["content"]
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index 90b4da9ad5..07eb63f95e 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -1602,3 +1602,50 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
                 }
             },
         )
+
+    def test_check_cross_signing_setup(self) -> None:
+        # First check what happens with no master key.
+        alice = "@alice:test"
+        exists, replaceable_without_uia = self.get_success(
+            self.handler.check_cross_signing_setup(alice)
+        )
+        self.assertIs(exists, False)
+        self.assertIs(replaceable_without_uia, False)
+
+        # Upload a master key but don't specify a replacement timestamp.
+        dummy_key = {"keys": {"a": "b"}}
+        self.get_success(
+            self.store.set_e2e_cross_signing_key("@alice:test", "master", dummy_key)
+        )
+
+        # Should now find the key exists.
+        exists, replaceable_without_uia = self.get_success(
+            self.handler.check_cross_signing_setup(alice)
+        )
+        self.assertIs(exists, True)
+        self.assertIs(replaceable_without_uia, False)
+
+        # Set an expiry timestamp in the future.
+        self.get_success(
+            self.store.allow_master_cross_signing_key_replacement_without_uia(
+                alice,
+                1000,
+            )
+        )
+
+        # Should now be allowed to replace the key without UIA.
+        exists, replaceable_without_uia = self.get_success(
+            self.handler.check_cross_signing_setup(alice)
+        )
+        self.assertIs(exists, True)
+        self.assertIs(replaceable_without_uia, True)
+
+        # Wait 2 seconds, so that the timestamp is in the past.
+        self.reactor.advance(2.0)
+
+        # Should no longer be allowed to replace the key without UIA.
+        exists, replaceable_without_uia = self.get_success(
+            self.handler.check_cross_signing_setup(alice)
+        )
+        self.assertIs(exists, True)
+        self.assertIs(replaceable_without_uia, False)
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 4fc0742413..a035232905 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -112,7 +112,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
         """
         Check that we store the state group correctly for rejected non-state events.
 
-        Regression test for #6289.
+        Regression test for https://github.com/matrix-org/synapse/issues/6289.
         """
         OTHER_SERVER = "otherserver"
         OTHER_USER = "@otheruser:" + OTHER_SERVER
@@ -165,7 +165,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
         """
         Check that we store the state group correctly for rejected state events.
 
-        Regression test for #6289.
+        Regression test for https://github.com/matrix-org/synapse/issues/6289.
         """
         OTHER_SERVER = "otherserver"
         OTHER_USER = "@otheruser:" + OTHER_SERVER
@@ -222,7 +222,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
         of backwards extremities(the magic number is more than 5),
         no errors are thrown.
 
-        Regression test, see #11027
+        Regression test, see https://github.com/matrix-org/synapse/pull/11027
         """
         # create the room
         user_id = self.register_user("kermit", "test")
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index e9fbf32c7c..032b89d684 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -342,10 +342,10 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
         # Ensure the room is properly not federated.
         room = self.get_success(self.store.get_room_with_stats(room_id["room_id"]))
         assert room is not None
-        self.assertFalse(room["federatable"])
-        self.assertFalse(room["public"])
-        self.assertEqual(room["join_rules"], "public")
-        self.assertIsNone(room["guest_access"])
+        self.assertFalse(room.federatable)
+        self.assertFalse(room.public)
+        self.assertEqual(room.join_rules, "public")
+        self.assertIsNone(room.guest_access)
 
         # The user should be in the room.
         rooms = self.get_success(self.store.get_rooms_for_user(user_id))
@@ -372,7 +372,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
         # Ensure the room is properly a public room.
         room = self.get_success(self.store.get_room_with_stats(room_id["room_id"]))
         assert room is not None
-        self.assertEqual(room["join_rules"], "public")
+        self.assertEqual(room.join_rules, "public")
 
         # Both users should be in the room.
         rooms = self.get_success(self.store.get_rooms_for_user(inviter))
@@ -411,9 +411,9 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
         # Ensure the room is properly a private room.
         room = self.get_success(self.store.get_room_with_stats(room_id["room_id"]))
         assert room is not None
-        self.assertFalse(room["public"])
-        self.assertEqual(room["join_rules"], "invite")
-        self.assertEqual(room["guest_access"], "can_join")
+        self.assertFalse(room.public)
+        self.assertEqual(room.join_rules, "invite")
+        self.assertEqual(room.guest_access, "can_join")
 
         # Both users should be in the room.
         rooms = self.get_success(self.store.get_rooms_for_user(inviter))
@@ -455,9 +455,9 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
         # Ensure the room is properly a private room.
         room = self.get_success(self.store.get_room_with_stats(room_id["room_id"]))
         assert room is not None
-        self.assertFalse(room["public"])
-        self.assertEqual(room["join_rules"], "invite")
-        self.assertEqual(room["guest_access"], "can_join")
+        self.assertFalse(room.public)
+        self.assertEqual(room.join_rules, "invite")
+        self.assertEqual(room.guest_access, "can_join")
 
         # Both users should be in the room.
         rooms = self.get_success(self.store.get_rooms_for_user(inviter))
diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
index 76c56d5434..15e19b15fb 100644
--- a/tests/handlers/test_stats.py
+++ b/tests/handlers/test_stats.py
@@ -84,7 +84,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
 
         cols = list(stats.ABSOLUTE_STATS_FIELDS[stats_type])
 
-        return self.get_success(
+        row = self.get_success(
             self.store.db_pool.simple_select_one(
                 table + "_current",
                 {id_col: stat_id},
@@ -93,6 +93,8 @@ class StatsRoomTests(unittest.HomeserverTestCase):
             )
         )
 
+        return None if row is None else dict(zip(cols, row))
+
     def _perform_background_initial_update(self) -> None:
         # Do the initial population of the stats via the background update
         self._add_background_updates()
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index b5f15aa7d4..388447eea6 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -366,7 +366,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         )
         profile = self.get_success(self.store._get_user_in_directory(regular_user_id))
         assert profile is not None
-        self.assertTrue(profile["display_name"] == display_name)
+        self.assertTrue(profile[0] == display_name)
 
     def test_handle_local_profile_change_with_deactivated_user(self) -> None:
         # create user
@@ -385,7 +385,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         # profile is in directory
         profile = self.get_success(self.store._get_user_in_directory(r_user_id))
         assert profile is not None
-        self.assertTrue(profile["display_name"] == display_name)
+        self.assertEqual(profile[0], display_name)
 
         # deactivate user
         self.get_success(self.store.set_user_deactivated_status(r_user_id, True))
diff --git a/tests/http/__init__.py b/tests/http/__init__.py
index d5306e7ee0..9108a3007b 100644
--- a/tests/http/__init__.py
+++ b/tests/http/__init__.py
@@ -182,7 +182,7 @@ def wrap_server_factory_for_tls(
         )
     else:
         return TLSMemoryBIOFactory(
-            connection_creator, isClient=False, wrappedFactory=factory, clock=clock  # type: ignore[call-arg]
+            connection_creator, isClient=False, wrappedFactory=factory, clock=clock
         )
 
 
diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py
index bf1d287699..b7337d3926 100644
--- a/tests/http/test_matrixfederationclient.py
+++ b/tests/http/test_matrixfederationclient.py
@@ -368,7 +368,8 @@ class FederationClientTests(HomeserverTestCase):
         """
         If a connection is made to a client but the client rejects it due to
         requiring a trailing slash. We need to retry the request with a
-        trailing slash. Workaround for Synapse <= v0.99.3, explained in #3622.
+        trailing slash. Workaround for Synapse <= v0.99.3, explained in
+        https://github.com/matrix-org/synapse/issues/3622.
         """
         d = defer.ensureDeferred(
             self.cl.get_json("testserv:8008", "foo/bar", try_trailing_slash_on_400=True)
diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py
index 15f5d644e4..f262304c3d 100644
--- a/tests/media/test_media_storage.py
+++ b/tests/media/test_media_storage.py
@@ -318,7 +318,9 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         self.assertEqual(
             self.fetches[0][2], "/_matrix/media/r0/download/" + self.media_id
         )
-        self.assertEqual(self.fetches[0][3], {"allow_remote": "false"})
+        self.assertEqual(
+            self.fetches[0][3], {"allow_remote": "false", "timeout_ms": "20000"}
+        )
 
         headers = {
             b"Content-Length": [b"%d" % (len(self.test_image.data))],
@@ -504,7 +506,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         origin, media_id = self.media_id.split("/")
         info = self.get_success(self.store.get_cached_remote_media(origin, media_id))
         assert info is not None
-        file_id = info["filesystem_id"]
+        file_id = info.filesystem_id
 
         thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir(
             origin, file_id
diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py
index 7c23b77e0a..907ee1488c 100644
--- a/tests/push/test_bulk_push_rule_evaluator.py
+++ b/tests/push/test_bulk_push_rule_evaluator.py
@@ -92,7 +92,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
         - the bad power level value for "room", before JSON serisalistion
         - whether Bob should expect the message to be highlighted
 
-        Reproduces #14060.
+        Reproduces https://github.com/matrix-org/synapse/issues/14060.
 
         A lack of validation: the gift that keeps on giving.
         """
diff --git a/tests/replication/tcp/streams/test_to_device.py b/tests/replication/tcp/streams/test_to_device.py
index ab379e8cf1..85adf84ece 100644
--- a/tests/replication/tcp/streams/test_to_device.py
+++ b/tests/replication/tcp/streams/test_to_device.py
@@ -62,7 +62,7 @@ class ToDeviceStreamTestCase(BaseStreamTestCase):
             )
 
         # add one more message, for user2 this time
-        # this message would be dropped before fixing #15335
+        # this message would be dropped before fixing https://github.com/matrix-org/synapse/issues/15335
         msg["content"] = {"device": {}}
         messages = {user2: {"device": msg}}
 
diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py
index 5a38ac831f..20b3d431ba 100644
--- a/tests/replication/tcp/streams/test_typing.py
+++ b/tests/replication/tcp/streams/test_typing.py
@@ -35,6 +35,10 @@ class TypingStreamTestCase(BaseStreamTestCase):
         typing = self.hs.get_typing_handler()
         assert isinstance(typing, TypingWriterHandler)
 
+        # Create a typing update before we reconnect so that there is a missing
+        # update to fetch.
+        typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True)
+
         self.reconnect()
 
         typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True)
@@ -91,6 +95,10 @@ class TypingStreamTestCase(BaseStreamTestCase):
         typing = self.hs.get_typing_handler()
         assert isinstance(typing, TypingWriterHandler)
 
+        # Create a typing update before we reconnect so that there is a missing
+        # update to fetch.
+        typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True)
+
         self.reconnect()
 
         typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True)
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index 278808abb5..dac79bd745 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -642,7 +642,7 @@ class QuarantineMediaByIDTestCase(_AdminMediaTests):
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
         assert media_info is not None
-        self.assertFalse(media_info["quarantined_by"])
+        self.assertFalse(media_info.quarantined_by)
 
         # quarantining
         channel = self.make_request(
@@ -656,7 +656,7 @@ class QuarantineMediaByIDTestCase(_AdminMediaTests):
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
         assert media_info is not None
-        self.assertTrue(media_info["quarantined_by"])
+        self.assertTrue(media_info.quarantined_by)
 
         # remove from quarantine
         channel = self.make_request(
@@ -670,7 +670,7 @@ class QuarantineMediaByIDTestCase(_AdminMediaTests):
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
         assert media_info is not None
-        self.assertFalse(media_info["quarantined_by"])
+        self.assertFalse(media_info.quarantined_by)
 
     def test_quarantine_protected_media(self) -> None:
         """
@@ -683,7 +683,7 @@ class QuarantineMediaByIDTestCase(_AdminMediaTests):
         # verify protection
         media_info = self.get_success(self.store.get_local_media(self.media_id))
         assert media_info is not None
-        self.assertTrue(media_info["safe_from_quarantine"])
+        self.assertTrue(media_info.safe_from_quarantine)
 
         # quarantining
         channel = self.make_request(
@@ -698,7 +698,7 @@ class QuarantineMediaByIDTestCase(_AdminMediaTests):
         # verify that is not in quarantine
         media_info = self.get_success(self.store.get_local_media(self.media_id))
         assert media_info is not None
-        self.assertFalse(media_info["quarantined_by"])
+        self.assertFalse(media_info.quarantined_by)
 
 
 class ProtectMediaByIDTestCase(_AdminMediaTests):
@@ -756,7 +756,7 @@ class ProtectMediaByIDTestCase(_AdminMediaTests):
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
         assert media_info is not None
-        self.assertFalse(media_info["safe_from_quarantine"])
+        self.assertFalse(media_info.safe_from_quarantine)
 
         # protect
         channel = self.make_request(
@@ -770,7 +770,7 @@ class ProtectMediaByIDTestCase(_AdminMediaTests):
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
         assert media_info is not None
-        self.assertTrue(media_info["safe_from_quarantine"])
+        self.assertTrue(media_info.safe_from_quarantine)
 
         # unprotect
         channel = self.make_request(
@@ -784,7 +784,7 @@ class ProtectMediaByIDTestCase(_AdminMediaTests):
 
         media_info = self.get_success(self.store.get_local_media(self.media_id))
         assert media_info is not None
-        self.assertFalse(media_info["safe_from_quarantine"])
+        self.assertFalse(media_info.safe_from_quarantine)
 
 
 class PurgeMediaCacheTestCase(_AdminMediaTests):
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 37f37a09d8..cf71bbb461 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -1478,7 +1478,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
     def test_deactivate_user_erase_true_avatar_nonnull_but_empty(self) -> None:
         """Check we can erase a user whose avatar is the empty string.
 
-        Reproduces #12257.
+        Reproduces https://github.com/matrix-org/synapse/issues/12257.
         """
         # Patch `self.other_user` to have an empty string as their avatar.
         self.get_success(
@@ -2706,7 +2706,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         # is in user directory
         profile = self.get_success(self.store._get_user_in_directory(self.other_user))
         assert profile is not None
-        self.assertTrue(profile["display_name"] == "User")
+        self.assertEqual(profile[0], "User")
 
         # Deactivate user
         channel = self.make_request(
@@ -4854,3 +4854,59 @@ class UsersByThreePidTestCase(unittest.HomeserverTestCase):
             {"user_id": self.other_user},
             channel.json_body,
         )
+
+
+class AllowCrossSigningReplacementTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+    ]
+
+    @staticmethod
+    def url(user: str) -> str:
+        template = (
+            "/_synapse/admin/v1/users/{}/_allow_cross_signing_replacement_without_uia"
+        )
+        return template.format(urllib.parse.quote(user))
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.other_user = self.register_user("user", "pass")
+
+    def test_error_cases(self) -> None:
+        fake_user = "@bums:other"
+        channel = self.make_request(
+            "POST", self.url(fake_user), access_token=self.admin_user_tok
+        )
+        # Fail: user doesn't exist
+        self.assertEqual(404, channel.code, msg=channel.json_body)
+
+        channel = self.make_request(
+            "POST", self.url(self.other_user), access_token=self.admin_user_tok
+        )
+        # Fail: user exists, but has no master cross-signing key
+        self.assertEqual(404, channel.code, msg=channel.json_body)
+
+    def test_success(self) -> None:
+        # Upload a master key.
+        dummy_key = {"keys": {"a": "b"}}
+        self.get_success(
+            self.store.set_e2e_cross_signing_key(self.other_user, "master", dummy_key)
+        )
+
+        channel = self.make_request(
+            "POST", self.url(self.other_user), access_token=self.admin_user_tok
+        )
+        # Success!
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+
+        # Should now find that the key exists.
+        _, timestamp = self.get_success(
+            self.store.get_master_cross_signing_key_updatable_before(self.other_user)
+        )
+        assert timestamp is not None
+        self.assertGreater(timestamp, self.clock.time_msec())
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index cffbda9a7d..bd59bb50cf 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -139,12 +139,12 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
         #
         # Note that we don't have the UI Auth session ID, so just pull out the single
         # row.
-        ui_auth_data = self.get_success(
-            self.store.db_pool.simple_select_one(
-                "ui_auth_sessions", keyvalues={}, retcols=("clientdict",)
+        result = self.get_success(
+            self.store.db_pool.simple_select_one_onecol(
+                "ui_auth_sessions", keyvalues={}, retcol="clientdict"
             )
         )
-        client_dict = db_to_json(ui_auth_data["clientdict"])
+        client_dict = db_to_json(result)
         self.assertNotIn("new_password", client_dict)
 
     @override_config({"rc_3pid_validation": {"burst_count": 3}})
diff --git a/tests/rest/client/test_events.py b/tests/rest/client/test_events.py
index 141e0f57a3..8bea860beb 100644
--- a/tests/rest/client/test_events.py
+++ b/tests/rest/client/test_events.py
@@ -64,7 +64,7 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase):
         # 403. However, since the v1 spec no longer exists and the v1
         # implementation is now part of the r0 implementation, the newer
         # behaviour is used instead to be consistent with the r0 spec.
-        # see issue #2602
+        # see issue https://github.com/matrix-org/synapse/issues/2602
         channel = self.make_request(
             "GET", "/events?access_token=%s" % ("invalid" + self.token,)
         )
diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py
index 8ee5489057..9f81a695fa 100644
--- a/tests/rest/client/test_keys.py
+++ b/tests/rest/client/test_keys.py
@@ -11,8 +11,9 @@
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License
-
+import urllib.parse
 from http import HTTPStatus
+from unittest.mock import patch
 
 from signedjson.key import (
     encode_verify_key_base64,
@@ -24,12 +25,19 @@ from signedjson.sign import sign_json
 from synapse.api.errors import Codes
 from synapse.rest import admin
 from synapse.rest.client import keys, login
-from synapse.types import JsonDict
+from synapse.types import JsonDict, Requester, create_requester
 
 from tests import unittest
 from tests.http.server._base import make_request_with_cancellation_test
 from tests.unittest import override_config
 
+try:
+    import authlib  # noqa: F401
+
+    HAS_AUTHLIB = True
+except ImportError:
+    HAS_AUTHLIB = False
+
 
 class KeyQueryTestCase(unittest.HomeserverTestCase):
     servlets = [
@@ -259,3 +267,179 @@ class KeyQueryTestCase(unittest.HomeserverTestCase):
             alice_token,
         )
         self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+
+
+class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        admin.register_servlets,
+        keys.register_servlets,
+    ]
+
+    OIDC_ADMIN_TOKEN = "_oidc_admin_token"
+
+    @unittest.skip_unless(HAS_AUTHLIB, "requires authlib")
+    @override_config(
+        {
+            "enable_registration": False,
+            "experimental_features": {
+                "msc3861": {
+                    "enabled": True,
+                    "issuer": "https://issuer",
+                    "account_management_url": "https://my-account.issuer",
+                    "client_id": "id",
+                    "client_auth_method": "client_secret_post",
+                    "client_secret": "secret",
+                    "admin_token": OIDC_ADMIN_TOKEN,
+                },
+            },
+        }
+    )
+    def test_master_cross_signing_key_replacement_msc3861(self) -> None:
+        # Provision a user like MAS would, cribbing from
+        # https://github.com/matrix-org/matrix-authentication-service/blob/08d46a79a4adb22819ac9d55e15f8375dfe2c5c7/crates/matrix-synapse/src/lib.rs#L224-L229
+        alice = "@alice:test"
+        channel = self.make_request(
+            "PUT",
+            f"/_synapse/admin/v2/users/{urllib.parse.quote(alice)}",
+            access_token=self.OIDC_ADMIN_TOKEN,
+            content={},
+        )
+        self.assertEqual(channel.code, HTTPStatus.CREATED, channel.json_body)
+
+        # Provision a device like MAS would, cribbing from
+        # https://github.com/matrix-org/matrix-authentication-service/blob/08d46a79a4adb22819ac9d55e15f8375dfe2c5c7/crates/matrix-synapse/src/lib.rs#L260-L262
+        alice_device = "alice_device"
+        channel = self.make_request(
+            "POST",
+            f"/_synapse/admin/v2/users/{urllib.parse.quote(alice)}/devices",
+            access_token=self.OIDC_ADMIN_TOKEN,
+            content={"device_id": alice_device},
+        )
+        self.assertEqual(channel.code, HTTPStatus.CREATED, channel.json_body)
+
+        # Prepare a mock MAS access token.
+        alice_token = "alice_token_1234_oidcwhatyoudidthere"
+
+        async def mocked_get_user_by_access_token(
+            token: str, allow_expired: bool = False
+        ) -> Requester:
+            self.assertEqual(token, alice_token)
+            return create_requester(
+                user_id=alice,
+                device_id=alice_device,
+                scope=[],
+                is_guest=False,
+            )
+
+        patch_get_user_by_access_token = patch.object(
+            self.hs.get_auth(),
+            "get_user_by_access_token",
+            wraps=mocked_get_user_by_access_token,
+        )
+
+        # Copied from E2eKeysHandlerTestCase
+        master_pubkey = "nqOvzeuGWT/sRx3h7+MHoInYj3Uk2LD/unI9kDYcHwk"
+        master_pubkey2 = "fHZ3NPiKxoLQm5OoZbKa99SYxprOjNs4TwJUKP+twCM"
+        master_pubkey3 = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY"
+
+        master_key: JsonDict = {
+            "user_id": alice,
+            "usage": ["master"],
+            "keys": {"ed25519:" + master_pubkey: master_pubkey},
+        }
+        master_key2: JsonDict = {
+            "user_id": alice,
+            "usage": ["master"],
+            "keys": {"ed25519:" + master_pubkey2: master_pubkey2},
+        }
+        master_key3: JsonDict = {
+            "user_id": alice,
+            "usage": ["master"],
+            "keys": {"ed25519:" + master_pubkey3: master_pubkey3},
+        }
+
+        with patch_get_user_by_access_token:
+            # Upload an initial cross-signing key.
+            channel = self.make_request(
+                "POST",
+                "/_matrix/client/v3/keys/device_signing/upload",
+                access_token=alice_token,
+                content={
+                    "master_key": master_key,
+                },
+            )
+            self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+
+            # Should not be able to upload another master key.
+            channel = self.make_request(
+                "POST",
+                "/_matrix/client/v3/keys/device_signing/upload",
+                access_token=alice_token,
+                content={
+                    "master_key": master_key2,
+                },
+            )
+            self.assertEqual(
+                channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body
+            )
+
+        # Pretend that MAS did UIA and allowed us to replace the master key.
+        channel = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/users/{urllib.parse.quote(alice)}/_allow_cross_signing_replacement_without_uia",
+            access_token=self.OIDC_ADMIN_TOKEN,
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+
+        with patch_get_user_by_access_token:
+            # Should now be able to upload master key2.
+            channel = self.make_request(
+                "POST",
+                "/_matrix/client/v3/keys/device_signing/upload",
+                access_token=alice_token,
+                content={
+                    "master_key": master_key2,
+                },
+            )
+            self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+
+            # Even though we're still in the grace period, we shouldn't be able to
+            # upload master key 3 immediately after uploading key 2.
+            channel = self.make_request(
+                "POST",
+                "/_matrix/client/v3/keys/device_signing/upload",
+                access_token=alice_token,
+                content={
+                    "master_key": master_key3,
+                },
+            )
+            self.assertEqual(
+                channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body
+            )
+
+        # Pretend that MAS did UIA and allowed us to replace the master key.
+        channel = self.make_request(
+            "POST",
+            f"/_synapse/admin/v1/users/{urllib.parse.quote(alice)}/_allow_cross_signing_replacement_without_uia",
+            access_token=self.OIDC_ADMIN_TOKEN,
+        )
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        timestamp_ms = channel.json_body["updatable_without_uia_before_ms"]
+
+        # Advance to 1 second after the replacement period ends.
+        self.reactor.advance(timestamp_ms - self.clock.time_msec() + 1000)
+
+        with patch_get_user_by_access_token:
+            # We should not be able to upload master key3 because the replacement has
+            # expired.
+            channel = self.make_request(
+                "POST",
+                "/_matrix/client/v3/keys/device_signing/upload",
+                access_token=alice_token,
+                content={
+                    "master_key": master_key3,
+                },
+            )
+            self.assertEqual(
+                channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body
+            )
diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py
index ecae092b47..8f923fd40f 100644
--- a/tests/rest/client/test_profile.py
+++ b/tests/rest/client/test_profile.py
@@ -170,7 +170,8 @@ class ProfileTestCase(unittest.HomeserverTestCase):
         )
         self.assertEqual(channel.code, 200, channel.result)
         # FIXME: If a user has no displayname set, Synapse returns 200 and omits a
-        # displayname from the response. This contradicts the spec, see #13137.
+        # displayname from the response. This contradicts the spec, see
+        # https://github.com/matrix-org/synapse/issues/13137.
         return channel.json_body.get("displayname")
 
     def _get_avatar_url(self, name: Optional[str] = None) -> Optional[str]:
@@ -179,7 +180,8 @@ class ProfileTestCase(unittest.HomeserverTestCase):
         )
         self.assertEqual(channel.code, 200, channel.result)
         # FIXME: If a user has no avatar set, Synapse returns 200 and omits an
-        # avatar_url from the response. This contradicts the spec, see #13137.
+        # avatar_url from the response. This contradicts the spec, see
+        # https://github.com/matrix-org/synapse/issues/13137.
         return channel.json_body.get("avatar_url")
 
     @unittest.override_config({"max_avatar_size": 50})
diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py
index ba4e017a0e..b04094b7b3 100644
--- a/tests/rest/client/test_register.py
+++ b/tests/rest/client/test_register.py
@@ -270,15 +270,15 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
         self.assertLessEqual(det_data.items(), channel.json_body.items())
 
         # Check the `completed` counter has been incremented and pending is 0
-        res = self.get_success(
+        pending, completed = self.get_success(
             store.db_pool.simple_select_one(
                 "registration_tokens",
                 keyvalues={"token": token},
                 retcols=["pending", "completed"],
             )
         )
-        self.assertEqual(res["completed"], 1)
-        self.assertEqual(res["pending"], 0)
+        self.assertEqual(completed, 1)
+        self.assertEqual(pending, 0)
 
     @override_config({"registration_requires_token": True})
     def test_POST_registration_token_invalid(self) -> None:
@@ -372,15 +372,15 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
         params1["auth"]["type"] = LoginType.DUMMY
         self.make_request(b"POST", self.url, params1)
         # Check pending=0 and completed=1
-        res = self.get_success(
+        pending, completed = self.get_success(
             store.db_pool.simple_select_one(
                 "registration_tokens",
                 keyvalues={"token": token},
                 retcols=["pending", "completed"],
             )
         )
-        self.assertEqual(res["pending"], 0)
-        self.assertEqual(res["completed"], 1)
+        self.assertEqual(pending, 0)
+        self.assertEqual(completed, 1)
 
         # Check auth still fails when using token with session2
         channel = self.make_request(b"POST", self.url, params2)
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index aaa4f3bba0..bb24ed6aa7 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -888,7 +888,8 @@ class RoomsCreateTestCase(RoomBase):
     )
     def test_room_creation_ratelimiting(self) -> None:
         """
-        Regression test for #14312, where ratelimiting was made too strict.
+        Regression test for https://github.com/matrix-org/synapse/issues/14312,
+        where ratelimiting was made too strict.
         Clients should be able to create 10 rooms in a row
         without hitting rate limits, using default rate limit config.
         (We override rate limiting config back to its default value.)
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index d60665254e..07c81d7f76 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -642,7 +642,7 @@ class SyncCacheTestCase(unittest.HomeserverTestCase):
     def test_noop_sync_does_not_tightloop(self) -> None:
         """If the sync times out, we shouldn't cache the result
 
-        Essentially a regression test for #8518.
+        Essentially a regression test for https://github.com/matrix-org/synapse/issues/8518.
         """
         self.user_id = self.register_user("kermit", "monkey")
         self.tok = self.login("kermit", "monkey")
diff --git a/tests/rest/media/test_media_retention.py b/tests/rest/media/test_media_retention.py
index b59d9dfd4d..27a663a23b 100644
--- a/tests/rest/media/test_media_retention.py
+++ b/tests/rest/media/test_media_retention.py
@@ -267,23 +267,23 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
         def _assert_mxc_uri_purge_state(mxc_uri: MXCUri, expect_purged: bool) -> None:
             """Given an MXC URI, assert whether it has been purged or not."""
             if mxc_uri.server_name == self.hs.config.server.server_name:
-                found_media_dict = self.get_success(
-                    self.store.get_local_media(mxc_uri.media_id)
+                found_media = bool(
+                    self.get_success(self.store.get_local_media(mxc_uri.media_id))
                 )
             else:
-                found_media_dict = self.get_success(
-                    self.store.get_cached_remote_media(
-                        mxc_uri.server_name, mxc_uri.media_id
+                found_media = bool(
+                    self.get_success(
+                        self.store.get_cached_remote_media(
+                            mxc_uri.server_name, mxc_uri.media_id
+                        )
                     )
                 )
 
             if expect_purged:
-                self.assertIsNone(
-                    found_media_dict, msg=f"{mxc_uri} unexpectedly not purged"
-                )
+                self.assertFalse(found_media, msg=f"{mxc_uri} unexpectedly not purged")
             else:
-                self.assertIsNotNone(
-                    found_media_dict,
+                self.assertTrue(
+                    found_media,
                     msg=f"{mxc_uri} unexpectedly purged",
                 )
 
diff --git a/tests/server.py b/tests/server.py
index cfb0fb823b..2b63ed3dd8 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -88,7 +88,7 @@ from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
 from synapse.server import HomeServer
 from synapse.storage import DataStore
 from synapse.storage.database import LoggingDatabaseConnection
-from synapse.storage.engines import PostgresEngine, create_engine
+from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
 from synapse.types import ISynapseReactor, JsonDict
 from synapse.util import Clock
@@ -484,7 +484,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock):
         if twisted.version > Version("Twisted", 23, 8, 0):
             from twisted.protocols import tls
 
-            tls._get_default_clock = lambda: self  # type: ignore[attr-defined]
+            tls._get_default_clock = lambda: self
 
         self.nameResolver = SimpleResolverComplexifier(FakeResolver())
         super().__init__()
@@ -974,7 +974,7 @@ def setup_test_homeserver(
         database_config = {
             "name": "psycopg2",
             "args": {
-                "database": test_db,
+                "dbname": test_db,
                 "host": POSTGRES_HOST,
                 "password": POSTGRES_PASSWORD,
                 "user": POSTGRES_USER,
@@ -1029,18 +1029,15 @@ def setup_test_homeserver(
 
     # Create the database before we actually try and connect to it, based off
     # the template database we generate in setupdb()
-    if isinstance(db_engine, PostgresEngine):
-        import psycopg2.extensions
-
+    if USE_POSTGRES_FOR_TESTS:
         db_conn = db_engine.module.connect(
-            database=POSTGRES_BASE_DB,
+            dbname=POSTGRES_BASE_DB,
             user=POSTGRES_USER,
             host=POSTGRES_HOST,
             port=POSTGRES_PORT,
             password=POSTGRES_PASSWORD,
         )
-        assert isinstance(db_conn, psycopg2.extensions.connection)
-        db_conn.autocommit = True
+        db_engine.attempt_to_set_autocommit(db_conn, True)
         cur = db_conn.cursor()
         cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
         cur.execute(
@@ -1065,13 +1062,12 @@ def setup_test_homeserver(
 
     hs.setup()
 
-    if isinstance(db_engine, PostgresEngine):
+    if USE_POSTGRES_FOR_TESTS:
         database_pool = hs.get_datastores().databases[0]
 
         # We need to do cleanup on PostgreSQL
         def cleanup() -> None:
             import psycopg2
-            import psycopg2.extensions
 
             # Close all the db pools
             database_pool._db_pool.close()
@@ -1080,14 +1076,13 @@ def setup_test_homeserver(
 
             # Drop the test database
             db_conn = db_engine.module.connect(
-                database=POSTGRES_BASE_DB,
+                dbname=POSTGRES_BASE_DB,
                 user=POSTGRES_USER,
                 host=POSTGRES_HOST,
                 port=POSTGRES_PORT,
                 password=POSTGRES_PASSWORD,
             )
-            assert isinstance(db_conn, psycopg2.extensions.connection)
-            db_conn.autocommit = True
+            db_engine.attempt_to_set_autocommit(db_conn, True)
             cur = db_conn.cursor()
 
             # Try a few times to drop the DB. Some things may hold on to the
diff --git a/tests/storage/databases/main/test_cache.py b/tests/storage/databases/main/test_cache.py
new file mode 100644
index 0000000000..3f71f5d102
--- /dev/null
+++ b/tests/storage/databases/main/test_cache.py
@@ -0,0 +1,117 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from unittest.mock import Mock, call
+
+from synapse.storage.database import LoggingTransaction
+
+from tests.replication._base import BaseMultiWorkerStreamTestCase
+from tests.unittest import HomeserverTestCase
+
+
+class CacheInvalidationTestCase(HomeserverTestCase):
+    def setUp(self) -> None:
+        super().setUp()
+        self.store = self.hs.get_datastores().main
+
+    def test_bulk_invalidation(self) -> None:
+        master_invalidate = Mock()
+
+        self.store._get_cached_user_device.invalidate = master_invalidate
+
+        keys_to_invalidate = [
+            ("a", "b"),
+            ("c", "d"),
+            ("e", "f"),
+            ("g", "h"),
+        ]
+
+        def test_txn(txn: LoggingTransaction) -> None:
+            self.store._invalidate_cache_and_stream_bulk(
+                txn,
+                # This is an arbitrarily chosen cached store function. It was chosen
+                # because it takes more than one argument. We'll use this later to
+                # check that the invalidation was actioned over replication.
+                cache_func=self.store._get_cached_user_device,
+                key_tuples=keys_to_invalidate,
+            )
+
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "test_invalidate_cache_and_stream_bulk", test_txn
+            )
+        )
+
+        master_invalidate.assert_has_calls(
+            [call(key_list) for key_list in keys_to_invalidate],
+            any_order=True,
+        )
+
+
+class CacheInvalidationOverReplicationTestCase(BaseMultiWorkerStreamTestCase):
+    def setUp(self) -> None:
+        super().setUp()
+        self.store = self.hs.get_datastores().main
+
+    def test_bulk_invalidation_replicates(self) -> None:
+        """Like test_bulk_invalidation, but also checks the invalidations replicate."""
+        master_invalidate = Mock()
+        worker_invalidate = Mock()
+
+        self.store._get_cached_user_device.invalidate = master_invalidate
+        worker = self.make_worker_hs("synapse.app.generic_worker")
+        worker_ds = worker.get_datastores().main
+        worker_ds._get_cached_user_device.invalidate = worker_invalidate
+
+        keys_to_invalidate = [
+            ("a", "b"),
+            ("c", "d"),
+            ("e", "f"),
+            ("g", "h"),
+        ]
+
+        def test_txn(txn: LoggingTransaction) -> None:
+            self.store._invalidate_cache_and_stream_bulk(
+                txn,
+                # This is an arbitrarily chosen cached store function. It was chosen
+                # because it takes more than one argument. We'll use this later to
+                # check that the invalidation was actioned over replication.
+                cache_func=self.store._get_cached_user_device,
+                key_tuples=keys_to_invalidate,
+            )
+
+        assert self.store._cache_id_gen is not None
+        initial_token = self.store._cache_id_gen.get_current_token()
+        self.get_success(
+            self.database_pool.runInteraction(
+                "test_invalidate_cache_and_stream_bulk", test_txn
+            )
+        )
+        second_token = self.store._cache_id_gen.get_current_token()
+
+        self.assertGreaterEqual(second_token, initial_token + len(keys_to_invalidate))
+
+        self.get_success(
+            worker.get_replication_data_handler().wait_for_stream_position(
+                "master", "caches", second_token
+            )
+        )
+
+        master_invalidate.assert_has_calls(
+            [call(key_list) for key_list in keys_to_invalidate],
+            any_order=True,
+        )
+        worker_invalidate.assert_has_calls(
+            [call(key_list) for key_list in keys_to_invalidate],
+            any_order=True,
+        )
diff --git a/tests/storage/databases/main/test_end_to_end_keys.py b/tests/storage/databases/main/test_end_to_end_keys.py
new file mode 100644
index 0000000000..23e6f82c75
--- /dev/null
+++ b/tests/storage/databases/main/test_end_to_end_keys.py
@@ -0,0 +1,121 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List, Optional, Tuple
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
+from synapse.storage._base import db_to_json
+from synapse.storage.database import LoggingTransaction
+from synapse.types import JsonDict
+from synapse.util import Clock
+
+from tests.unittest import HomeserverTestCase
+
+
+class EndToEndKeyWorkerStoreTestCase(HomeserverTestCase):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+
+    def test_get_master_cross_signing_key_updatable_before(self) -> None:
+        # Should return False, None when there is no master key.
+        alice = "@alice:test"
+        exists, timestamp = self.get_success(
+            self.store.get_master_cross_signing_key_updatable_before(alice)
+        )
+        self.assertIs(exists, False)
+        self.assertIsNone(timestamp)
+
+        # Upload a master key.
+        dummy_key = {"keys": {"a": "b"}}
+        self.get_success(
+            self.store.set_e2e_cross_signing_key(alice, "master", dummy_key)
+        )
+
+        # Should now find that the key exists.
+        exists, timestamp = self.get_success(
+            self.store.get_master_cross_signing_key_updatable_before(alice)
+        )
+        self.assertIs(exists, True)
+        self.assertIsNone(timestamp)
+
+        # Write an updatable_without_uia_before_ms timestamp.
+        written_timestamp = self.get_success(
+            self.store.allow_master_cross_signing_key_replacement_without_uia(
+                alice, 1000
+            )
+        )
+
+        # The key should still exist, and the timestamp should now be set.
+        exists, timestamp = self.get_success(
+            self.store.get_master_cross_signing_key_updatable_before(alice)
+        )
+        self.assertIs(exists, True)
+        self.assertEqual(timestamp, written_timestamp)
+
+    def test_master_replacement_only_applies_to_latest_master_key(
+        self,
+    ) -> None:
+        """We shouldn't allow updates w/o UIA to old master keys or other key types."""
+        alice = "@alice:test"
+        # Upload two master keys, with a key of a different type in between.
+        key1 = {"keys": {"a": "b"}}
+        key2 = {"keys": {"c": "d"}}
+        key3 = {"keys": {"e": "f"}}
+        self.get_success(self.store.set_e2e_cross_signing_key(alice, "master", key1))
+        self.get_success(self.store.set_e2e_cross_signing_key(alice, "other", key2))
+        self.get_success(self.store.set_e2e_cross_signing_key(alice, "master", key3))
+
+        # Third key should be the current one.
+        key = self.get_success(
+            self.store.get_e2e_cross_signing_key(alice, "master", alice)
+        )
+        self.assertEqual(key, key3)
+
+        timestamp = self.get_success(
+            self.store.allow_master_cross_signing_key_replacement_without_uia(
+                alice, 1000
+            )
+        )
+        assert timestamp is not None
+
+        def check_timestamp_column(
+            txn: LoggingTransaction,
+        ) -> List[Tuple[JsonDict, Optional[int]]]:
+            """Fetch all rows for Alice's keys."""
+            txn.execute(
+                """
+                SELECT keydata, updatable_without_uia_before_ms
+                FROM e2e_cross_signing_keys
+                WHERE user_id = ?
+                ORDER BY stream_id ASC;
+            """,
+                (alice,),
+            )
+            return [(db_to_json(keydata), ts) for keydata, ts in txn.fetchall()]
+
+        values = self.get_success(
+            self.store.db_pool.runInteraction(
+                "check_timestamp_column",
+                check_timestamp_column,
+            )
+        )
+        self.assertEqual(
+            values,
+            [
+                (key1, None),
+                (key2, None),
+                (key3, timestamp),
+            ],
+        )
diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py
index 35f77052a7..6c4d44c05c 100644
--- a/tests/storage/databases/main/test_lock.py
+++ b/tests/storage/databases/main/test_lock.py
@@ -66,9 +66,9 @@ class LockTestCase(unittest.HomeserverTestCase):
 
         # Run the tasks to completion.
         # To work around `Linearizer`s using a different reactor to sleep when
-        # contended (#12841), we call `runUntilCurrent` on
-        # `twisted.internet.reactor`, which is a different reactor to that used
-        # by the homeserver.
+        # contended (https://github.com/matrix-org/synapse/issues/12841), we call
+        # `runUntilCurrent` on `twisted.internet.reactor`, which is a different
+        # reactor to that used by the homeserver.
         assert isinstance(reactor, ReactorBase)
         self.get_success(task1)
         reactor.runUntilCurrent()
@@ -217,9 +217,9 @@ class ReadWriteLockTestCase(unittest.HomeserverTestCase):
 
         # Run the tasks to completion.
         # To work around `Linearizer`s using a different reactor to sleep when
-        # contended (#12841), we call `runUntilCurrent` on
-        # `twisted.internet.reactor`, which is a different reactor to that used
-        # by the homeserver.
+        # contended (https://github.com/matrix-org/synapse/issues/12841), we call
+        # `runUntilCurrent` on `twisted.internet.reactor`, which is a different
+        # reactor to that used by the homeserver.
         assert isinstance(reactor, ReactorBase)
         self.get_success(task1)
         reactor.runUntilCurrent()
@@ -269,9 +269,9 @@ class ReadWriteLockTestCase(unittest.HomeserverTestCase):
 
         # Run the tasks to completion.
         # To work around `Linearizer`s using a different reactor to sleep when
-        # contended (#12841), we call `runUntilCurrent` on
-        # `twisted.internet.reactor`, which is a different reactor to that used
-        # by the homeserver.
+        # contended (https://github.com/matrix-org/synapse/issues/12841), we call
+        # `runUntilCurrent` on `twisted.internet.reactor`, which is a different
+        # reactor to that used by the homeserver.
         assert isinstance(reactor, ReactorBase)
         self.get_success(task1)
         reactor.runUntilCurrent()
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index e4a52c301e..491e6d5e63 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -14,7 +14,7 @@
 
 from collections import OrderedDict
 from typing import Generator
-from unittest.mock import Mock
+from unittest.mock import Mock, call, patch
 
 from twisted.internet import defer
 
@@ -24,43 +24,90 @@ from synapse.storage.engines import create_engine
 
 from tests import unittest
 from tests.server import TestHomeServer
-from tests.utils import default_config
+from tests.utils import USE_POSTGRES_FOR_TESTS, default_config
 
 
 class SQLBaseStoreTestCase(unittest.TestCase):
     """Test the "simple" SQL generating methods in SQLBaseStore."""
 
     def setUp(self) -> None:
-        self.db_pool = Mock(spec=["runInteraction"])
+        # This is the Twisted connection pool.
+        conn_pool = Mock(spec=["runInteraction", "runWithConnection"])
         self.mock_txn = Mock()
-        self.mock_conn = Mock(spec_set=["cursor", "rollback", "commit"])
+        if USE_POSTGRES_FOR_TESTS:
+            # To avoid testing psycopg2 itself, patch execute_batch/execute_values
+            # to assert how it is called.
+            from psycopg2 import extras
+
+            self.mock_execute_batch = Mock()
+            self.execute_batch_patcher = patch.object(
+                extras, "execute_batch", new=self.mock_execute_batch
+            )
+            self.execute_batch_patcher.start()
+            self.mock_execute_values = Mock()
+            self.execute_values_patcher = patch.object(
+                extras, "execute_values", new=self.mock_execute_values
+            )
+            self.execute_values_patcher.start()
+
+            self.mock_conn = Mock(
+                spec_set=[
+                    "cursor",
+                    "rollback",
+                    "commit",
+                    "closed",
+                    "reconnect",
+                    "set_session",
+                    "encoding",
+                ]
+            )
+            self.mock_conn.encoding = "UNICODE"
+        else:
+            self.mock_conn = Mock(spec_set=["cursor", "rollback", "commit"])
         self.mock_conn.cursor.return_value = self.mock_txn
+        self.mock_txn.connection = self.mock_conn
         self.mock_conn.rollback.return_value = None
         # Our fake runInteraction just runs synchronously inline
 
         def runInteraction(func, *args, **kwargs) -> defer.Deferred:  # type: ignore[no-untyped-def]
             return defer.succeed(func(self.mock_txn, *args, **kwargs))
 
-        self.db_pool.runInteraction = runInteraction
+        conn_pool.runInteraction = runInteraction
 
         def runWithConnection(func, *args, **kwargs):  # type: ignore[no-untyped-def]
             return defer.succeed(func(self.mock_conn, *args, **kwargs))
 
-        self.db_pool.runWithConnection = runWithConnection
+        conn_pool.runWithConnection = runWithConnection
 
         config = default_config(name="test", parse=True)
         hs = TestHomeServer("test", config=config)
 
-        sqlite_config = {"name": "sqlite3"}
-        engine = create_engine(sqlite_config)
+        if USE_POSTGRES_FOR_TESTS:
+            db_config = {"name": "psycopg2", "args": {}}
+        else:
+            db_config = {"name": "sqlite3"}
+        engine = create_engine(db_config)
+
         fake_engine = Mock(wraps=engine)
         fake_engine.in_transaction.return_value = False
+        fake_engine.module.OperationalError = engine.module.OperationalError
+        fake_engine.module.DatabaseError = engine.module.DatabaseError
+        fake_engine.module.IntegrityError = engine.module.IntegrityError
+        # Don't convert param style to make assertions easier.
+        fake_engine.convert_param_style = lambda sql: sql
+        # To fix isinstance(...) checks.
+        fake_engine.__class__ = engine.__class__  # type: ignore[assignment]
 
-        db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine)
-        db._db_pool = self.db_pool
+        db = DatabasePool(Mock(), Mock(config=db_config), fake_engine)
+        db._db_pool = conn_pool
 
         self.datastore = SQLBaseStore(db, None, hs)  # type: ignore[arg-type]
 
+    def tearDown(self) -> None:
+        if USE_POSTGRES_FOR_TESTS:
+            self.execute_batch_patcher.stop()
+            self.execute_values_patcher.stop()
+
     @defer.inlineCallbacks
     def test_insert_1col(self) -> Generator["defer.Deferred[object]", object, None]:
         self.mock_txn.rowcount = 1
@@ -71,7 +118,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
             )
         )
 
-        self.mock_txn.execute.assert_called_with(
+        self.mock_txn.execute.assert_called_once_with(
             "INSERT INTO tablename (columname) VALUES(?)", ("Value",)
         )
 
@@ -87,11 +134,66 @@ class SQLBaseStoreTestCase(unittest.TestCase):
             )
         )
 
-        self.mock_txn.execute.assert_called_with(
+        self.mock_txn.execute.assert_called_once_with(
             "INSERT INTO tablename (colA, colB, colC) VALUES(?, ?, ?)", (1, 2, 3)
         )
 
     @defer.inlineCallbacks
+    def test_insert_many(self) -> Generator["defer.Deferred[object]", object, None]:
+        yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_insert_many(
+                table="tablename",
+                keys=(
+                    "col1",
+                    "col2",
+                ),
+                values=[
+                    (
+                        "val1",
+                        "val2",
+                    ),
+                    ("val3", "val4"),
+                ],
+                desc="",
+            )
+        )
+
+        if USE_POSTGRES_FOR_TESTS:
+            self.mock_execute_values.assert_called_once_with(
+                self.mock_txn,
+                "INSERT INTO tablename (col1, col2) VALUES ?",
+                [("val1", "val2"), ("val3", "val4")],
+                template=None,
+                fetch=False,
+            )
+        else:
+            self.mock_txn.executemany.assert_called_once_with(
+                "INSERT INTO tablename (col1, col2) VALUES(?, ?)",
+                [("val1", "val2"), ("val3", "val4")],
+            )
+
+    @defer.inlineCallbacks
+    def test_insert_many_no_iterable(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_insert_many(
+                table="tablename",
+                keys=(
+                    "col1",
+                    "col2",
+                ),
+                values=[],
+                desc="",
+            )
+        )
+
+        if USE_POSTGRES_FOR_TESTS:
+            self.mock_execute_values.assert_not_called()
+        else:
+            self.mock_txn.executemany.assert_not_called()
+
+    @defer.inlineCallbacks
     def test_select_one_1col(self) -> Generator["defer.Deferred[object]", object, None]:
         self.mock_txn.rowcount = 1
         self.mock_txn.__iter__ = Mock(return_value=iter([("Value",)]))
@@ -103,7 +205,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         )
 
         self.assertEqual("Value", value)
-        self.mock_txn.execute.assert_called_with(
+        self.mock_txn.execute.assert_called_once_with(
             "SELECT retcol FROM tablename WHERE keycol = ?", ["TheKey"]
         )
 
@@ -120,8 +222,8 @@ class SQLBaseStoreTestCase(unittest.TestCase):
             )
         )
 
-        self.assertEqual({"colA": 1, "colB": 2, "colC": 3}, ret)
-        self.mock_txn.execute.assert_called_with(
+        self.assertEqual((1, 2, 3), ret)
+        self.mock_txn.execute.assert_called_once_with(
             "SELECT colA, colB, colC FROM tablename WHERE keycol = ?", ["TheKey"]
         )
 
@@ -141,7 +243,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
             )
         )
 
-        self.assertFalse(ret)
+        self.assertIsNone(ret)
 
     @defer.inlineCallbacks
     def test_select_list(self) -> Generator["defer.Deferred[object]", object, None]:
@@ -156,11 +258,59 @@ class SQLBaseStoreTestCase(unittest.TestCase):
         )
 
         self.assertEqual([(1,), (2,), (3,)], ret)
-        self.mock_txn.execute.assert_called_with(
+        self.mock_txn.execute.assert_called_once_with(
             "SELECT colA FROM tablename WHERE keycol = ?", ["A set"]
         )
 
     @defer.inlineCallbacks
+    def test_select_many_batch(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        self.mock_txn.rowcount = 3
+        self.mock_txn.fetchall.side_effect = [[(1,), (2,)], [(3,)]]
+
+        ret = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_select_many_batch(
+                table="tablename",
+                column="col1",
+                iterable=("val1", "val2", "val3"),
+                retcols=("col2",),
+                keyvalues={"col3": "val4"},
+                batch_size=2,
+            )
+        )
+
+        self.mock_txn.execute.assert_has_calls(
+            [
+                call(
+                    "SELECT col2 FROM tablename WHERE col1 = ANY(?) AND col3 = ?",
+                    [["val1", "val2"], "val4"],
+                ),
+                call(
+                    "SELECT col2 FROM tablename WHERE col1 = ANY(?) AND col3 = ?",
+                    [["val3"], "val4"],
+                ),
+            ],
+        )
+        self.assertEqual([(1,), (2,), (3,)], ret)
+
+    def test_select_many_no_iterable(self) -> None:
+        self.mock_txn.rowcount = 3
+        self.mock_txn.fetchall.side_effect = [(1,), (2,)]
+
+        ret = self.datastore.db_pool.simple_select_many_txn(
+            self.mock_txn,
+            table="tablename",
+            column="col1",
+            iterable=(),
+            retcols=("col2",),
+            keyvalues={"col3": "val4"},
+        )
+
+        self.mock_txn.execute.assert_not_called()
+        self.assertEqual([], ret)
+
+    @defer.inlineCallbacks
     def test_update_one_1col(self) -> Generator["defer.Deferred[object]", object, None]:
         self.mock_txn.rowcount = 1
 
@@ -172,7 +322,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
             )
         )
 
-        self.mock_txn.execute.assert_called_with(
+        self.mock_txn.execute.assert_called_once_with(
             "UPDATE tablename SET columnname = ? WHERE keycol = ?",
             ["New Value", "TheKey"],
         )
@@ -191,12 +341,70 @@ class SQLBaseStoreTestCase(unittest.TestCase):
             )
         )
 
-        self.mock_txn.execute.assert_called_with(
+        self.mock_txn.execute.assert_called_once_with(
             "UPDATE tablename SET colC = ?, colD = ? WHERE" " colA = ? AND colB = ?",
             [3, 4, 1, 2],
         )
 
     @defer.inlineCallbacks
+    def test_update_many(self) -> Generator["defer.Deferred[object]", object, None]:
+        yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_update_many(
+                table="tablename",
+                key_names=("col1", "col2"),
+                key_values=[("val1", "val2")],
+                value_names=("col3",),
+                value_values=[("val3",)],
+                desc="",
+            )
+        )
+
+        if USE_POSTGRES_FOR_TESTS:
+            self.mock_execute_batch.assert_called_once_with(
+                self.mock_txn,
+                "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?",
+                [("val3", "val1", "val2")],
+            )
+        else:
+            self.mock_txn.executemany.assert_called_once_with(
+                "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?",
+                [("val3", "val1", "val2")],
+            )
+
+        # key_values and value_values must be the same length.
+        with self.assertRaises(ValueError):
+            yield defer.ensureDeferred(
+                self.datastore.db_pool.simple_update_many(
+                    table="tablename",
+                    key_names=("col1", "col2"),
+                    key_values=[("val1", "val2")],
+                    value_names=("col3",),
+                    value_values=[],
+                    desc="",
+                )
+            )
+
+    @defer.inlineCallbacks
+    def test_update_many_no_iterable(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_update_many(
+                table="tablename",
+                key_names=("col1", "col2"),
+                key_values=[],
+                value_names=("col3",),
+                value_values=[],
+                desc="",
+            )
+        )
+
+        if USE_POSTGRES_FOR_TESTS:
+            self.mock_execute_batch.assert_not_called()
+        else:
+            self.mock_txn.executemany.assert_not_called()
+
+    @defer.inlineCallbacks
     def test_delete_one(self) -> Generator["defer.Deferred[object]", object, None]:
         self.mock_txn.rowcount = 1
 
@@ -206,6 +414,393 @@ class SQLBaseStoreTestCase(unittest.TestCase):
             )
         )
 
-        self.mock_txn.execute.assert_called_with(
+        self.mock_txn.execute.assert_called_once_with(
             "DELETE FROM tablename WHERE keycol = ?", ["Go away"]
         )
+
+    @defer.inlineCallbacks
+    def test_delete_many(self) -> Generator["defer.Deferred[object]", object, None]:
+        self.mock_txn.rowcount = 2
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_delete_many(
+                table="tablename",
+                column="col1",
+                iterable=("val1", "val2"),
+                keyvalues={"col2": "val3"},
+                desc="",
+            )
+        )
+
+        self.mock_txn.execute.assert_called_once_with(
+            "DELETE FROM tablename WHERE col1 = ANY(?) AND col2 = ?",
+            [["val1", "val2"], "val3"],
+        )
+        self.assertEqual(result, 2)
+
+    @defer.inlineCallbacks
+    def test_delete_many_no_iterable(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_delete_many(
+                table="tablename",
+                column="col1",
+                iterable=(),
+                keyvalues={"col2": "val3"},
+                desc="",
+            )
+        )
+
+        self.mock_txn.execute.assert_not_called()
+        self.assertEqual(result, 0)
+
+    @defer.inlineCallbacks
+    def test_delete_many_no_keyvalues(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        self.mock_txn.rowcount = 2
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_delete_many(
+                table="tablename",
+                column="col1",
+                iterable=("val1", "val2"),
+                keyvalues={},
+                desc="",
+            )
+        )
+
+        self.mock_txn.execute.assert_called_once_with(
+            "DELETE FROM tablename WHERE col1 = ANY(?)", [["val1", "val2"]]
+        )
+        self.assertEqual(result, 2)
+
+    @defer.inlineCallbacks
+    def test_upsert(self) -> Generator["defer.Deferred[object]", object, None]:
+        self.mock_txn.rowcount = 1
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert(
+                table="tablename",
+                keyvalues={"columnname": "oldvalue"},
+                values={"othercol": "newvalue"},
+            )
+        )
+
+        self.mock_txn.execute.assert_called_once_with(
+            "INSERT INTO tablename (columnname, othercol) VALUES (?, ?) ON CONFLICT (columnname) DO UPDATE SET othercol=EXCLUDED.othercol",
+            ["oldvalue", "newvalue"],
+        )
+        self.assertTrue(result)
+
+    @defer.inlineCallbacks
+    def test_upsert_no_values(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        self.mock_txn.rowcount = 1
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert(
+                table="tablename",
+                keyvalues={"columnname": "value"},
+                values={},
+                insertion_values={"columnname": "value"},
+            )
+        )
+
+        self.mock_txn.execute.assert_called_once_with(
+            "INSERT INTO tablename (columnname) VALUES (?) ON CONFLICT (columnname) DO NOTHING",
+            ["value"],
+        )
+        self.assertTrue(result)
+
+    @defer.inlineCallbacks
+    def test_upsert_with_insertion(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        self.mock_txn.rowcount = 1
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert(
+                table="tablename",
+                keyvalues={"columnname": "oldvalue"},
+                values={"othercol": "newvalue"},
+                insertion_values={"thirdcol": "insertionval"},
+            )
+        )
+
+        self.mock_txn.execute.assert_called_once_with(
+            "INSERT INTO tablename (columnname, thirdcol, othercol) VALUES (?, ?, ?) ON CONFLICT (columnname) DO UPDATE SET othercol=EXCLUDED.othercol",
+            ["oldvalue", "insertionval", "newvalue"],
+        )
+        self.assertTrue(result)
+
+    @defer.inlineCallbacks
+    def test_upsert_with_where(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        self.mock_txn.rowcount = 1
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert(
+                table="tablename",
+                keyvalues={"columnname": "oldvalue"},
+                values={"othercol": "newvalue"},
+                where_clause="thirdcol IS NULL",
+            )
+        )
+
+        self.mock_txn.execute.assert_called_once_with(
+            "INSERT INTO tablename (columnname, othercol) VALUES (?, ?) ON CONFLICT (columnname) WHERE thirdcol IS NULL DO UPDATE SET othercol=EXCLUDED.othercol",
+            ["oldvalue", "newvalue"],
+        )
+        self.assertTrue(result)
+
+    @defer.inlineCallbacks
+    def test_upsert_many(self) -> Generator["defer.Deferred[object]", object, None]:
+        yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert_many(
+                table="tablename",
+                key_names=["keycol1", "keycol2"],
+                key_values=[["keyval1", "keyval2"], ["keyval3", "keyval4"]],
+                value_names=["valuecol3"],
+                value_values=[["val5"], ["val6"]],
+                desc="",
+            )
+        )
+
+        if USE_POSTGRES_FOR_TESTS:
+            self.mock_execute_values.assert_called_once_with(
+                self.mock_txn,
+                "INSERT INTO tablename (keycol1, keycol2, valuecol3) VALUES ? ON CONFLICT (keycol1, keycol2) DO UPDATE SET valuecol3=EXCLUDED.valuecol3",
+                [("keyval1", "keyval2", "val5"), ("keyval3", "keyval4", "val6")],
+                template=None,
+                fetch=False,
+            )
+        else:
+            self.mock_txn.executemany.assert_called_once_with(
+                "INSERT INTO tablename (keycol1, keycol2, valuecol3) VALUES (?, ?, ?) ON CONFLICT (keycol1, keycol2) DO UPDATE SET valuecol3=EXCLUDED.valuecol3",
+                [("keyval1", "keyval2", "val5"), ("keyval3", "keyval4", "val6")],
+            )
+
+    @defer.inlineCallbacks
+    def test_upsert_many_no_values(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert_many(
+                table="tablename",
+                key_names=["columnname"],
+                key_values=[["oldvalue"]],
+                value_names=[],
+                value_values=[],
+                desc="",
+            )
+        )
+
+        if USE_POSTGRES_FOR_TESTS:
+            self.mock_execute_values.assert_called_once_with(
+                self.mock_txn,
+                "INSERT INTO tablename (columnname) VALUES ? ON CONFLICT (columnname) DO NOTHING",
+                [("oldvalue",)],
+                template=None,
+                fetch=False,
+            )
+        else:
+            self.mock_txn.executemany.assert_called_once_with(
+                "INSERT INTO tablename (columnname) VALUES (?) ON CONFLICT (columnname) DO NOTHING",
+                [("oldvalue",)],
+            )
+
+    @defer.inlineCallbacks
+    def test_upsert_emulated_no_values_exists(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename")
+
+        self.mock_txn.fetchall.return_value = [(1,)]
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert(
+                table="tablename",
+                keyvalues={"columnname": "value"},
+                values={},
+                insertion_values={"columnname": "value"},
+            )
+        )
+
+        if USE_POSTGRES_FOR_TESTS:
+            self.mock_txn.execute.assert_has_calls(
+                [
+                    call("LOCK TABLE tablename in EXCLUSIVE MODE", ()),
+                    call("SELECT 1 FROM tablename WHERE columnname = ?", ["value"]),
+                ]
+            )
+        else:
+            self.mock_txn.execute.assert_called_once_with(
+                "SELECT 1 FROM tablename WHERE columnname = ?", ["value"]
+            )
+        self.assertFalse(result)
+
+    @defer.inlineCallbacks
+    def test_upsert_emulated_no_values_not_exists(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename")
+
+        self.mock_txn.fetchall.return_value = []
+        self.mock_txn.rowcount = 1
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert(
+                table="tablename",
+                keyvalues={"columnname": "value"},
+                values={},
+                insertion_values={"columnname": "value"},
+            )
+        )
+
+        self.mock_txn.execute.assert_has_calls(
+            [
+                call(
+                    "SELECT 1 FROM tablename WHERE columnname = ?",
+                    ["value"],
+                ),
+                call("INSERT INTO tablename (columnname) VALUES (?)", ["value"]),
+            ],
+        )
+        self.assertTrue(result)
+
+    @defer.inlineCallbacks
+    def test_upsert_emulated_with_insertion_exists(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename")
+
+        self.mock_txn.rowcount = 1
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert(
+                table="tablename",
+                keyvalues={"columnname": "oldvalue"},
+                values={"othercol": "newvalue"},
+                insertion_values={"thirdcol": "insertionval"},
+            )
+        )
+
+        if USE_POSTGRES_FOR_TESTS:
+            self.mock_txn.execute.assert_has_calls(
+                [
+                    call("LOCK TABLE tablename in EXCLUSIVE MODE", ()),
+                    call(
+                        "UPDATE tablename SET othercol = ? WHERE columnname = ?",
+                        ["newvalue", "oldvalue"],
+                    ),
+                ]
+            )
+        else:
+            self.mock_txn.execute.assert_called_once_with(
+                "UPDATE tablename SET othercol = ? WHERE columnname = ?",
+                ["newvalue", "oldvalue"],
+            )
+        self.assertTrue(result)
+
+    @defer.inlineCallbacks
+    def test_upsert_emulated_with_insertion_not_exists(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename")
+
+        self.mock_txn.rowcount = 0
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert(
+                table="tablename",
+                keyvalues={"columnname": "oldvalue"},
+                values={"othercol": "newvalue"},
+                insertion_values={"thirdcol": "insertionval"},
+            )
+        )
+
+        self.mock_txn.execute.assert_has_calls(
+            [
+                call(
+                    "UPDATE tablename SET othercol = ? WHERE columnname = ?",
+                    ["newvalue", "oldvalue"],
+                ),
+                call(
+                    "INSERT INTO tablename (columnname, othercol, thirdcol) VALUES (?, ?, ?)",
+                    ["oldvalue", "newvalue", "insertionval"],
+                ),
+            ]
+        )
+        self.assertTrue(result)
+
+    @defer.inlineCallbacks
+    def test_upsert_emulated_with_where(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename")
+
+        self.mock_txn.rowcount = 1
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert(
+                table="tablename",
+                keyvalues={"columnname": "oldvalue"},
+                values={"othercol": "newvalue"},
+                where_clause="thirdcol IS NULL",
+            )
+        )
+
+        if USE_POSTGRES_FOR_TESTS:
+            self.mock_txn.execute.assert_has_calls(
+                [
+                    call("LOCK TABLE tablename in EXCLUSIVE MODE", ()),
+                    call(
+                        "UPDATE tablename SET othercol = ? WHERE columnname = ? AND thirdcol IS NULL",
+                        ["newvalue", "oldvalue"],
+                    ),
+                ]
+            )
+        else:
+            self.mock_txn.execute.assert_called_once_with(
+                "UPDATE tablename SET othercol = ? WHERE columnname = ? AND thirdcol IS NULL",
+                ["newvalue", "oldvalue"],
+            )
+        self.assertTrue(result)
+
+    @defer.inlineCallbacks
+    def test_upsert_emulated_with_where_no_values(
+        self,
+    ) -> Generator["defer.Deferred[object]", object, None]:
+        self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename")
+
+        self.mock_txn.rowcount = 1
+
+        result = yield defer.ensureDeferred(
+            self.datastore.db_pool.simple_upsert(
+                table="tablename",
+                keyvalues={"columnname": "oldvalue"},
+                values={},
+                where_clause="thirdcol IS NULL",
+            )
+        )
+
+        if USE_POSTGRES_FOR_TESTS:
+            self.mock_txn.execute.assert_has_calls(
+                [
+                    call("LOCK TABLE tablename in EXCLUSIVE MODE", ()),
+                    call(
+                        "SELECT 1 FROM tablename WHERE columnname = ? AND thirdcol IS NULL",
+                        ["oldvalue"],
+                    ),
+                ]
+            )
+        else:
+            self.mock_txn.execute.assert_called_once_with(
+                "SELECT 1 FROM tablename WHERE columnname = ? AND thirdcol IS NULL",
+                ["oldvalue"],
+            )
+        self.assertFalse(result)
diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py
index 8cd7c89ca2..4d0ebb550d 100644
--- a/tests/storage/test_database.py
+++ b/tests/storage/test_database.py
@@ -213,7 +213,8 @@ class CallbacksTestCase(unittest.HomeserverTestCase):
         after_callback, exception_callback = self._run_interaction(_test_txn)
 
         # Calling both `after_callback`s when the first attempt failed is rather
-        # surprising (#12184). Let's document the behaviour in a test.
+        # surprising (https://github.com/matrix-org/synapse/issues/12184).
+        # Let's document the behaviour in a test.
         after_callback.assert_has_calls(
             [
                 call(123, 456, extra=789),
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index d3e20f44b2..66a027887d 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -1060,7 +1060,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
         self,
     ) -> None:
         """
-        A test that reproduces #13929 (Postgres only).
+        A test that reproduces https://github.com/matrix-org/synapse/issues/13929 (Postgres only).
 
         Test to make sure we can still get backfill points after many failed pull
         attempts that cause us to backoff to the limit. Even if the backoff formula
diff --git a/tests/storage/test_main.py b/tests/storage/test_main.py
index b8823d6993..01c0e5e671 100644
--- a/tests/storage/test_main.py
+++ b/tests/storage/test_main.py
@@ -39,11 +39,11 @@ class DataStoreTestCase(unittest.HomeserverTestCase):
         )
 
         self.assertEqual(1, total)
-        self.assertEqual(self.displayname, users.pop()["displayname"])
+        self.assertEqual(self.displayname, users.pop().displayname)
 
         users, total = self.get_success(
             self.store.get_users_paginate(0, 10, name="BC", guests=False)
         )
 
         self.assertEqual(1, total)
-        self.assertEqual(self.displayname, users.pop()["displayname"])
+        self.assertEqual(self.displayname, users.pop().displayname)
diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py
index 1e27f2c275..d3ffe963d3 100644
--- a/tests/storage/test_room.py
+++ b/tests/storage/test_room.py
@@ -42,16 +42,9 @@ class RoomStoreTestCase(HomeserverTestCase):
         )
 
     def test_get_room(self) -> None:
-        res = self.get_success(self.store.get_room(self.room.to_string()))
-        assert res is not None
-        self.assertLessEqual(
-            {
-                "room_id": self.room.to_string(),
-                "creator": self.u_creator.to_string(),
-                "is_public": True,
-            }.items(),
-            res.items(),
-        )
+        room = self.get_success(self.store.get_room(self.room.to_string()))
+        assert room is not None
+        self.assertTrue(room[0])
 
     def test_get_room_unknown_room(self) -> None:
         self.assertIsNone(self.get_success(self.store.get_room("!uknown:test")))
@@ -59,14 +52,9 @@ class RoomStoreTestCase(HomeserverTestCase):
     def test_get_room_with_stats(self) -> None:
         res = self.get_success(self.store.get_room_with_stats(self.room.to_string()))
         assert res is not None
-        self.assertLessEqual(
-            {
-                "room_id": self.room.to_string(),
-                "creator": self.u_creator.to_string(),
-                "public": True,
-            }.items(),
-            res.items(),
-        )
+        self.assertEqual(res.room_id, self.room.to_string())
+        self.assertEqual(res.creator, self.u_creator.to_string())
+        self.assertTrue(res.public)
 
     def test_get_room_with_stats_unknown_room(self) -> None:
         self.assertIsNone(
diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py
index 52ffa91c81..e3dc3623cb 100644
--- a/tests/storage/test_room_search.py
+++ b/tests/storage/test_room_search.py
@@ -93,7 +93,7 @@ class EventSearchInsertionTest(HomeserverTestCase):
         both strings and integers. When using Postgres, integers are automatically
         converted to strings.
 
-        Regression test for #11918.
+        Regression test for https://github.com/matrix-org/synapse/issues/11918.
         """
         store = self.hs.get_datastores().main
 
diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py
index aa20fe6780..c1392d8bfc 100644
--- a/tests/util/test_check_dependencies.py
+++ b/tests/util/test_check_dependencies.py
@@ -89,7 +89,8 @@ class TestDependencyChecker(TestCase):
     def test_version_reported_as_none(self) -> None:
         """Complain if importlib.metadata.version() returns None.
 
-        This shouldn't normally happen, but it was seen in the wild (#12223).
+        This shouldn't normally happen, but it was seen in the wild
+        (https://github.com/matrix-org/synapse/issues/12223).
         """
         with patch(
             "synapse.util.check_dependencies.metadata.requires",
@@ -148,7 +149,7 @@ class TestDependencyChecker(TestCase):
         """
         Tests that release candidates count as far as satisfying a dependency
         is concerned.
-        (Regression test, see #12176.)
+        (Regression test, see https://github.com/matrix-org/synapse/issues/12176.)
         """
         with patch(
             "synapse.util.check_dependencies.metadata.requires",
@@ -162,7 +163,10 @@ class TestDependencyChecker(TestCase):
                 check_requirements()
 
     def test_setuptools_rust_ignored(self) -> None:
-        """Test a workaround for a `poetry build` problem. Reproduces #13926."""
+        """
+        Test a workaround for a `poetry build` problem. Reproduces
+        https://github.com/matrix-org/synapse/issues/13926.
+        """
         with patch(
             "synapse.util.check_dependencies.metadata.requires",
             return_value=["setuptools_rust >= 1.3"],
diff --git a/tests/util/test_itertools.py b/tests/util/test_itertools.py
index 406c16cdcf..fabb05c7e4 100644
--- a/tests/util/test_itertools.py
+++ b/tests/util/test_itertools.py
@@ -13,7 +13,11 @@
 # limitations under the License.
 from typing import Dict, Iterable, List, Sequence
 
-from synapse.util.iterutils import chunk_seq, sorted_topologically
+from synapse.util.iterutils import (
+    chunk_seq,
+    sorted_topologically,
+    sorted_topologically_batched,
+)
 
 from tests.unittest import TestCase
 
@@ -107,3 +111,73 @@ class SortTopologically(TestCase):
         graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3, 2, 1]}
 
         self.assertEqual(list(sorted_topologically([4, 3, 2, 1], graph)), [1, 2, 3, 4])
+
+
+class SortTopologicallyBatched(TestCase):
+    "Test cases for `sorted_topologically_batched`"
+
+    def test_empty(self) -> None:
+        "Test that an empty graph works correctly"
+
+        graph: Dict[int, List[int]] = {}
+        self.assertEqual(list(sorted_topologically_batched([], graph)), [])
+
+    def test_handle_empty_graph(self) -> None:
+        "Test that a graph where a node doesn't have an entry is treated as empty"
+
+        graph: Dict[int, List[int]] = {}
+
+        # For disconnected nodes the output is simply sorted.
+        self.assertEqual(list(sorted_topologically_batched([1, 2], graph)), [[1, 2]])
+
+    def test_disconnected(self) -> None:
+        "Test that a graph with no edges work"
+
+        graph: Dict[int, List[int]] = {1: [], 2: []}
+
+        # For disconnected nodes the output is simply sorted.
+        self.assertEqual(list(sorted_topologically_batched([1, 2], graph)), [[1, 2]])
+
+    def test_linear(self) -> None:
+        "Test that a simple `4 -> 3 -> 2 -> 1` graph works"
+
+        graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]}
+
+        self.assertEqual(
+            list(sorted_topologically_batched([4, 3, 2, 1], graph)),
+            [[1], [2], [3], [4]],
+        )
+
+    def test_subset(self) -> None:
+        "Test that only sorting a subset of the graph works"
+        graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]}
+
+        self.assertEqual(list(sorted_topologically_batched([4, 3], graph)), [[3], [4]])
+
+    def test_fork(self) -> None:
+        "Test that a forked graph works"
+        graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [1], 4: [2, 3]}
+
+        # Valid orderings are `[1, 3, 2, 4]` or `[1, 2, 3, 4]`, but we should
+        # always get the same one.
+        self.assertEqual(
+            list(sorted_topologically_batched([4, 3, 2, 1], graph)), [[1], [2, 3], [4]]
+        )
+
+    def test_duplicates(self) -> None:
+        "Test that a graph with duplicate edges work"
+        graph: Dict[int, List[int]] = {1: [], 2: [1, 1], 3: [2, 2], 4: [3]}
+
+        self.assertEqual(
+            list(sorted_topologically_batched([4, 3, 2, 1], graph)),
+            [[1], [2], [3], [4]],
+        )
+
+    def test_multiple_paths(self) -> None:
+        "Test that a graph with multiple paths between two nodes work"
+        graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3, 2, 1]}
+
+        self.assertEqual(
+            list(sorted_topologically_batched([4, 3, 2, 1], graph)),
+            [[1], [2], [3], [4]],
+        )
diff --git a/tests/utils.py b/tests/utils.py
index e73b46944b..a0c87ad628 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -80,7 +80,7 @@ def setupdb() -> None:
 
         # Set up in the db
         db_conn = db_engine.module.connect(
-            database=POSTGRES_BASE_DB,
+            dbname=POSTGRES_BASE_DB,
             user=POSTGRES_USER,
             host=POSTGRES_HOST,
             port=POSTGRES_PORT,