-rw-r--r--  .github/ISSUE_TEMPLATE/BUG_REPORT.md  72
-rw-r--r--  .github/ISSUE_TEMPLATE/BUG_REPORT.yml  103
-rw-r--r--  .github/workflows/tests.yml  12
-rw-r--r--  changelog.d/12674.misc  1
-rw-r--r--  changelog.d/12737.doc  1
-rw-r--r--  changelog.d/12738.misc  1
-rw-r--r--  changelog.d/12881.misc  1
-rw-r--r--  changelog.d/12929.misc  1
-rw-r--r--  changelog.d/12957.misc  1
-rw-r--r--  changelog.d/12963.misc  1
-rw-r--r--  changelog.d/12965.misc  1
-rw-r--r--  changelog.d/12969.misc  1
-rw-r--r--  changelog.d/12970.misc  1
-rw-r--r--  changelog.d/12973.bugfix  1
-rw-r--r--  changelog.d/12979.bugfix  1
-rw-r--r--  changelog.d/12982.misc  1
-rw-r--r--  changelog.d/12984.misc  1
-rw-r--r--  changelog.d/12985.misc  1
-rw-r--r--  changelog.d/12990.misc  1
-rw-r--r--  changelog.d/12991.bugfix  2
-rw-r--r--  changelog.d/13004.misc  1
-rw-r--r--  changelog.d/13013.misc  1
-rw-r--r--  changelog.d/13022.doc  1
-rw-r--r--  contrib/docker_compose_workers/README.md  125
-rw-r--r--  contrib/docker_compose_workers/docker-compose.yaml  77
-rw-r--r--  contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml  14
-rw-r--r--  contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml  19
-rw-r--r--  contrib/graph/graph.py  35
-rw-r--r--  contrib/graph/graph2.py  32
-rw-r--r--  contrib/graph/graph3.py  45
-rw-r--r--  docker/Dockerfile-workers  3
-rw-r--r--  docker/README-testing.md  97
-rw-r--r--  docker/complement/Dockerfile  49
-rw-r--r--  docker/complement/README.md  33
-rw-r--r--  docker/complement/SynapseWorkers.Dockerfile  40
-rwxr-xr-x  docker/complement/conf-workers/start-complement-synapse-workers.sh  61
-rw-r--r--  docker/complement/conf/homeserver.yaml  129
-rw-r--r--  docker/complement/conf/log_config.yaml  24
-rw-r--r--  docker/complement/conf/postgres.supervisord.conf (renamed from docker/complement/conf-workers/postgres.supervisord.conf)  3
-rwxr-xr-x  docker/complement/conf/start.sh  30
-rwxr-xr-x  docker/complement/conf/start_for_complement.sh  90
-rw-r--r--  docker/complement/conf/workers-shared-extra.yaml.j2 (renamed from docker/complement/conf-workers/workers-shared.yaml)  20
-rw-r--r--  docker/conf-workers/shared.yaml.j2  2
-rw-r--r--  docker/conf-workers/supervisord.conf.j2  5
-rwxr-xr-x  docker/configure_workers_and_start.py  22
-rw-r--r--  docs/SUMMARY.md  1
-rw-r--r--  docs/development/contributing_guide.md  5
-rw-r--r--  mypy.ini  4
-rw-r--r--  poetry.lock  10
-rw-r--r--  pyproject.toml  2
-rwxr-xr-x  scripts-dev/complement.sh  35
-rw-r--r--  synapse/__init__.py  6
-rwxr-xr-x  synapse/_scripts/synapse_port_db.py  7
-rwxr-xr-x  synapse/_scripts/update_synapse_database.py  5
-rw-r--r--  synapse/app/_base.py  4
-rw-r--r--  synapse/app/admin_cmd.py  5
-rw-r--r--  synapse/app/generic_worker.py  5
-rw-r--r--  synapse/app/homeserver.py  6
-rw-r--r--  synapse/config/experimental.py  3
-rw-r--r--  synapse/config/logger.py  4
-rw-r--r--  synapse/federation/federation_client.py  4
-rw-r--r--  synapse/federation/sender/per_destination_queue.py  31
-rw-r--r--  synapse/federation/transport/server/federation.py  4
-rw-r--r--  synapse/handlers/auth.py  8
-rw-r--r--  synapse/handlers/device.py  33
-rw-r--r--  synapse/handlers/federation.py  13
-rw-r--r--  synapse/handlers/federation_event.py  2
-rw-r--r--  synapse/handlers/message.py  1
-rw-r--r--  synapse/metrics/__init__.py  4
-rw-r--r--  synapse/module_api/__init__.py  2
-rw-r--r--  synapse/rest/admin/__init__.py  5
-rw-r--r--  synapse/rest/admin/devices.py  2
-rw-r--r--  synapse/rest/client/devices.py  4
-rw-r--r--  synapse/rest/client/logout.py  4
-rw-r--r--  synapse/rest/client/relations.py  13
-rw-r--r--  synapse/state/v2.py  57
-rw-r--r--  synapse/storage/controllers/persist_events.py  5
-rw-r--r--  synapse/storage/controllers/state.py  3
-rw-r--r--  synapse/storage/databases/main/devices.py  10
-rw-r--r--  synapse/storage/databases/main/events.py  2
-rw-r--r--  synapse/storage/databases/main/receipts.py  5
-rw-r--r--  synapse/storage/databases/main/room.py  2
-rw-r--r--  synapse/storage/databases/main/state.py  8
-rw-r--r--  synapse/storage/engines/__init__.py  38
-rw-r--r--  synapse/storage/engines/postgres.py  24
-rw-r--r--  synapse/storage/prepare_database.py  3
-rw-r--r--  synapse/storage/state.py  1
-rw-r--r--  synapse/util/__init__.py  6
-rw-r--r--  tests/federation/transport/server/test__base.py  10
-rw-r--r--  tests/handlers/test_device.py  4
-rw-r--r--  tests/handlers/test_room_summary.py  2
-rw-r--r--  tests/http/server/_base.py  580
-rw-r--r--  tests/http/test_servlet.py  10
-rw-r--r--  tests/replication/http/test__base.py  10
-rw-r--r--  tests/rest/client/test_relations.py  1
-rw-r--r--  tests/rest/client/test_rooms.py  100
-rw-r--r--  tests/state/test_v2.py  125
-rw-r--r--  tests/test_server.py  14
98 files changed, 1645 insertions, 770 deletions
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.md b/.github/ISSUE_TEMPLATE/BUG_REPORT.md
deleted file mode 100644
index 978b699886..0000000000
--- a/.github/ISSUE_TEMPLATE/BUG_REPORT.md
+++ /dev/null
@@ -1,72 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-
----
-
-<!--
-
-**THIS IS NOT A SUPPORT CHANNEL!**
-**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**,
-please ask in **#synapse:matrix.org** (using a matrix.org account if necessary)
-
-If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
-
-This is a bug report template. By following the instructions below and
-filling out the sections with your information, you will help the us to get all
-the necessary data to fix your issue.
-
-You can also preview your report before submitting it. You may remove sections
-that aren't relevant to your particular case.
-
-Text between <!-- and --​> marks will be invisible in the report.
-
--->
-
-### Description
-
-<!-- Describe here the problem that you are experiencing -->
-
-### Steps to reproduce
-
-- list the steps
-- that reproduce the bug
-- using hyphens as bullet points
-
-<!--
-Describe how what happens differs from what you expected.
-
-If you can identify any relevant log snippets from _homeserver.log_, please include
-those (please be careful to remove any personal or private data). Please surround them with
-``` (three backticks, on a line on their own), so that they are formatted legibly.
--->
-
-### Version information
-
-<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
-
-<!-- Was this issue identified on matrix.org or another homeserver? -->
-- **Homeserver**:
-
-If not matrix.org:
-
-<!--
- What version of Synapse is running?
-
-You can find the Synapse version with this command:
-
-$ curl http://localhost:8008/_synapse/admin/v1/server_version
-
-(You may need to replace `localhost:8008` if Synapse is not configured to
-listen on that port.)
--->
-- **Version**:
-
-- **Install method**:
-<!-- examples: package manager/git clone/pip  -->
-
-- **Platform**:
-<!--
-Tell us about the environment in which your homeserver is operating
-distro, hardware, if it's running in a vm/container, etc.
--->
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
new file mode 100644
index 0000000000..1b304198bc
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
@@ -0,0 +1,103 @@
+name: Bug report
+description: Create a report to help us improve
+body:
+  - type: markdown
+    attributes:
+      value: |
+        **THIS IS NOT A SUPPORT CHANNEL!**
+        **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
+
+        If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
+
+        This is a bug report form. By following the instructions below and completing the sections with your information, you will help us to get all the necessary data to fix your issue.
+
+        You can also preview your report before submitting it.
+  - type: textarea
+    id: description
+    attributes:
+      label: Description
+      description: Describe the problem that you are experiencing
+    validations:
+      required: true
+  - type: textarea
+    id: reproduction_steps
+    attributes:
+      label: Steps to reproduce
+      description: |
+        Describe the series of steps that leads you to the problem.
+
+        Describe how what happens differs from what you expected.
+      placeholder: Tell us what you see!
+      value: |
+        - list the steps
+        - that reproduce the bug
+        - using hyphens as bullet points
+    validations:
+      required: true
+  - type: markdown
+    attributes:
+      value: |
+        ---
+
+        **IMPORTANT**: please answer the following questions, to help us narrow down the problem.
+  - type: input
+    id: homeserver
+    attributes:
+      label: Homeserver
+      description: Which homeserver was this issue identified on? (matrix.org, another homeserver, etc)
+    validations:
+      required: true
+  - type: input
+    id: version
+    attributes:
+      label: Synapse Version
+      description: |
+        What version of Synapse is this homeserver running?
+
+        You can find the Synapse version by visiting https://yourserver.example.com/_matrix/federation/v1/version
+
+        or with this command:
+
+        ```
+        $ curl http://localhost:8008/_synapse/admin/v1/server_version
+        ```
+
+        (You may need to replace `localhost:8008` if Synapse is not configured to listen on that port.)
+    validations:
+      required: true
+  - type: dropdown
+    id: install_method
+    attributes:
+      label: Installation Method
+      options:
+        - Docker (matrixdotorg/synapse)
+        - Debian packages from packages.matrix.org
+        - pip (from PyPI)
+        - Other (please mention below)
+  - type: textarea
+    id: platform
+    attributes:
+      label: Platform
+      description: |
+        Tell us about the environment in which your homeserver is operating...
+        e.g. distro, hardware, if it's running in a vm/container, etc.
+    validations:
+      required: true
+  - type: textarea
+    id: logs
+    attributes:
+      label: Relevant log output
+      description: |
+        Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
+        This will be automatically formatted into code, so there is no need for backticks.
+
+        Please be careful to remove any personal or private data.
+
+        **Bug reports are usually very difficult to diagnose without logging.**
+      render: shell
+    validations:
+      required: true
+  - type: textarea
+    id: anything_else
+    attributes:
+      label: Anything else that would be useful to know?
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 83ab727378..0b70ffc643 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -310,6 +310,16 @@ jobs:
     needs: linting-done
     runs-on: ubuntu-latest
 
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - arrangement: monolith
+            database: SQLite
+
+          - arrangement: monolith
+            database: Postgres
+
     steps:
       # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
       # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
@@ -337,7 +347,7 @@ jobs:
 
       - run: |
           set -o pipefail
-          COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
+          POSTGRES=${{ (matrix.database == 'Postgres') && 1 }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
         shell: bash
         name: Run Complement Tests
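
The matrix added above varies only the database; both entries still use the monolith arrangement, and the second entry sets `POSTGRES` before calling `scripts-dev/complement.sh`. A rough local equivalent of the two CI entries, assuming a Complement checkout next to the Synapse checkout and that the script reads the same `POSTGRES` variable the workflow passes:

```sh
# SQLite (the default) — mirrors the first matrix entry
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh

# Postgres — mirrors the second matrix entry
POSTGRES=1 COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
```
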
 
diff --git a/changelog.d/12674.misc b/changelog.d/12674.misc
new file mode 100644
index 0000000000..c8a8f32f0a
--- /dev/null
+++ b/changelog.d/12674.misc
@@ -0,0 +1 @@
+Add tests for cancellation of `GET /rooms/$room_id/members` and `GET /rooms/$room_id/state` requests.
diff --git a/changelog.d/12737.doc b/changelog.d/12737.doc
new file mode 100644
index 0000000000..ab2d1f2fd9
--- /dev/null
+++ b/changelog.d/12737.doc
@@ -0,0 +1 @@
+Add documentation for how to configure Synapse with Workers using Docker Compose. Includes example worker config and docker-compose.yaml. Contributed by @Thumbscrew.
\ No newline at end of file
diff --git a/changelog.d/12738.misc b/changelog.d/12738.misc
new file mode 100644
index 0000000000..8252223475
--- /dev/null
+++ b/changelog.d/12738.misc
@@ -0,0 +1 @@
+Report login failures due to unknown third party identifiers in the same way as failures due to invalid passwords. This prevents an attacker from using the error response to determine if the identifier exists. Contributed by Daniel Aloni.
\ No newline at end of file
diff --git a/changelog.d/12881.misc b/changelog.d/12881.misc
new file mode 100644
index 0000000000..8a83182bd4
--- /dev/null
+++ b/changelog.d/12881.misc
@@ -0,0 +1 @@
+Merge the Complement testing Docker images into a single, multi-purpose image.
\ No newline at end of file
diff --git a/changelog.d/12929.misc b/changelog.d/12929.misc
new file mode 100644
index 0000000000..20718d258d
--- /dev/null
+++ b/changelog.d/12929.misc
@@ -0,0 +1 @@
+Clean up the test code for client disconnection.
diff --git a/changelog.d/12957.misc b/changelog.d/12957.misc
new file mode 100644
index 0000000000..0c075276ec
--- /dev/null
+++ b/changelog.d/12957.misc
@@ -0,0 +1 @@
+Use lower isolation level when inserting read receipts to avoid serialization errors. Contributed by Nick @ Beeper.
diff --git a/changelog.d/12963.misc b/changelog.d/12963.misc
new file mode 100644
index 0000000000..d57e1aca6b
--- /dev/null
+++ b/changelog.d/12963.misc
@@ -0,0 +1 @@
+Reduce the amount of state we pull from the DB.
diff --git a/changelog.d/12965.misc b/changelog.d/12965.misc
new file mode 100644
index 0000000000..cc2823e12b
--- /dev/null
+++ b/changelog.d/12965.misc
@@ -0,0 +1 @@
+Enable testing against PostgreSQL databases in Complement CI.
\ No newline at end of file
diff --git a/changelog.d/12969.misc b/changelog.d/12969.misc
new file mode 100644
index 0000000000..05de7ce839
--- /dev/null
+++ b/changelog.d/12969.misc
@@ -0,0 +1 @@
+Fix an inaccurate comment.
diff --git a/changelog.d/12970.misc b/changelog.d/12970.misc
new file mode 100644
index 0000000000..8f874aa07b
--- /dev/null
+++ b/changelog.d/12970.misc
@@ -0,0 +1 @@
+Remove the `delete_device` method and always call `delete_devices`.
diff --git a/changelog.d/12973.bugfix b/changelog.d/12973.bugfix
new file mode 100644
index 0000000000..1bf45854ff
--- /dev/null
+++ b/changelog.d/12973.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable.
diff --git a/changelog.d/12979.bugfix b/changelog.d/12979.bugfix
new file mode 100644
index 0000000000..6b54408025
--- /dev/null
+++ b/changelog.d/12979.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available.
diff --git a/changelog.d/12982.misc b/changelog.d/12982.misc
new file mode 100644
index 0000000000..036b69efe6
--- /dev/null
+++ b/changelog.d/12982.misc
@@ -0,0 +1 @@
+Use a GitHub form for issues rather than a hard-to-read, easy-to-ignore template.
\ No newline at end of file
diff --git a/changelog.d/12984.misc b/changelog.d/12984.misc
new file mode 100644
index 0000000000..a902017180
--- /dev/null
+++ b/changelog.d/12984.misc
@@ -0,0 +1 @@
+Move [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) behind an experimental config flag.
diff --git a/changelog.d/12985.misc b/changelog.d/12985.misc
new file mode 100644
index 0000000000..d5ab9eedea
--- /dev/null
+++ b/changelog.d/12985.misc
@@ -0,0 +1 @@
+Add type annotations to `tests.state.test_v2`.
diff --git a/changelog.d/12990.misc b/changelog.d/12990.misc
new file mode 100644
index 0000000000..c68f6a731e
--- /dev/null
+++ b/changelog.d/12990.misc
@@ -0,0 +1 @@
+Fix documentation for running complement tests.
diff --git a/changelog.d/12991.bugfix b/changelog.d/12991.bugfix
new file mode 100644
index 0000000000..c6e388d5b9
--- /dev/null
+++ b/changelog.d/12991.bugfix
@@ -0,0 +1,2 @@
+Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced 
+in Synapse v1.41.0.
diff --git a/changelog.d/13004.misc b/changelog.d/13004.misc
new file mode 100644
index 0000000000..d8e93d87af
--- /dev/null
+++ b/changelog.d/13004.misc
@@ -0,0 +1 @@
+Faster joins: add issue links to the TODO comments in the code.
diff --git a/changelog.d/13013.misc b/changelog.d/13013.misc
new file mode 100644
index 0000000000..903c6a3c8a
--- /dev/null
+++ b/changelog.d/13013.misc
@@ -0,0 +1 @@
+Modernize the `contrib/graph/` scripts.
diff --git a/changelog.d/13022.doc b/changelog.d/13022.doc
new file mode 100644
index 0000000000..4d6ac7ae94
--- /dev/null
+++ b/changelog.d/13022.doc
@@ -0,0 +1 @@
+Ensure the [Poetry cheat sheet](https://matrix-org.github.io/synapse/develop/development/dependencies.html) is available in the online documentation.
diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md
new file mode 100644
index 0000000000..4dbfee2853
--- /dev/null
+++ b/contrib/docker_compose_workers/README.md
@@ -0,0 +1,125 @@
+# Setting up Synapse with Workers using Docker Compose
+
+This directory describes how to deploy and manage Synapse and workers via [Docker Compose](https://docs.docker.com/compose/).
+
+Example worker configuration files can be found [here](workers).
+
+All examples and snippets assume that your Synapse service is called `synapse` in your Docker Compose file.
+
+An example Docker Compose file can be found [here](docker-compose.yaml).
+
+## Worker Service Examples in Docker Compose
+
+To start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER`, or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`.
+
+### Generic Worker Example
+
+```yaml
+synapse-generic-worker-1:
+  image: matrixdotorg/synapse:latest
+  container_name: synapse-generic-worker-1
+  restart: unless-stopped
+  entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
+  healthcheck:
+    test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
+    start_period: "5s"
+    interval: "15s"
+    timeout: "5s"
+  volumes:
+    - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
+  environment:
+    SYNAPSE_WORKER: synapse.app.generic_worker
+  # Expose port if required so your reverse proxy can send requests to this worker
+  # Port configuration will depend on how the http listener is defined in the worker configuration file
+  ports:
+    - 8081:8081
+  depends_on:
+    - synapse
+```
+
+### Federation Sender Example
+
+Please note: the federation sender does not receive REST API calls, so no exposed ports are required.
+
+```yaml
+synapse-federation-sender-1:
+  image: matrixdotorg/synapse:latest
+  container_name: synapse-federation-sender-1
+  restart: unless-stopped
+  entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
+  healthcheck:
+    disable: true
+  volumes:
+    - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
+  environment:
+    SYNAPSE_WORKER: synapse.app.federation_sender
+  depends_on:
+    - synapse
+```
+
+## `homeserver.yaml` Configuration
+
+### Enable Redis
+
+Locate the `redis` section of your `homeserver.yaml` and enable and configure it:
+
+```yaml
+redis:
+  enabled: true
+  host: redis
+  port: 6379
+  # password: <secret_password>  
+```
+
+This assumes that your Redis service is called `redis` in your Docker Compose file.
+
+### Add a replication Listener
+
+Locate the `listeners` section of your `homeserver.yaml` and add the following replication listener:
+
+```yaml
+listeners:
+  # Other listeners
+
+  - port: 9093
+    type: http
+    resources:
+      - names: [replication]
+```
+
+This listener is used by the workers for replication and is referred to in worker config files using the following settings:
+
+```yaml
+worker_replication_host: synapse
+worker_replication_http_port: 9093
+```
+
+### Add Workers to `instance_map`
+
+Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:
+
+```yaml
+instance_map:
+  synapse-generic-worker-1:        # The worker_name setting in your worker configuration file
+    host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
+    port: 8034                     # The port assigned to the replication listener in your worker config file
+  synapse-federation-sender-1:
+    host: synapse-federation-sender-1
+    port: 8034
+```
+
+### Configure Federation Senders
+
+This section is applicable if you are using federation senders (`synapse.app.federation_sender`). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
+
+```yaml
+# This will disable federation sending on the main Synapse instance
+send_federation: false
+
+federation_sender_instances:
+  - synapse-federation-sender-1 # The worker_name setting in your federation sender worker configuration file
+```
+
+## Other Worker types
+
+Using the concepts shown here, it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
\ No newline at end of file
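
As a usage sketch for this README and the accompanying `docker-compose.yaml` (the volume path shown is an assumption; the port matches the generic worker example above): once `homeserver.yaml` and the worker configs are in place under the data volume, the stack can be brought up and the generic worker's health endpoint checked.

```sh
# VOLUME_PATH is the variable referenced throughout the compose file and README
export VOLUME_PATH=/srv/synapse

# Start Postgres, Redis, the main Synapse process and both example workers
docker compose up -d

# The generic worker serves /health on port 8081, as used by its healthcheck
curl -fSs http://localhost:8081/health
```
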
diff --git a/contrib/docker_compose_workers/docker-compose.yaml b/contrib/docker_compose_workers/docker-compose.yaml
new file mode 100644
index 0000000000..eaf02c2af9
--- /dev/null
+++ b/contrib/docker_compose_workers/docker-compose.yaml
@@ -0,0 +1,77 @@
+networks:
+  backend:
+
+services:
+  postgres:
+    image: postgres:latest
+    restart: unless-stopped
+    volumes:
+      - ${VOLUME_PATH}/var/lib/postgresql/data:/var/lib/postgresql/data:rw
+    networks:
+      - backend
+    environment:
+      POSTGRES_DB: synapse
+      POSTGRES_USER: synapse_user
+      POSTGRES_PASSWORD: postgres
+      POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=C
+
+  redis:
+    image: redis:latest
+    restart: unless-stopped
+    networks:
+      - backend
+
+  synapse:
+    image: matrixdotorg/synapse:latest
+    container_name: synapse
+    restart: unless-stopped
+    volumes:
+      - ${VOLUME_PATH}/data:/data:rw
+    ports:
+      - 8008:8008
+    networks:
+      - backend
+    environment:
+      SYNAPSE_CONFIG_DIR: /data
+      SYNAPSE_CONFIG_PATH: /data/homeserver.yaml
+    depends_on:
+      - postgres
+
+  synapse-generic-worker-1:
+    image: matrixdotorg/synapse:latest
+    container_name: synapse-generic-worker-1
+    restart: unless-stopped
+    entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
+    healthcheck:
+      test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
+      start_period: "5s"
+      interval: "15s"
+      timeout: "5s"
+    networks:
+      - backend
+    volumes:
+      - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
+    environment:
+      SYNAPSE_WORKER: synapse.app.generic_worker
+    # Expose port if required so your reverse proxy can send requests to this worker
+    # Port configuration will depend on how the http listener is defined in the worker configuration file
+    ports:
+      - 8081:8081
+    depends_on:
+      - synapse
+
+  synapse-federation-sender-1:
+    image: matrixdotorg/synapse:latest
+    container_name: synapse-federation-sender-1
+    restart: unless-stopped
+    entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
+    healthcheck:
+      disable: true
+    networks:
+      - backend
+    volumes:
+      - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
+    environment:
+      SYNAPSE_WORKER: synapse.app.federation_sender
+    depends_on:
+      - synapse
diff --git a/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
new file mode 100644
index 0000000000..5ba42a92d2
--- /dev/null
+++ b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
@@ -0,0 +1,14 @@
+worker_app: synapse.app.federation_sender
+worker_name: synapse-federation-sender-1
+
+# The replication listener on the main synapse process.
+worker_replication_host: synapse
+worker_replication_http_port: 9093
+
+worker_listeners:
+  - type: http
+    port: 8034
+    resources:
+      - names: [replication]
+
+worker_log_config: /data/federation_sender.log.config
diff --git a/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml
new file mode 100644
index 0000000000..694584105a
--- /dev/null
+++ b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml
@@ -0,0 +1,19 @@
+worker_app: synapse.app.generic_worker
+worker_name: synapse-generic-worker-1
+
+# The replication listener on the main synapse process.
+worker_replication_host: synapse
+worker_replication_http_port: 9093
+
+worker_listeners:
+  - type: http
+    port: 8034
+    resources:
+      - names: [replication]
+  - type: http
+    port: 8081
+    x_forwarded: true
+    resources:
+      - names: [client, federation]
+
+worker_log_config: /data/worker.log.config
diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py
index fdbac087bd..3c4f47dbd2 100644
--- a/contrib/graph/graph.py
+++ b/contrib/graph/graph.py
@@ -1,11 +1,3 @@
-import argparse
-import cgi
-import datetime
-import json
-
-import pydot
-import urllib2
-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,12 +12,25 @@ import urllib2
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import argparse
+import cgi
+import datetime
+import json
+import urllib.request
+from typing import List
+
+import pydot
+
 
-def make_name(pdu_id, origin):
-    return "%s@%s" % (pdu_id, origin)
+def make_name(pdu_id: str, origin: str) -> str:
+    return f"{pdu_id}@{origin}"
 
 
-def make_graph(pdus, room, filename_prefix):
+def make_graph(pdus: List[dict], filename_prefix: str) -> None:
+    """
+    Generate a dot and SVG file for a graph of events in the room based on the
+    topological ordering by querying a homeserver.
+    """
     pdu_map = {}
     node_map = {}
 
@@ -111,10 +116,10 @@ def make_graph(pdus, room, filename_prefix):
     graph.write_svg("%s.svg" % filename_prefix, prog="dot")
 
 
-def get_pdus(host, room):
+def get_pdus(host: str, room: str) -> List[dict]:
     transaction = json.loads(
-        urllib2.urlopen(
-            "http://%s/_matrix/federation/v1/context/%s/" % (host, room)
+        urllib.request.urlopen(
+            f"http://{host}/_matrix/federation/v1/context/{room}/"
         ).read()
     )
 
@@ -141,4 +146,4 @@ if __name__ == "__main__":
 
     pdus = get_pdus(host, room)
 
-    make_graph(pdus, room, prefix)
+    make_graph(pdus, prefix)
diff --git a/contrib/graph/graph2.py b/contrib/graph/graph2.py
index 0980231e4a..b46094ce0a 100644
--- a/contrib/graph/graph2.py
+++ b/contrib/graph/graph2.py
@@ -14,22 +14,31 @@
 
 
 import argparse
-import cgi
 import datetime
+import html
 import json
 import sqlite3
 
 import pydot
 
-from synapse.events import FrozenEvent
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.events import make_event_from_dict
 from synapse.util.frozenutils import unfreeze
 
 
-def make_graph(db_name, room_id, file_prefix, limit):
+def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None:
+    """
+    Generate a dot and SVG file for a graph of events in the room based on the
+    topological ordering by reading from a Synapse SQLite database.
+    """
     conn = sqlite3.connect(db_name)
 
+    sql = "SELECT room_version FROM rooms WHERE room_id = ?"
+    c = conn.execute(sql, (room_id,))
+    room_version = KNOWN_ROOM_VERSIONS[c.fetchone()[0]]
+
     sql = (
-        "SELECT json FROM event_json as j "
+        "SELECT json, internal_metadata FROM event_json as j "
         "INNER JOIN events as e ON e.event_id = j.event_id "
         "WHERE j.room_id = ?"
     )
@@ -43,7 +52,10 @@ def make_graph(db_name, room_id, file_prefix, limit):
 
     c = conn.execute(sql, args)
 
-    events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()]
+    events = [
+        make_event_from_dict(json.loads(e[0]), room_version, json.loads(e[1]))
+        for e in c.fetchall()
+    ]
 
     events.sort(key=lambda e: e.depth)
 
@@ -84,7 +96,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
             "name": event.event_id,
             "type": event.type,
             "state_key": event.get("state_key", None),
-            "content": cgi.escape(content, quote=True),
+            "content": html.escape(content, quote=True),
             "time": t,
             "depth": event.depth,
             "state_group": state_group,
@@ -96,11 +108,11 @@ def make_graph(db_name, room_id, file_prefix, limit):
         graph.add_node(node)
 
     for event in events:
-        for prev_id, _ in event.prev_events:
+        for prev_id in event.prev_event_ids():
             try:
                 end_node = node_map[prev_id]
             except Exception:
-                end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
+                end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
 
                 node_map[prev_id] = end_node
                 graph.add_node(end_node)
@@ -112,7 +124,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
         if len(event_ids) <= 1:
             continue
 
-        cluster = pydot.Cluster(str(group), label="<State Group: %s>" % (str(group),))
+        cluster = pydot.Cluster(str(group), label=f"<State Group: {str(group)}>")
 
         for event_id in event_ids:
             cluster.add_node(node_map[event_id])
@@ -126,7 +138,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Generate a PDU graph for a given room by talking "
-        "to the given homeserver to get the list of PDUs. \n"
+        "to the given Synapse SQLite file to get the list of PDUs. \n"
         "Requires pydot."
     )
     parser.add_argument(
diff --git a/contrib/graph/graph3.py b/contrib/graph/graph3.py
index dd0c19368b..a28a1594c7 100644
--- a/contrib/graph/graph3.py
+++ b/contrib/graph/graph3.py
@@ -1,13 +1,3 @@
-import argparse
-import cgi
-import datetime
-
-import pydot
-import simplejson as json
-
-from synapse.events import FrozenEvent
-from synapse.util.frozenutils import unfreeze
-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,15 +12,35 @@ from synapse.util.frozenutils import unfreeze
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import argparse
+import datetime
+import html
+import json
+
+import pydot
 
-def make_graph(file_name, room_id, file_prefix, limit):
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.events import make_event_from_dict
+from synapse.util.frozenutils import unfreeze
+
+
+def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
+    """
+    Generate a dot and SVG file for a graph of events in the room based on the
+    topological ordering by reading line-delimited JSON from a file.
+    """
     print("Reading lines")
     with open(file_name) as f:
         lines = f.readlines()
 
     print("Read lines")
 
-    events = [FrozenEvent(json.loads(line)) for line in lines]
+    # Figure out the room version, assume the first line is the create event.
+    room_version = KNOWN_ROOM_VERSIONS[
+        json.loads(lines[0]).get("content", {}).get("room_version")
+    ]
+
+    events = [make_event_from_dict(json.loads(line), room_version) for line in lines]
 
     print("Loaded events.")
 
@@ -66,8 +76,8 @@ def make_graph(file_name, room_id, file_prefix, limit):
             content.append(
                 "<b>%s</b>: %s,"
                 % (
-                    cgi.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
-                    cgi.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
+                    html.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"),
+                    html.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"),
                 )
             )
 
@@ -101,11 +111,11 @@ def make_graph(file_name, room_id, file_prefix, limit):
     print("Created Nodes")
 
     for event in events:
-        for prev_id, _ in event.prev_events:
+        for prev_id in event.prev_event_ids():
             try:
                 end_node = node_map[prev_id]
             except Exception:
-                end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,))
+                end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>")
 
                 node_map[prev_id] = end_node
                 graph.add_node(end_node)
@@ -139,8 +149,7 @@ if __name__ == "__main__":
     )
     parser.add_argument("-l", "--limit", help="Only retrieve the last N events.")
     parser.add_argument("event_file")
-    parser.add_argument("room")
 
     args = parser.parse_args()
 
-    make_graph(args.event_file, args.room, args.prefix, args.limit)
+    make_graph(args.event_file, args.prefix, args.limit)
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 24b03585f9..83db0a95b9 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -1,5 +1,6 @@
 # Inherit from the official Synapse docker image
-FROM matrixdotorg/synapse
+ARG SYNAPSE_VERSION=latest
+FROM matrixdotorg/synapse:$SYNAPSE_VERSION
 
 # Install deps
 RUN \
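
The new `SYNAPSE_VERSION` build argument defaults to `latest`; a hedged sketch of pinning the worker image to a specific published base-image tag (the tag shown is only illustrative):

```sh
docker build \
    --build-arg SYNAPSE_VERSION=v1.60.0 \
    -t matrixdotorg/synapse-workers \
    -f docker/Dockerfile-workers .
```
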
diff --git a/docker/README-testing.md b/docker/README-testing.md
index c38cae7530..1f0423f09b 100644
--- a/docker/README-testing.md
+++ b/docker/README-testing.md
@@ -8,79 +8,50 @@ docker images that can be run inside Complement for testing purposes.
 
 Note that running Synapse's unit tests from within the docker image is not supported.
 
-## Testing with SQLite and single-process Synapse
+## Using the Complement launch script
 
-> Note that `scripts-dev/complement.sh` is a script that will automatically build
-> and run an SQLite-based, single-process of Synapse against Complement.
+`scripts-dev/complement.sh` is a script that will automatically build
+and run Synapse against Complement.
+Consult the [contributing guide][guideComplementSh] for instructions on how to use it.
 
-The instructions below will set up Complement testing for a single-process,
-SQLite-based Synapse deployment.
 
-Start by building the base Synapse docker image. If you wish to run tests with the latest
-release of Synapse, instead of your current checkout, you can skip this step. From the
-root of the repository:
-
-```sh
-docker build -t matrixdotorg/synapse -f docker/Dockerfile .
-```
-
-This will build an image with the tag `matrixdotorg/synapse`.
-
-Next, build the Synapse image for Complement.
+[guideComplementSh]: https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-integration-tests-complement
 
-```sh
-docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement
-```
+## Building and running the images manually
 
-This will build an image with the tag `complement-synapse`, which can be handed to
-Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
-[Complement's documentation](https://github.com/matrix-org/complement/#running) for
-how to run the tests, as well as the various available command line flags.
-
-## Testing with PostgreSQL and single or multi-process Synapse
+Under some circumstances, you may wish to build the images manually.
+The instructions below describe how to do that.
 
-The above docker image only supports running Synapse with SQLite and in a
-single-process topology. The following instructions are used to build a Synapse image for
-Complement that supports either single or multi-process topology with a PostgreSQL
-database backend.
-
-As with the single-process image, build the base Synapse docker image. If you wish to run
-tests with the latest release of Synapse, instead of your current checkout, you can skip
-this step. From the root of the repository:
+Start by building the base Synapse docker image. If you wish to run tests with the latest
+release of Synapse, instead of your current checkout, you can skip this step. From the
+root of the repository:
 
 ```sh
 docker build -t matrixdotorg/synapse -f docker/Dockerfile .
 ```
 
-This will build an image with the tag `matrixdotorg/synapse`.
-
-Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
-Again, from the root of the repository:
+Next, build the workerised Synapse docker image, which is a layer over the base
+image.
 
 ```sh
 docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
 ```
 
-This will build an image with the tag` matrixdotorg/synapse-workers`.
-
-It's worth noting at this point that this image is fully functional, and
-can be used for testing against locally. See instructions for using the container
-under
-[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
-below.
-
-Finally, build the Synapse image for Complement, which is based on
-`matrixdotorg/synapse-workers`.
+Finally, build the multi-purpose image for Complement, which is a layer over the workers image.
 
 ```sh
-docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement
+docker build -t complement-synapse -f docker/complement/Dockerfile docker/complement
 ```
 
-This will build an image with the tag `complement-synapse-workers`, which can be handed to
+This will build an image with the tag `complement-synapse`, which can be handed to
 Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
 [Complement's documentation](https://github.com/matrix-org/complement/#running) for
 how to run the tests, as well as the various available command line flags.
 
+See [the Complement image README](./complement/README.md) for information about the
+expected environment variables.
+
+
 ## Running the Dockerfile-worker image standalone
 
 For manual testing of a multi-process Synapse instance in Docker,
@@ -113,6 +84,9 @@ docker run -d --name synapse \
 ...substituting `POSTGRES*` variables for those that match a postgres host you have
 available (usually a running postgres docker container).
 
+
+### Workers
+
 The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
 use when running the container. All possible worker names are defined by the keys of the
 `WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the
@@ -125,8 +99,11 @@ type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES`
 (e.g `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`).
 
 Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers
-(leaving only the main process). The container is configured to use redis-based worker
-mode.
+(leaving only the main process).
+The container will only be configured to use Redis-based worker mode if there are
+workers enabled.
+
+### Logging
 
 Logs for workers and the main process are logged to stdout and can be viewed with
 standard `docker logs` tooling. Worker logs contain their worker name
@@ -136,3 +113,21 @@ Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be writ
 `<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00:
 00, according to the container's clock. Logging for the main process must still be
 configured by modifying the homeserver's log config in your Synapse data volume.
+
+
+### Application Services
+
+Setting the `SYNAPSE_AS_REGISTRATION_DIR` environment variable to the path of
+a directory (within the container) will cause the configuration script to scan
+that directory for `.yaml`/`.yml` registration files.
+Synapse will be configured to load these configuration files.
+
+
+### TLS Termination
+
+Nginx is present in the image to route requests to the appropriate workers,
+but it does not serve TLS by default.
+
+You can configure `SYNAPSE_TLS_CERT` and `SYNAPSE_TLS_KEY` to point to a
+TLS certificate and key (respectively), both in PEM (textual) format.
+In this case, Nginx will additionally serve using HTTPS on port 8448.
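
Putting the standalone sections above together, a hedged `docker run` sketch for the workers image with a couple of workers, an appservice registration directory and TLS enabled might look like the following (the server name, paths, worker selection and Postgres details are illustrative assumptions):

```sh
docker run -d --name synapse \
    -v /srv/synapse:/data \
    -p 8008:8008 -p 8448:8448 \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=no \
    -e POSTGRES_HOST=postgres \
    -e POSTGRES_USER=postgres \
    -e POSTGRES_PASSWORD=somesecret \
    -e SYNAPSE_WORKER_TYPES="federation_sender,synchrotron" \
    -e SYNAPSE_AS_REGISTRATION_DIR=/data/appservices \
    -e SYNAPSE_TLS_CERT=/data/tls/server.crt \
    -e SYNAPSE_TLS_KEY=/data/tls/server.key \
    matrixdotorg/synapse-workers
```
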
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index 4823ce7364..50684c956d 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -1,22 +1,45 @@
-# A dockerfile which builds an image suitable for testing Synapse under
-# complement.
-
+# This dockerfile builds on top of 'docker/Dockerfile-workers' in matrix-org/synapse
+# by including a built-in postgres instance, as well as setting up the homeserver so
+# that it is ready for testing via Complement.
+#
+# Instructions for building this image from those it depends on are detailed in this guide:
+# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
 ARG SYNAPSE_VERSION=latest
+FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
+
+# Install postgresql
+RUN apt-get update && \
+  DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
+
+# Configure a user and create a database for Synapse
+RUN pg_ctlcluster 13 main start &&  su postgres -c "echo \
+ \"ALTER USER postgres PASSWORD 'somesecret'; \
+ CREATE DATABASE synapse \
+  ENCODING 'UTF8' \
+  LC_COLLATE='C' \
+  LC_CTYPE='C' \
+  template=template0;\" | psql" && pg_ctlcluster 13 main stop
+
+# Extend the shared homeserver config to disable rate-limiting,
+# set Complement's static shared secret, enable registration, amongst other
+# tweaks to get Synapse ready for testing.
+# To do this, we copy the old template out of the way and then include it
+# with Jinja2.
+RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2
+COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2
 
-FROM matrixdotorg/synapse:${SYNAPSE_VERSION}
-
-ENV SERVER_NAME=localhost
-
-COPY conf/* /conf/
+WORKDIR /data
 
-# generate a signing key
-RUN generate_signing_key -o /conf/server.signing.key
+COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
 
-WORKDIR /data
+# Copy the entrypoint
+COPY conf/start_for_complement.sh /
 
+# Expose nginx's listener ports
 EXPOSE 8008 8448
 
-ENTRYPOINT ["/conf/start.sh"]
+ENTRYPOINT ["/start_for_complement.sh"]
 
+# Update the healthcheck to have a shorter check interval
 HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
-    CMD curl -fSs http://localhost:8008/health || exit 1
+    CMD /bin/sh /healthcheck.sh
diff --git a/docker/complement/README.md b/docker/complement/README.md
index e075418e4a..37c39e2dfc 100644
--- a/docker/complement/README.md
+++ b/docker/complement/README.md
@@ -1 +1,32 @@
-Stuff for building the docker image used for testing under complement.
+# Unified Complement image for Synapse
+
+This is an image for testing Synapse with [the *Complement* integration test suite][complement].
+It contains some insecure defaults that are only suitable for testing purposes,
+so **please don't use this image for a production server**.
+
+This multi-purpose image is built on top of `Dockerfile-workers` in the parent directory
+and can be switched using environment variables between the following configurations:
+
+- Monolithic Synapse with SQLite (`SYNAPSE_COMPLEMENT_DATABASE=sqlite`)
+- Monolithic Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres`)
+- Workerised Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres` and `SYNAPSE_COMPLEMENT_USE_WORKERS=true`)
+
+The image is self-contained; it contains an integrated Postgres, Redis and Nginx.
+
+
+## How to get Complement to pass the environment variables through
+
+To pass these environment variables, use [Complement's `COMPLEMENT_SHARE_ENV_PREFIX`][complementEnv]
+variable to configure an environment prefix to pass through, then prefix the above options
+with that prefix.
+
+Example:
+```
+COMPLEMENT_SHARE_ENV_PREFIX=PASS_ PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
+```
+
+Consult `scripts-dev/complement.sh` in the repository root for a real example.
+
+
+[complement]: https://github.com/matrix-org/complement
+[complementEnv]: https://github.com/matrix-org/complement/pull/382
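
A hedged end-to-end invocation, run from a Complement checkout, combining the `complement-synapse` tag from `docker/README-testing.md` with the passthrough variables above (the `go test` target assumes Complement's usual test layout):

```sh
COMPLEMENT_BASE_IMAGE=complement-synapse \
COMPLEMENT_SHARE_ENV_PREFIX=PASS_ \
PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres \
PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true \
    go test -v ./tests/...
```
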
diff --git a/docker/complement/SynapseWorkers.Dockerfile b/docker/complement/SynapseWorkers.Dockerfile
deleted file mode 100644
index 99a09cbc2b..0000000000
--- a/docker/complement/SynapseWorkers.Dockerfile
+++ /dev/null
@@ -1,40 +0,0 @@
-# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse
-# by including a built-in postgres instance, as well as setting up the homeserver so
-# that it is ready for testing via Complement.
-#
-# Instructions for building this image from those it depends on is detailed in this guide:
-# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
-FROM matrixdotorg/synapse-workers
-
-# Install postgresql
-RUN apt-get update && \
-  DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
-
-# Configure a user and create a database for Synapse
-RUN pg_ctlcluster 13 main start &&  su postgres -c "echo \
- \"ALTER USER postgres PASSWORD 'somesecret'; \
- CREATE DATABASE synapse \
-  ENCODING 'UTF8' \
-  LC_COLLATE='C' \
-  LC_CTYPE='C' \
-  template=template0;\" | psql" && pg_ctlcluster 13 main stop
-
-# Modify the shared homeserver config with postgres support, certificate setup
-# and the disabling of rate-limiting
-COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml
-
-WORKDIR /data
-
-COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
-
-# Copy the entrypoint
-COPY conf-workers/start-complement-synapse-workers.sh /
-
-# Expose nginx's listener ports
-EXPOSE 8008 8448
-
-ENTRYPOINT ["/start-complement-synapse-workers.sh"]
-
-# Update the healthcheck to have a shorter check interval
-HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
-    CMD /bin/sh /healthcheck.sh
diff --git a/docker/complement/conf-workers/start-complement-synapse-workers.sh b/docker/complement/conf-workers/start-complement-synapse-workers.sh
deleted file mode 100755
index b7e2444000..0000000000
--- a/docker/complement/conf-workers/start-complement-synapse-workers.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/bash
-#
-# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
-
-set -e
-
-function log {
-    d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
-    echo "$d $@"
-}
-
-# Set the server name of the homeserver
-export SYNAPSE_SERVER_NAME=${SERVER_NAME}
-
-# No need to report stats here
-export SYNAPSE_REPORT_STATS=no
-
-# Set postgres authentication details which will be placed in the homeserver config file
-export POSTGRES_PASSWORD=somesecret
-export POSTGRES_USER=postgres
-export POSTGRES_HOST=localhost
-
-# Specify the workers to test with
-export SYNAPSE_WORKER_TYPES="\
-    event_persister, \
-    event_persister, \
-    background_worker, \
-    frontend_proxy, \
-    event_creator, \
-    user_dir, \
-    media_repository, \
-    federation_inbound, \
-    federation_reader, \
-    federation_sender, \
-    synchrotron, \
-    appservice, \
-    pusher"
-
-# Add Complement's appservice registration directory, if there is one
-# (It can be absent when there are no application services in this test!)
-if [ -d /complement/appservice ]; then
-    export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
-fi
-
-# Generate a TLS key, then generate a certificate by having Complement's CA sign it
-# Note that both the key and certificate are in PEM format (not DER).
-openssl genrsa -out /conf/server.tls.key 2048
-
-openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-  -subj "/CN=${SERVER_NAME}"
-
-openssl x509 -req -in /conf/server.tls.csr \
-  -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-  -out /conf/server.tls.crt
-
-export SYNAPSE_TLS_CERT=/conf/server.tls.crt
-export SYNAPSE_TLS_KEY=/conf/server.tls.key
-
-# Run the script that writes the necessary config files and starts supervisord, which in turn
-# starts everything else
-exec /configure_workers_and_start.py
diff --git a/docker/complement/conf/homeserver.yaml b/docker/complement/conf/homeserver.yaml
deleted file mode 100644
index e2be540bbb..0000000000
--- a/docker/complement/conf/homeserver.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-## Server ##
-
-server_name: SERVER_NAME
-log_config: /conf/log_config.yaml
-report_stats: False
-signing_key_path: /conf/server.signing.key
-trusted_key_servers: []
-enable_registration: true
-enable_registration_without_verification: true
-
-## Listeners ##
-
-tls_certificate_path: /conf/server.tls.crt
-tls_private_key_path: /conf/server.tls.key
-bcrypt_rounds: 4
-registration_shared_secret: complement
-
-listeners:
-  - port: 8448
-    bind_addresses: ['::']
-    type: http
-    tls: true
-    resources:
-      - names: [federation]
-
-  - port: 8008
-    bind_addresses: ['::']
-    type: http
-
-    resources:
-      - names: [client]
-
-## Database ##
-
-database:
-  name: "sqlite3"
-  args:
-    # We avoid /data, as it is a volume and is not transferred when the container is committed,
-    # which is a fundamental necessity in complement.
-    database: "/conf/homeserver.db"
-
-## Federation ##
-
-# trust certs signed by the complement CA
-federation_custom_ca_list:
-- /complement/ca/ca.crt
-
-# unblacklist RFC1918 addresses
-ip_range_blacklist: []
-
-# Disable server rate-limiting
-rc_federation:
-  window_size: 1000
-  sleep_limit: 10
-  sleep_delay: 500
-  reject_limit: 99999
-  concurrent: 3
-
-rc_message:
-  per_second: 9999
-  burst_count: 9999
-
-rc_registration:
-  per_second: 9999
-  burst_count: 9999
-
-rc_login:
-  address:
-    per_second: 9999
-    burst_count: 9999
-  account:
-    per_second: 9999
-    burst_count: 9999
-  failed_attempts:
-    per_second: 9999
-    burst_count: 9999
-
-rc_admin_redaction:
-  per_second: 9999
-  burst_count: 9999
-
-rc_joins:
-  local:
-    per_second: 9999
-    burst_count: 9999
-  remote:
-    per_second: 9999
-    burst_count: 9999
-
-rc_3pid_validation:
-  per_second: 1000
-  burst_count: 1000
-
-rc_invites:
-  per_room:
-    per_second: 1000
-    burst_count: 1000
-  per_user:
-    per_second: 1000
-    burst_count: 1000
-
-federation_rr_transactions_per_room_per_second: 9999
-
-## API Configuration ##
-
-# A list of application service config files to use
-#
-app_service_config_files:
-AS_REGISTRATION_FILES  
-
-## Experimental Features ##
-
-experimental_features:
-  # Enable spaces support
-  spaces_enabled: true
-  # Enable history backfilling support
-  msc2716_enabled: true
-  # server-side support for partial state in /send_join responses
-  msc3706_enabled: true
-  # client-side support for partial state in /send_join responses
-  faster_joins: true
-  # Enable jump to date endpoint
-  msc3030_enabled: true
-
-server_notices:
-  system_mxid_localpart: _server
-  system_mxid_display_name: "Server Alert"
-  system_mxid_avatar_url: ""
-  room_name: "Server Alert"
diff --git a/docker/complement/conf/log_config.yaml b/docker/complement/conf/log_config.yaml
deleted file mode 100644
index c33fd6cd00..0000000000
--- a/docker/complement/conf/log_config.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-version: 1
-
-formatters:
-  precise:
-   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
-
-filters:
-  context:
-    (): synapse.logging.context.LoggingContextFilter
-    request: ""
-
-handlers:
-  console:
-    class: logging.StreamHandler
-    formatter: precise
-    filters: [context]
-    # log to stdout, for easier use with 'docker logs'
-    stream: 'ext://sys.stdout'
-
-root:
-    level: INFO
-    handlers: [console]
-
-disable_existing_loggers: false
diff --git a/docker/complement/conf-workers/postgres.supervisord.conf b/docker/complement/conf/postgres.supervisord.conf
index 5608342d1a..5dae3e6330 100644
--- a/docker/complement/conf-workers/postgres.supervisord.conf
+++ b/docker/complement/conf/postgres.supervisord.conf
@@ -1,6 +1,9 @@
 [program:postgres]
 command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground
 
+# Only start if START_POSTGRES=1
+autostart=%(ENV_START_POSTGRES)s
+
 # Lower priority number = starts first
 priority=1
 
diff --git a/docker/complement/conf/start.sh b/docker/complement/conf/start.sh
deleted file mode 100755
index 5d8d0fe016..0000000000
--- a/docker/complement/conf/start.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-
-set -e
-
-sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml
-
-# Add the application service registration files to the homeserver.yaml config
-for filename in /complement/appservice/*.yaml; do
-  [ -f "$filename" ] || break
-
-  as_id=$(basename "$filename" .yaml)
-
-  # Insert the path to the registration file and the AS_REGISTRATION_FILES marker after 
-  # so we can add the next application service in the next iteration of this for loop
-  sed -i "s/AS_REGISTRATION_FILES/  - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml
-done
-# Remove the AS_REGISTRATION_FILES entry
-sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml
-
-# generate an ssl key and cert for the server, signed by the complement CA
-openssl genrsa -out /conf/server.tls.key 2048
-
-openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
-  -subj "/CN=${SERVER_NAME}"
-openssl x509 -req -in /conf/server.tls.csr \
-  -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
-  -out /conf/server.tls.crt
-
-exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@"
-
diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh
new file mode 100755
index 0000000000..b9c97ab687
--- /dev/null
+++ b/docker/complement/conf/start_for_complement.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+#
+# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
+
+set -e
+
+echo "Complement Synapse launcher"
+echo "  Args: $@"
+echo "  Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS"
+
+function log {
+    d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
+    echo "$d $@"
+}
+
+# Set the server name of the homeserver
+export SYNAPSE_SERVER_NAME=${SERVER_NAME}
+
+# No need to report stats here
+export SYNAPSE_REPORT_STATS=no
+
+
+case "$SYNAPSE_COMPLEMENT_DATABASE" in
+  postgres)
+    # Set postgres authentication details which will be placed in the homeserver config file
+    export POSTGRES_PASSWORD=somesecret
+    export POSTGRES_USER=postgres
+    export POSTGRES_HOST=localhost
+
+    # configure supervisord to start postgres
+    export START_POSTGRES=true
+    ;;
+
+  sqlite)
+    # Configure supervisord not to start Postgres, as we don't need it
+    export START_POSTGRES=false
+    ;;
+
+  *)
+    echo "Unknown Synapse database: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE" >&2
+    exit 1
+    ;;
+esac
+
+
+if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
+  # Specify the workers to test with
+  export SYNAPSE_WORKER_TYPES="\
+      event_persister, \
+      event_persister, \
+      background_worker, \
+      frontend_proxy, \
+      event_creator, \
+      user_dir, \
+      media_repository, \
+      federation_inbound, \
+      federation_reader, \
+      federation_sender, \
+      synchrotron, \
+      appservice, \
+      pusher"
+else
+  # Empty string here means 'main process only'
+  export SYNAPSE_WORKER_TYPES=""
+fi
+
+
+# Add Complement's appservice registration directory, if there is one
+# (It can be absent when there are no application services in this test!)
+if [ -d /complement/appservice ]; then
+    export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
+fi
+
+# Generate a TLS key, then generate a certificate by having Complement's CA sign it
+# Note that both the key and certificate are in PEM format (not DER).
+openssl genrsa -out /conf/server.tls.key 2048
+
+openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
+  -subj "/CN=${SERVER_NAME}"
+
+openssl x509 -req -in /conf/server.tls.csr \
+  -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
+  -out /conf/server.tls.crt
+
+export SYNAPSE_TLS_CERT=/conf/server.tls.crt
+export SYNAPSE_TLS_KEY=/conf/server.tls.key
+
+# Run the script that writes the necessary config files and starts supervisord, which in turn
+# starts everything else
+exec /configure_workers_and_start.py
diff --git a/docker/complement/conf-workers/workers-shared.yaml b/docker/complement/conf/workers-shared-extra.yaml.j2
index cd7b50c65c..a5b1b6bb8b 100644
--- a/docker/complement/conf-workers/workers-shared.yaml
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -1,3 +1,11 @@
+{#
+  This file extends the default 'shared' configuration file (from the 'synapse-workers'
+  docker image) with Complement-specific tweaks.
+
+  The Complement Dockerfile moves the base configuration out of its default path to
+  `shared-orig.yaml.j2`; we include that original file at the bottom of this one.
+#}
+
 ## Server ##
 report_stats: False
 trusted_key_servers: []
@@ -76,10 +84,16 @@ federation_rr_transactions_per_room_per_second: 9999
 ## Experimental Features ##
 
 experimental_features:
-  # Enable history backfilling support
-  msc2716_enabled: true
   # Enable spaces support
   spaces_enabled: true
+  # Enable history backfilling support
+  msc2716_enabled: true
+  # server-side support for partial state in /send_join responses
+  msc3706_enabled: true
+  {% if not workers_in_use %}
+  # client-side support for partial state in /send_join responses
+  faster_joins: true
+  {% endif %}
   # Enable jump to date endpoint
   msc3030_enabled: true
 
@@ -88,3 +102,5 @@ server_notices:
   system_mxid_display_name: "Server Alert"
   system_mxid_avatar_url: ""
   room_name: "Server Alert"
+
+{% include "shared-orig.yaml.j2" %}
diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2
index 644ed788f3..92d25386dc 100644
--- a/docker/conf-workers/shared.yaml.j2
+++ b/docker/conf-workers/shared.yaml.j2
@@ -3,8 +3,10 @@
 # configure_workers_and_start.py uses and amends to this file depending on the workers
 # that have been selected.
 
+{% if enable_redis %}
 redis:
     enabled: true
+{% endif %}
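+{# enable_redis is supplied by configure_workers_and_start.py: it is true when any
+   workers are configured, and false otherwise. #}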
 
 {% if appservice_registrations is not none %}
 ## Application Services ##
diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2
index ca1f7aef8e..7afab05133 100644
--- a/docker/conf-workers/supervisord.conf.j2
+++ b/docker/conf-workers/supervisord.conf.j2
@@ -28,6 +28,9 @@ stderr_logfile_maxbytes=0
 username=redis
 autorestart=true
 
+# Redis can be disabled if the image is being used without workers
+autostart={{ enable_redis }}
+
 [program:synapse_main]
 command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
 priority=10
@@ -41,4 +44,4 @@ autorestart=unexpected
 exitcodes=0
 
 # Additional process blocks
-{{ worker_config }}
\ No newline at end of file
+{{ worker_config }}
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index f7dac90222..64697e0354 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -37,8 +37,8 @@ import sys
 from pathlib import Path
 from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set
 
-import jinja2
 import yaml
+from jinja2 import Environment, FileSystemLoader
 
 MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
 
@@ -236,12 +236,13 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
         template_vars: The arguments to replace placeholder variables in the template with.
     """
     # Read the template file
-    with open(src) as infile:
-        template = infile.read()
+    # We disable autoescape to prevent template variables from being escaped,
+    # as we're not using HTML.
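+    # Using an Environment with a FileSystemLoader (rather than a bare Template) also
+    # lets {% include %} directives in a template resolve sibling template files.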
+    env = Environment(loader=FileSystemLoader(os.path.dirname(src)), autoescape=False)
+    template = env.get_template(os.path.basename(src))
 
-    # Generate a string from the template. We disable autoescape to prevent template
-    # variables from being escaped.
-    rendered = jinja2.Template(template, autoescape=False).render(**template_vars)
+    # Generate a string from the template.
+    rendered = template.render(**template_vars)
 
     # Write the generated contents to a file
     #
@@ -378,8 +379,8 @@ def generate_worker_files(
     nginx_locations = {}
 
     # Read the desired worker configuration from the environment
-    worker_types_env = environ.get("SYNAPSE_WORKER_TYPES")
-    if worker_types_env is None:
+    worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
+    if not worker_types_env:
         # No workers, just the main process
         worker_types = []
     else:
@@ -506,12 +507,16 @@ def generate_worker_files(
             if reg_path.suffix.lower() in (".yaml", ".yml")
         ]
 
+    workers_in_use = len(worker_types) > 0
+
     # Shared homeserver config
     convert(
         "/conf/shared.yaml.j2",
         "/conf/workers/shared.yaml",
         shared_worker_config=yaml.dump(shared_config),
         appservice_registrations=appservice_registrations,
+        enable_redis=workers_in_use,
+        workers_in_use=workers_in_use,
     )
 
     # Nginx config
@@ -531,6 +536,7 @@ def generate_worker_files(
         "/etc/supervisor/supervisord.conf",
         main_config_path=config_path,
         worker_config=supervisord_config,
+        enable_redis=workers_in_use,
     )
 
     # healthcheck config
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 8400a6539a..d7cf2df112 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -88,6 +88,7 @@
   - [OpenTracing](opentracing.md)
   - [Database Schemas](development/database_schema.md)
   - [Experimental features](development/experimental_features.md)
+  - [Dependency management](development/dependencies.md)
   - [Synapse Architecture]()
     - [Cancellation](development/synapse_architecture/cancellation.md)
     - [Log Contexts](log_contexts.md)
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 2b3714df66..c2f04a3905 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -304,6 +304,11 @@ To run a specific test, you can specify the whole name structure:
 COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order
 ```
 
+The above will run a monolithic (single-process) Synapse with SQLite as the database. For other configurations, try:
+
+- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
+- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
+
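+For example, a workerised run (which therefore uses Postgres) might look like:
+
+```sh
+WORKERS=1 COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
+```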
 
 ### Access database for homeserver after Complement test runs.
 
diff --git a/mypy.ini b/mypy.ini
index fe3e3f9b8e..7973f2ac01 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -56,7 +56,6 @@ exclude = (?x)
    |tests/rest/media/v1/test_media_storage.py
    |tests/server.py
    |tests/server_notices/test_resource_limits_server_notices.py
-   |tests/state/test_v2.py
    |tests/test_metrics.py
    |tests/test_server.py
    |tests/test_state.py
@@ -115,6 +114,9 @@ disallow_untyped_defs = False
 [mypy-tests.handlers.test_user_directory]
 disallow_untyped_defs = True
 
+[mypy-tests.state.test_v2]
+disallow_untyped_defs = True
+
 [mypy-tests.storage.test_profile]
 disallow_untyped_defs = True
 
diff --git a/poetry.lock b/poetry.lock
index 7c561e3182..8a54a939fe 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -524,7 +524,7 @@ python-versions = ">=3.7"
 
 [[package]]
 name = "matrix-common"
-version = "1.1.0"
+version = "1.2.1"
 description = "Common utilities for Synapse, Sydent and Sygnal"
 category = "main"
 optional = false
@@ -535,7 +535,7 @@ attrs = "*"
 importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""}
 
 [package.extras]
-dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==21.9b0)", "flake8 (==4.0.1)", "isort (==5.9.3)"]
+dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "build (==0.8.0)", "twine (==4.0.1)"]
 test = ["tox", "twisted", "aiounittest"]
 
 [[package]]
@@ -1563,7 +1563,7 @@ url_preview = ["lxml"]
 [metadata]
 lock-version = "1.1"
 python-versions = "^3.7.1"
-content-hash = "539e5326f401472d1ffc8325d53d72e544cd70156b3f43f32f1285c4c131f831"
+content-hash = "c1bb4dabba1e87517e25ca7bf778e8082fbc960a51d83819aec3a154110a374f"
 
 [metadata.files]
 attrs = [
@@ -2042,8 +2042,8 @@ markupsafe = [
     {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"},
 ]
 matrix-common = [
-    {file = "matrix_common-1.1.0-py3-none-any.whl", hash = "sha256:5d6dfd777503b2f3a031b566e6af25b6e95f9c0818ef57d954c3190fce5eb407"},
-    {file = "matrix_common-1.1.0.tar.gz", hash = "sha256:a8238748afc2b37079818367fed5156f355771b07c8ff0a175934f47e0ff3276"},
+    {file = "matrix_common-1.2.1-py3-none-any.whl", hash = "sha256:946709c405944a0d4b1d73207b77eb064b6dbfc5d70a69471320b06d8ce98b20"},
+    {file = "matrix_common-1.2.1.tar.gz", hash = "sha256:a99dcf02a6bd95b24a5a61b354888a2ac92bf2b4b839c727b8dd9da2cdfa3853"},
 ]
 matrix-synapse-ldap3 = [
     {file = "matrix-synapse-ldap3-0.2.0.tar.gz", hash = "sha256:91a0715b43a41ec3033244174fca20846836da98fda711fb01687f7199eecd2e"},
diff --git a/pyproject.toml b/pyproject.toml
index 1934174b82..fde6a4f424 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -150,7 +150,7 @@ typing-extensions = ">=3.10.0.1"
 cryptography = ">=3.4.7"
 # ijson 3.1.4 fixes a bug with "." in property names
 ijson = ">=3.1.4"
-matrix-common = "~=1.1.0"
+matrix-common = "~=1.2.1"
 # We need packaging.requirements.Requirement, added in 16.1.
 packaging = ">=16.1"
 # At the time of writing, we only use functions from the version `importlib.metadata`
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 3c472c576e..30b974b954 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -18,7 +18,7 @@
 # argument to the script. Complement will then only run those tests. If
 # no regex is supplied, all tests are run. For example;
 #
-# ./complement.sh "TestOutboundFederation(Profile|Send)"
+# ./complement.sh -run "TestOutboundFederation(Profile|Send)"
 #
 
 # Exit if a line returns a non-zero exit code
@@ -43,17 +43,29 @@ fi
 # Build the base Synapse image from the local checkout
 docker build -t matrixdotorg/synapse -f "docker/Dockerfile" .
 
+# Build the workers docker image (from the base Synapse image we just built).
+docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
+
+# Build the unified Complement image (from the worker Synapse image we just built).
+docker build -t complement-synapse \
+  -f "docker/complement/Dockerfile" "docker/complement"
+
+export COMPLEMENT_BASE_IMAGE=complement-synapse
+
 extra_test_args=()
 
 test_tags="synapse_blacklist,msc2716,msc3030,msc3787"
 
-# If we're using workers, modify the docker files slightly.
+# All environment variables starting with PASS_ will be shared with the containers that
+# Complement spins up. (The prefix is stripped off before reaching the container.)
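+# For example, PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres set below is seen as
+# SYNAPSE_COMPLEMENT_DATABASE=postgres inside the container.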
+export COMPLEMENT_SHARE_ENV_PREFIX=PASS_
+
 if [[ -n "$WORKERS" ]]; then
-  # Build the workers docker image (from the base Synapse image).
-  docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
+  # Use workers.
+  export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true
 
-  export COMPLEMENT_BASE_IMAGE=complement-synapse-workers
-  COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile
+  # Workers can only use Postgres as a database.
+  export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
 
   # And provide some more configuration to complement.
 
@@ -65,17 +77,18 @@ if [[ -n "$WORKERS" ]]; then
   # ... and it takes longer than 10m to run the whole suite.
   extra_test_args+=("-timeout=60m")
 else
-  export COMPLEMENT_BASE_IMAGE=complement-synapse
-  COMPLEMENT_DOCKERFILE=Dockerfile
+  export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=
+  if [[ -n "$POSTGRES" ]]; then
+    export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
+  else
+    export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
+  fi
 
   # We only test faster room joins on monoliths, because they are purposefully
   # being developed without worker support to start with.
   test_tags="$test_tags,faster_joins"
 fi
 
-# Build the Complement image from the Synapse image we just built.
-docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERFILE" "docker/complement"
-
 # Run the tests!
 echo "Images built; running complement"
 cd "$COMPLEMENT_DIR"
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 1613941759..b1369aca8f 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -20,8 +20,6 @@ import json
 import os
 import sys
 
-from matrix_common.versionstring import get_distribution_version_string
-
 # Check that we're not running on an unsupported Python version.
 if sys.version_info < (3, 7):
     print("Synapse requires Python 3.7 or above.")
@@ -70,7 +68,9 @@ try:
 except ImportError:
     pass
 
-__version__ = get_distribution_version_string("matrix-synapse")
+import synapse.util
+
+__version__ = synapse.util.SYNAPSE_VERSION
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 361b51d2fa..c753dfa7cb 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -40,7 +40,6 @@ from typing import (
 )
 
 import yaml
-from matrix_common.versionstring import get_distribution_version_string
 from typing_extensions import TypedDict
 
 from twisted.internet import defer, reactor as reactor_
@@ -84,7 +83,7 @@ from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStor
 from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
 from synapse.types import ISynapseReactor
-from synapse.util import Clock
+from synapse.util import SYNAPSE_VERSION, Clock
 
 # Cast safety: Twisted does some naughty magic which replaces the
 # twisted.internet.reactor module with a Reactor instance at runtime.
@@ -258,9 +257,7 @@ class MockHomeserver:
         self.clock = Clock(reactor)
         self.config = config
         self.hostname = config.server.server_name
-        self.version_string = "Synapse/" + get_distribution_version_string(
-            "matrix-synapse"
-        )
+        self.version_string = SYNAPSE_VERSION
 
     def get_clock(self) -> Clock:
         return self.clock
diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py
index c443522c05..b4aeae6dd5 100755
--- a/synapse/_scripts/update_synapse_database.py
+++ b/synapse/_scripts/update_synapse_database.py
@@ -19,7 +19,6 @@ import sys
 from typing import cast
 
 import yaml
-from matrix_common.versionstring import get_distribution_version_string
 
 from twisted.internet import defer, reactor as reactor_
 
@@ -28,6 +27,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.server import HomeServer
 from synapse.storage import DataStore
 from synapse.types import ISynapseReactor
+from synapse.util import SYNAPSE_VERSION
 
 # Cast safety: Twisted does some naughty magic which replaces the
 # twisted.internet.reactor module with a Reactor instance at runtime.
@@ -43,8 +43,7 @@ class MockHomeserver(HomeServer):
             hostname=config.server.server_name,
             config=config,
             reactor=reactor,
-            version_string="Synapse/"
-            + get_distribution_version_string("matrix-synapse"),
+            version_string=f"Synapse/{SYNAPSE_VERSION}",
         )
 
 
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index a3446ac6e8..84e389a6cd 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -37,7 +37,6 @@ from typing import (
 )
 
 from cryptography.utils import CryptographyDeprecationWarning
-from matrix_common.versionstring import get_distribution_version_string
 from typing_extensions import ParamSpec
 
 import twisted
@@ -68,6 +67,7 @@ from synapse.metrics import install_gc_manager, register_threadpool
 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.metrics.jemalloc import setup_jemalloc_stats
 from synapse.types import ISynapseReactor
+from synapse.util import SYNAPSE_VERSION
 from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
 from synapse.util.daemonize import daemonize_process
 from synapse.util.gai_resolver import GAIResolver
@@ -540,7 +540,7 @@ def setup_sentry(hs: "HomeServer") -> None:
 
     sentry_sdk.init(
         dsn=hs.config.metrics.sentry_dsn,
-        release=get_distribution_version_string("matrix-synapse"),
+        release=SYNAPSE_VERSION,
     )
 
     # We set some default tags that give some context to this instance
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 6fedf681f8..561621a285 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -19,8 +19,6 @@ import sys
 import tempfile
 from typing import List, Optional
 
-from matrix_common.versionstring import get_distribution_version_string
-
 from twisted.internet import defer, task
 
 import synapse
@@ -43,6 +41,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
 from synapse.server import HomeServer
 from synapse.storage.databases.main.room import RoomWorkerStore
 from synapse.types import StateMap
+from synapse.util import SYNAPSE_VERSION
 from synapse.util.logcontext import LoggingContext
 
 logger = logging.getLogger("synapse.app.admin_cmd")
@@ -220,7 +219,7 @@ def start(config_options: List[str]) -> None:
     ss = AdminCmdServer(
         config.server.server_name,
         config=config,
-        version_string="Synapse/" + get_distribution_version_string("matrix-synapse"),
+        version_string=f"Synapse/{SYNAPSE_VERSION}",
     )
 
     setup_logging(ss, config, use_worker_options=True)
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 89f8998f0e..4a987fb759 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -16,8 +16,6 @@ import logging
 import sys
 from typing import Dict, List, Optional, Tuple
 
-from matrix_common.versionstring import get_distribution_version_string
-
 from twisted.internet import address
 from twisted.web.resource import Resource
 
@@ -121,6 +119,7 @@ from synapse.storage.databases.main.transactions import TransactionWorkerStore
 from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
 from synapse.storage.databases.main.user_directory import UserDirectoryStore
 from synapse.types import JsonDict
+from synapse.util import SYNAPSE_VERSION
 from synapse.util.httpresourcetree import create_resource_tree
 
 logger = logging.getLogger("synapse.app.generic_worker")
@@ -447,7 +446,7 @@ def start(config_options: List[str]) -> None:
     hs = GenericWorkerServer(
         config.server.server_name,
         config=config,
-        version_string="Synapse/" + get_distribution_version_string("matrix-synapse"),
+        version_string=f"Synapse/{SYNAPSE_VERSION}",
     )
 
     setup_logging(hs, config, use_worker_options=True)
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 4c6c0658ab..745e704141 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -18,8 +18,6 @@ import os
 import sys
 from typing import Dict, Iterable, List
 
-from matrix_common.versionstring import get_distribution_version_string
-
 from twisted.internet.tcp import Port
 from twisted.web.resource import EncodingResourceWrapper, Resource
 from twisted.web.server import GzipEncoderFactory
@@ -69,7 +67,7 @@ from synapse.rest.synapse.client import build_synapse_client_resource_tree
 from synapse.rest.well_known import well_known_resource
 from synapse.server import HomeServer
 from synapse.storage import DataStore
-from synapse.util.check_dependencies import check_requirements
+from synapse.util.check_dependencies import VERSION, check_requirements
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.module_loader import load_module
 
@@ -371,7 +369,7 @@ def setup(config_options: List[str]) -> SynapseHomeServer:
     hs = SynapseHomeServer(
         config.server.server_name,
         config=config,
-        version_string="Synapse/" + get_distribution_version_string("matrix-synapse"),
+        version_string=f"Synapse/{VERSION}",
     )
 
     synapse.config.logger.setup_logging(hs, config, use_worker_options=False)
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index f2dfd49b07..0a285dba31 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -84,3 +84,6 @@ class ExperimentalConfig(Config):
 
         # MSC3772: A push rule for mutual relations.
         self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False)
+
+        # MSC3715: dir param on /relations.
+        self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False)
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 470b8b4492..82a5b5fa12 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -22,7 +22,6 @@ from string import Template
 from typing import TYPE_CHECKING, Any, Dict, Optional
 
 import yaml
-from matrix_common.versionstring import get_distribution_version_string
 from zope.interface import implementer
 
 from twisted.logger import (
@@ -37,6 +36,7 @@ from synapse.logging.context import LoggingContextFilter
 from synapse.logging.filter import MetadataFilter
 from synapse.types import JsonDict
 
+from ..util import SYNAPSE_VERSION
 from ._base import Config, ConfigError
 
 if TYPE_CHECKING:
@@ -349,7 +349,7 @@ def setup_logging(
     logging.warning(
         "Server %s version %s",
         sys.argv[0],
-        get_distribution_version_string("matrix-synapse"),
+        SYNAPSE_VERSION,
     )
     logging.info("Server hostname: %s", config.server.server_name)
     logging.info("Instance name: %s", hs.get_instance_name())
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index ad475a913b..66e6305562 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -1642,10 +1642,6 @@ def _validate_hierarchy_event(d: JsonDict) -> None:
     if not isinstance(event_type, str):
         raise ValueError("Invalid event: 'event_type' must be a str")
 
-    room_id = d.get("room_id")
-    if not isinstance(room_id, str):
-        raise ValueError("Invalid event: 'room_id' must be a str")
-
     state_key = d.get("state_key")
     if not isinstance(state_key, str):
         raise ValueError("Invalid event: 'state_key' must be a str")
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index 333ca9a97f..41d8b937af 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -37,6 +37,7 @@ from synapse.metrics import sent_transactions_counter
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import ReadReceipt
 from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
+from synapse.visibility import filter_events_for_server
 
 if TYPE_CHECKING:
     import synapse.server
@@ -77,6 +78,7 @@ class PerDestinationQueue:
     ):
         self._server_name = hs.hostname
         self._clock = hs.get_clock()
+        self._storage_controllers = hs.get_storage_controllers()
         self._store = hs.get_datastores().main
         self._transaction_manager = transaction_manager
         self._instance_name = hs.get_instance_name()
@@ -442,6 +444,12 @@ class PerDestinationQueue:
                     "This should not happen." % event_ids
                 )
 
+            logger.info(
+                "Catching up destination %s with %d PDUs",
+                self._destination,
+                len(catchup_pdus),
+            )
+
             # We send transactions with events from one room only, as its likely
             # that the remote will have to do additional processing, which may
             # take some time. It's better to give it small amounts of work
@@ -487,19 +495,20 @@ class PerDestinationQueue:
                         ):
                             continue
 
-                        # Filter out events where the server is not in the room,
-                        # e.g. it may have left/been kicked. *Ideally* we'd pull
-                        # out the kick and send that, but it's a rare edge case
-                        # so we don't bother for now (the server that sent the
-                        # kick should send it out if its online).
-                        hosts = await self._state.get_hosts_in_room_at_events(
-                            p.room_id, [p.event_id]
-                        )
-                        if self._destination not in hosts:
-                            continue
-
                         new_pdus.append(p)
 
+                    # Filter out events where the server is not in the room,
+                    # e.g. it may have left/been kicked. *Ideally* we'd pull
+                    # out the kick and send that, but it's a rare edge case
+                    # so we don't bother for now (the server that sent the
+                    # kick should send it out if it's online).
+                    new_pdus = await filter_events_for_server(
+                        self._storage_controllers,
+                        self._destination,
+                        new_pdus,
+                        redact=False,
+                    )
+
                     # If we've filtered out all the extremities, fall back to
                     # sending the original event. This should ensure that the
                     # server gets at least some of missed events (especially if
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index 7dfb890661..f7884bfbe0 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -24,7 +24,6 @@ from typing import (
     Union,
 )
 
-from matrix_common.versionstring import get_distribution_version_string
 from typing_extensions import Literal
 
 from synapse.api.constants import EduTypes
@@ -42,6 +41,7 @@ from synapse.http.servlet import (
     parse_strings_from_args,
 )
 from synapse.types import JsonDict
+from synapse.util import SYNAPSE_VERSION
 from synapse.util.ratelimitutils import FederationRateLimiter
 
 if TYPE_CHECKING:
@@ -622,7 +622,7 @@ class FederationVersionServlet(BaseFederationServlet):
             {
                 "server": {
                     "name": "Synapse",
-                    "version": get_distribution_version_string("matrix-synapse"),
+                    "version": SYNAPSE_VERSION,
                 }
             },
         )
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index fbafbbee6b..6e15028b0a 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -81,6 +81,8 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger(__name__)
 
+INVALID_USERNAME_OR_PASSWORD = "Invalid username or password"
+
 
 def convert_client_dict_legacy_fields_to_identifier(
     submission: JsonDict,
@@ -1215,7 +1217,9 @@ class AuthHandler:
                     await self._failed_login_attempts_ratelimiter.can_do_action(
                         None, (medium, address)
                     )
-                raise LoginError(403, "", errcode=Codes.FORBIDDEN)
+                raise LoginError(
+                    403, msg=INVALID_USERNAME_OR_PASSWORD, errcode=Codes.FORBIDDEN
+                )
 
             identifier_dict = {"type": "m.id.user", "user": user_id}
 
@@ -1341,7 +1345,7 @@ class AuthHandler:
 
         # We raise a 403 here, but note that if we're doing user-interactive
         # login, it turns all LoginErrors into a 401 anyway.
-        raise LoginError(403, "Invalid password", errcode=Codes.FORBIDDEN)
+        raise LoginError(403, msg=INVALID_USERNAME_OR_PASSWORD, errcode=Codes.FORBIDDEN)
 
     async def check_password_provider_3pid(
         self, medium: str, address: str, password: str
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index a0cbeedc30..b79c551703 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -398,35 +398,6 @@ class DeviceHandler(DeviceWorkerHandler):
             await self.delete_devices(user_id, user_devices)
 
     @trace
-    async def delete_device(self, user_id: str, device_id: str) -> None:
-        """Delete the given device
-
-        Args:
-            user_id: The user to delete the device from.
-            device_id: The device to delete.
-        """
-
-        try:
-            await self.store.delete_device(user_id, device_id)
-        except errors.StoreError as e:
-            if e.code == 404:
-                # no match
-                set_tag("error", True)
-                log_kv(
-                    {"reason": "User doesn't have device id.", "device_id": device_id}
-                )
-            else:
-                raise
-
-        await self._auth_handler.delete_access_tokens_for_user(
-            user_id, device_id=device_id
-        )
-
-        await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id)
-
-        await self.notify_device_update(user_id, [device_id])
-
-    @trace
     async def delete_all_devices_for_user(
         self, user_id: str, except_device_id: Optional[str] = None
     ) -> None:
@@ -591,7 +562,7 @@ class DeviceHandler(DeviceWorkerHandler):
             user_id, device_id, device_data
         )
         if old_device_id is not None:
-            await self.delete_device(user_id, old_device_id)
+            await self.delete_devices(user_id, [old_device_id])
         return device_id
 
     async def get_dehydrated_device(
@@ -638,7 +609,7 @@ class DeviceHandler(DeviceWorkerHandler):
         await self.store.update_device(user_id, device_id, old_device["display_name"])
         # can't call self.delete_device because that will clobber the
         # access token so call the storage layer directly
-        await self.store.delete_device(user_id, old_device_id)
+        await self.store.delete_devices(user_id, [old_device_id])
         await self.store.delete_e2e_keys_by_device(
             user_id=user_id, device_id=old_device_id
         )
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 6310f0ef27..1e5694244a 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -545,6 +545,7 @@ class FederationHandler:
             if ret.partial_state:
                 # TODO(faster_joins): roll this back if we don't manage to start the
                 #   background resync (eg process_remote_join fails)
+                #   https://github.com/matrix-org/synapse/issues/12998
                 await self.store.store_partial_state_room(room_id, ret.servers_in_room)
 
             max_stream_id = await self._federation_event_handler.process_remote_join(
@@ -1498,14 +1499,17 @@ class FederationHandler:
         # TODO(faster_joins): do we need to lock to avoid races? What happens if other
         #   worker processes kick off a resync in parallel? Perhaps we should just elect
         #   a single worker to do the resync.
+        #   https://github.com/matrix-org/synapse/issues/12994
         #
         # TODO(faster_joins): what happens if we leave the room during a resync? if we
         #   really leave, that might mean we have difficulty getting the room state over
         #   federation.
+        #   https://github.com/matrix-org/synapse/issues/12802
         #
         # TODO(faster_joins): we need some way of prioritising which homeservers in
         #   `other_destinations` to try first, otherwise we'll spend ages trying dead
         #   homeservers for large rooms.
+        #   https://github.com/matrix-org/synapse/issues/12999
 
         if initial_destination is None and len(other_destinations) == 0:
             raise ValueError(
@@ -1535,9 +1539,11 @@ class FederationHandler:
                 # all the events are updated, so we can update current state and
                 # clear the lazy-loading flag.
                 logger.info("Updating current state for %s", room_id)
+                # TODO(faster_joins): support workers
+                #   https://github.com/matrix-org/synapse/issues/12994
                 assert (
                     self._storage_controllers.persistence is not None
-                ), "TODO(faster_joins): support for workers"
+                ), "worker-mode deployments not currently supported here"
                 await self._storage_controllers.persistence.update_current_state(
                     room_id
                 )
@@ -1551,6 +1557,8 @@ class FederationHandler:
                     )
 
                     # TODO(faster_joins) update room stats and user directory?
+                    #   https://github.com/matrix-org/synapse/issues/12814
+                    #   https://github.com/matrix-org/synapse/issues/12815
                     return
 
                 # we raced against more events arriving with partial state. Go round
@@ -1558,6 +1566,8 @@ class FederationHandler:
                 # TODO(faster_joins): there is still a race here, whereby incoming events which raced
                 #   with us will fail to be persisted after the call to `clear_partial_state_room` due to
                 #   having partial state.
+                #   https://github.com/matrix-org/synapse/issues/12988
+                #
                 continue
 
             events = await self.store.get_events_as_list(
@@ -1580,6 +1590,7 @@ class FederationHandler:
                             #   indefinitely is also not the right thing to do if we can
                             #   reach all homeservers and they all claim they don't have
                             #   the state we want.
+                            #   https://github.com/matrix-org/synapse/issues/13000
                             logger.error(
                                 "Failed to get state for %s at %s from %s because %s, "
                                 "giving up!",
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 9488fef297..6c9e6a00b5 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -532,6 +532,7 @@ class FederationEventHandler:
                 #
                 # TODO(faster_joins): we probably need to be more intelligent, and
                 #    exclude partial-state prev_events from consideration
+                #    https://github.com/matrix-org/synapse/issues/13001
                 logger.warning(
                     "%s still has partial state: can't de-partial-state it yet",
                     event.event_id,
@@ -777,6 +778,7 @@ class FederationEventHandler:
             state_ids = await self._resolve_state_at_missing_prevs(origin, event)
             # TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does
             #   not return partial state
+            #   https://github.com/matrix-org/synapse/issues/13002
 
             await self._process_received_pdu(
                 origin, event, state_ids=state_ids, backfilled=backfilled
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index c8bbcfd8c2..9b17939163 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -1102,6 +1102,7 @@ class EventCreationHandler:
             #
             # TODO(faster_joins): figure out how this works, and make sure that the
             #   old state is complete.
+            #   https://github.com/matrix-org/synapse/issues/13003
             metadata = await self.store.get_metadata_for_events(state_event_ids)
 
             state_map_for_event: MutableStateMap[str] = {}
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index fffd83546c..496fce2ecc 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -35,7 +35,6 @@ from typing import (
 )
 
 import attr
-from matrix_common.versionstring import get_distribution_version_string
 from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, Metric
 from prometheus_client.core import (
     REGISTRY,
@@ -54,6 +53,7 @@ from synapse.metrics._exposition import (
 )
 from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
 from synapse.metrics._types import Collector
+from synapse.util import SYNAPSE_VERSION
 
 logger = logging.getLogger(__name__)
 
@@ -419,7 +419,7 @@ build_info = Gauge(
 )
 build_info.labels(
     " ".join([platform.python_implementation(), platform.python_version()]),
-    get_distribution_version_string("matrix-synapse"),
+    SYNAPSE_VERSION,
     " ".join([platform.system(), platform.release()]),
 ).set(1)
 
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index a8ad575fcd..30b2aeffdd 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -799,7 +799,7 @@ class ModuleApi:
         if device_id:
             # delete the device, which will also delete its access tokens
             yield defer.ensureDeferred(
-                self._hs.get_device_handler().delete_device(user_id, device_id)
+                self._hs.get_device_handler().delete_devices(user_id, [device_id])
             )
         else:
             # no associated device. Just delete the access token.
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 1aa08f8d95..fa3266720b 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -20,8 +20,6 @@ import platform
 from http import HTTPStatus
 from typing import TYPE_CHECKING, Optional, Tuple
 
-from matrix_common.versionstring import get_distribution_version_string
-
 from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.server import HttpServer, JsonResource
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
@@ -88,6 +86,7 @@ from synapse.rest.admin.users import (
     WhoisRestServlet,
 )
 from synapse.types import JsonDict, RoomStreamToken
+from synapse.util import SYNAPSE_VERSION
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -100,7 +99,7 @@ class VersionServlet(RestServlet):
 
     def __init__(self, hs: "HomeServer"):
         self.res = {
-            "server_version": get_distribution_version_string("matrix-synapse"),
+            "server_version": SYNAPSE_VERSION,
             "python_version": platform.python_version(),
         }
 
diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py
index cef46ba0dd..d934880102 100644
--- a/synapse/rest/admin/devices.py
+++ b/synapse/rest/admin/devices.py
@@ -80,7 +80,7 @@ class DeviceRestServlet(RestServlet):
         if u is None:
             raise NotFoundError("Unknown user")
 
-        await self.device_handler.delete_device(target_user.to_string(), device_id)
+        await self.device_handler.delete_devices(target_user.to_string(), [device_id])
         return HTTPStatus.OK, {}
 
     async def on_PUT(
diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py
index ad6fd6492b..6fab102437 100644
--- a/synapse/rest/client/devices.py
+++ b/synapse/rest/client/devices.py
@@ -147,7 +147,9 @@ class DeviceRestServlet(RestServlet):
             can_skip_ui_auth=True,
         )
 
-        await self.device_handler.delete_device(requester.user.to_string(), device_id)
+        await self.device_handler.delete_devices(
+            requester.user.to_string(), [device_id]
+        )
         return 200, {}
 
     async def on_PUT(
diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py
index 193a6951b9..23dfa4518f 100644
--- a/synapse/rest/client/logout.py
+++ b/synapse/rest/client/logout.py
@@ -45,8 +45,8 @@ class LogoutRestServlet(RestServlet):
             access_token = self.auth.get_access_token_from_request(request)
             await self._auth_handler.delete_access_token(access_token)
         else:
-            await self._device_handler.delete_device(
-                requester.user.to_string(), requester.device_id
+            await self._device_handler.delete_devices(
+                requester.user.to_string(), [requester.device_id]
             )
 
         return 200, {}
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py
index 3cae6d2b55..ce97080013 100644
--- a/synapse/rest/client/relations.py
+++ b/synapse/rest/client/relations.py
@@ -43,6 +43,7 @@ class RelationPaginationServlet(RestServlet):
         self.auth = hs.get_auth()
         self.store = hs.get_datastores().main
         self._relations_handler = hs.get_relations_handler()
+        self._msc3715_enabled = hs.config.experimental.msc3715_enabled
 
     async def on_GET(
         self,
@@ -55,9 +56,15 @@ class RelationPaginationServlet(RestServlet):
         requester = await self.auth.get_user_by_req(request, allow_guest=True)
 
         limit = parse_integer(request, "limit", default=5)
-        direction = parse_string(
-            request, "org.matrix.msc3715.dir", default="b", allowed_values=["f", "b"]
-        )
+        if self._msc3715_enabled:
+            direction = parse_string(
+                request,
+                "org.matrix.msc3715.dir",
+                default="b",
+                allowed_values=["f", "b"],
+            )
+        else:
+            direction = "b"
         from_token_str = parse_string(request, "from")
         to_token_str = parse_string(request, "to")
 
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 041ccac59e..6a16f38a15 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -17,12 +17,14 @@ import itertools
 import logging
 from typing import (
     Any,
+    Awaitable,
     Callable,
     Collection,
     Dict,
     Generator,
     Iterable,
     List,
+    Mapping,
     Optional,
     Sequence,
     Set,
@@ -30,33 +32,58 @@ from typing import (
     overload,
 )
 
-from typing_extensions import Literal
+from typing_extensions import Literal, Protocol
 
-import synapse.state
 from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
 from synapse.api.room_versions import RoomVersion
 from synapse.events import EventBase
 from synapse.types import MutableStateMap, StateMap
-from synapse.util import Clock
 
 logger = logging.getLogger(__name__)
 
 
+class Clock(Protocol):
+    # This is usually synapse.util.Clock, but it's replaced with a FakeClock in tests.
+    # We only ever sleep(0) though, so that other async functions can make forward
+    # progress without waiting for stateres to complete.
+    def sleep(self, duration_ms: float) -> Awaitable[None]:
+        ...
+
+
+class StateResolutionStore(Protocol):
+    # This is usually synapse.state.StateResolutionStore, but it's replaced with a
+    # TestStateResolutionStore in tests.
+    def get_events(
+        self, event_ids: Collection[str], allow_rejected: bool = False
+    ) -> Awaitable[Dict[str, EventBase]]:
+        ...
+
+    def get_auth_chain_difference(
+        self, room_id: str, state_sets: List[Set[str]]
+    ) -> Awaitable[Set[str]]:
+        ...
+
+
 # We want to await to the reactor occasionally during state res when dealing
 # with large data sets, so that we don't exhaust the reactor. This is done by
 # awaiting to reactor during loops every N iterations.
 _AWAIT_AFTER_ITERATIONS = 100
 
 
+__all__ = [
+    "resolve_events_with_store",
+]
+
+
 async def resolve_events_with_store(
     clock: Clock,
     room_id: str,
     room_version: RoomVersion,
     state_sets: Sequence[StateMap[str]],
     event_map: Optional[Dict[str, EventBase]],
-    state_res_store: "synapse.state.StateResolutionStore",
+    state_res_store: StateResolutionStore,
 ) -> StateMap[str]:
     """Resolves the state using the v2 state resolution algorithm
 
@@ -194,7 +221,7 @@ async def _get_power_level_for_sender(
     room_id: str,
     event_id: str,
     event_map: Dict[str, EventBase],
-    state_res_store: "synapse.state.StateResolutionStore",
+    state_res_store: StateResolutionStore,
 ) -> int:
     """Return the power level of the sender of the given event according to
     their auth events.
@@ -243,9 +270,9 @@ async def _get_power_level_for_sender(
 
 async def _get_auth_chain_difference(
     room_id: str,
-    state_sets: Sequence[StateMap[str]],
+    state_sets: Sequence[Mapping[Any, str]],
     event_map: Dict[str, EventBase],
-    state_res_store: "synapse.state.StateResolutionStore",
+    state_res_store: StateResolutionStore,
 ) -> Set[str]:
     """Compare the auth chains of each state set and return the set of events
     that only appear in some but not all of the auth chains.
@@ -406,7 +433,7 @@ async def _add_event_and_auth_chain_to_graph(
     room_id: str,
     event_id: str,
     event_map: Dict[str, EventBase],
-    state_res_store: "synapse.state.StateResolutionStore",
+    state_res_store: StateResolutionStore,
     auth_diff: Set[str],
 ) -> None:
     """Helper function for _reverse_topological_power_sort that add the event
@@ -440,7 +467,7 @@ async def _reverse_topological_power_sort(
     room_id: str,
     event_ids: Iterable[str],
     event_map: Dict[str, EventBase],
-    state_res_store: "synapse.state.StateResolutionStore",
+    state_res_store: StateResolutionStore,
     auth_diff: Set[str],
 ) -> List[str]:
     """Returns a list of the event_ids sorted by reverse topological ordering,
@@ -501,7 +528,7 @@ async def _iterative_auth_checks(
     event_ids: List[str],
     base_state: StateMap[str],
     event_map: Dict[str, EventBase],
-    state_res_store: "synapse.state.StateResolutionStore",
+    state_res_store: StateResolutionStore,
 ) -> MutableStateMap[str]:
     """Sequentially apply auth checks to each event in given list, updating the
     state as it goes along.
@@ -569,7 +596,7 @@ async def _mainline_sort(
     event_ids: List[str],
     resolved_power_event_id: Optional[str],
     event_map: Dict[str, EventBase],
-    state_res_store: "synapse.state.StateResolutionStore",
+    state_res_store: StateResolutionStore,
 ) -> List[str]:
     """Returns a sorted list of event_ids sorted by mainline ordering based on
     the given event resolved_power_event_id
@@ -638,7 +665,7 @@ async def _get_mainline_depth_for_event(
     event: EventBase,
     mainline_map: Dict[str, int],
     event_map: Dict[str, EventBase],
-    state_res_store: "synapse.state.StateResolutionStore",
+    state_res_store: StateResolutionStore,
 ) -> int:
     """Get the mainline depths for the given event based on the mainline map
 
@@ -682,7 +709,7 @@ async def _get_event(
     room_id: str,
     event_id: str,
     event_map: Dict[str, EventBase],
-    state_res_store: "synapse.state.StateResolutionStore",
+    state_res_store: StateResolutionStore,
     allow_none: Literal[False] = False,
 ) -> EventBase:
     ...
@@ -693,7 +720,7 @@ async def _get_event(
     room_id: str,
     event_id: str,
     event_map: Dict[str, EventBase],
-    state_res_store: "synapse.state.StateResolutionStore",
+    state_res_store: StateResolutionStore,
     allow_none: Literal[True],
 ) -> Optional[EventBase]:
     ...
@@ -703,7 +730,7 @@ async def _get_event(
     room_id: str,
     event_id: str,
     event_map: Dict[str, EventBase],
-    state_res_store: "synapse.state.StateResolutionStore",
+    state_res_store: StateResolutionStore,
     allow_none: bool = False,
 ) -> Optional[EventBase]:
     """Helper function to look up event in event_map, falling back to looking
diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
index 4caaa81808..4bcb99d06e 100644
--- a/synapse/storage/controllers/persist_events.py
+++ b/synapse/storage/controllers/persist_events.py
@@ -388,10 +388,13 @@ class EventsPersistenceStorageController:
 
         # TODO(faster_joins): get a real stream ordering, to make this work correctly
         #    across workers.
+        #    https://github.com/matrix-org/synapse/issues/12994
         #
         # TODO(faster_joins): this can race against event persistence, in which case we
         #    will end up with incorrect state. Perhaps we should make this a job we
-        #    farm out to the event persister, somehow.
+        #    farm out to the event persister thread, somehow.
+        #    https://github.com/matrix-org/synapse/issues/13007
+        #
         stream_id = self.main_store.get_room_max_stream_ordering()
         await self.persist_events_store.update_current_state(room_id, delta, stream_id)
 
diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py
index 3b4cdb67eb..d3a44bc876 100644
--- a/synapse/storage/controllers/state.py
+++ b/synapse/storage/controllers/state.py
@@ -452,6 +452,9 @@ class StateStorageController:
                  up to date.
         """
         # FIXME(faster_joins): what do we do here?
+        #   https://github.com/matrix-org/synapse/issues/12814
+        #   https://github.com/matrix-org/synapse/issues/12815
+        #   https://github.com/matrix-org/synapse/issues/13008
 
         return await self.stores.main.get_partial_current_state_deltas(
             prev_stream_id, max_stream_id
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index d900064c07..71e7863dd8 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -1433,16 +1433,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             )
             raise StoreError(500, "Problem storing device.")
 
-    async def delete_device(self, user_id: str, device_id: str) -> None:
-        """Delete a device and its device_inbox.
-
-        Args:
-            user_id: The ID of the user which owns the device
-            device_id: The ID of the device to delete
-        """
-
-        await self.delete_devices(user_id, [device_id])
-
     async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
         """Deletes several devices.
 
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 17e35cf63e..a8773374be 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -46,7 +46,7 @@ from synapse.storage.database import (
 )
 from synapse.storage.databases.main.events_worker import EventCacheEntry
 from synapse.storage.databases.main.search import SearchEntry
-from synapse.storage.engines.postgres import PostgresEngine
+from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import AbstractStreamIdGenerator
 from synapse.storage.util.sequence import SequenceGenerator
 from synapse.types import JsonDict, StateMap, get_domain_from_id
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 21e954ccc1..b6106affa6 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -36,6 +36,7 @@ from synapse.storage.database import (
     LoggingTransaction,
 )
 from synapse.storage.engines import PostgresEngine
+from synapse.storage.engines._base import IsolationLevel
 from synapse.storage.util.id_generators import (
     AbstractStreamIdTracker,
     MultiWriterIdGenerator,
@@ -764,6 +765,10 @@ class ReceiptsWorkerStore(SQLBaseStore):
                 linearized_event_id,
                 data,
                 stream_id=stream_id,
+                # READ COMMITTED is beneficial here: we check for a receipt with a greater
+                # stream ordering, and it is better to check against the very latest data at
+                # SELECT time than against a snapshot taken at the start of the transaction.
+                isolation_level=IsolationLevel.READ_COMMITTED,
             )
 
         # If the receipt was older than the currently persisted one, nothing to do.
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 68d4fc2e64..5760d3428e 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -1112,6 +1112,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         # this can race with incoming events, so we watch out for FK errors.
         # TODO(faster_joins): this still doesn't completely fix the race, since the persist process
         #   is not atomic. I fear we need an application-level lock.
+        #   https://github.com/matrix-org/synapse/issues/12988
         try:
             await self.db_pool.runInteraction(
                 "clear_partial_state_room", self._clear_partial_state_room_txn, room_id
@@ -1119,6 +1120,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
             return True
         except self.db_pool.engine.module.DatabaseError as e:
             # TODO(faster_joins): how do we distinguish between FK errors and other errors?
+            #   https://github.com/matrix-org/synapse/issues/12988
             logger.warning(
                 "Exception while clearing lazy partial-state-room %s, retrying: %s",
                 room_id,
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index bdd00273cd..9674c4a757 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -127,13 +127,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             NotFoundError: if the room is unknown
         """
 
-        # First we try looking up room version from the database, but for old
-        # rooms we might not have added the room version to it yet so we fall
-        # back to previous behaviour and look in current state events.
-        #
         # We really should have an entry in the rooms table for every room we
-        # care about, but let's be a bit paranoid (at least while the background
-        # update is happening) to avoid breaking existing rooms.
+        # care about, but let's be a bit paranoid.
         room_version = self.db_pool.simple_select_one_onecol_txn(
             txn,
             table="rooms",
@@ -440,6 +435,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         )
 
         # TODO(faster_joins): need to do something about workers here
+        #   https://github.com/matrix-org/synapse/issues/12994
         txn.call_after(self.is_partial_state_event.invalidate, (event.event_id,))
         txn.call_after(
             self._get_state_group_for_event.prefill,
diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py
index f51b3d228e..a182e8a098 100644
--- a/synapse/storage/engines/__init__.py
+++ b/synapse/storage/engines/__init__.py
@@ -11,11 +11,35 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Any, Mapping
+from typing import Any, Mapping, NoReturn
 
 from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup
-from .postgres import PostgresEngine
-from .sqlite import Sqlite3Engine
+
+# The classes `PostgresEngine` and `Sqlite3Engine` must always be importable, because
+# we use `isinstance(engine, PostgresEngine)` to write different queries for postgres
+# and sqlite. But the database driver modules are both optional: they may not be
+# installed. To account for this, create dummy classes on import failure so we can
+# still run `isinstance()` checks.
+try:
+    from .postgres import PostgresEngine
+except ImportError:
+
+    class PostgresEngine(BaseDatabaseEngine):  # type: ignore[no-redef]
+        def __new__(cls, *args: object, **kwargs: object) -> NoReturn:  # type: ignore[misc]
+            raise RuntimeError(
+                f"Cannot create {cls.__name__} -- psycopg2 module is not installed"
+            )
+
+
+try:
+    from .sqlite import Sqlite3Engine
+except ImportError:
+
+    class Sqlite3Engine(BaseDatabaseEngine):  # type: ignore[no-redef]
+        def __new__(cls, *args: object, **kwargs: object) -> NoReturn:  # type: ignore[misc]
+            raise RuntimeError(
+                f"Cannot create {cls.__name__} -- sqlite3 module is not installed"
+            )
 
 
 def create_engine(database_config: Mapping[str, Any]) -> BaseDatabaseEngine:
@@ -30,4 +54,10 @@ def create_engine(database_config: Mapping[str, Any]) -> BaseDatabaseEngine:
     raise RuntimeError("Unsupported database engine '%s'" % (name,))
 
 
-__all__ = ["create_engine", "BaseDatabaseEngine", "IncorrectDatabaseSetup"]
+__all__ = [
+    "create_engine",
+    "BaseDatabaseEngine",
+    "PostgresEngine",
+    "Sqlite3Engine",
+    "IncorrectDatabaseSetup",
+]
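The comment at the top of this hunk describes the pattern: keep both engine classes importable so `isinstance()` checks keep working, but make instantiation fail loudly when the driver is absent. A standalone sketch of the same idea follows; the module name `some_optional_driver` is hypothetical.

    from typing import NoReturn

    try:
        import some_optional_driver  # hypothetical optional dependency

        class OptionalEngine:
            """Real implementation, available when the driver is installed."""

            def __init__(self) -> None:
                self.driver = some_optional_driver

    except ImportError:

        class OptionalEngine:  # type: ignore[no-redef]
            """Importable stand-in: supports isinstance(), refuses construction."""

            def __new__(cls, *args: object, **kwargs: object) -> NoReturn:
                raise RuntimeError(
                    f"Cannot create {cls.__name__} -- some_optional_driver is not installed"
                )

    # Callers can still branch on the class without ever instantiating it:
    def describe(engine: object) -> str:
        return "driver-backed" if isinstance(engine, OptionalEngine) else "other"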
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 391f8ed24a..517f9d5f98 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -15,6 +15,8 @@
 import logging
 from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, Tuple, cast
 
+import psycopg2.extensions
+
 from synapse.storage.engines._base import (
     BaseDatabaseEngine,
     IncorrectDatabaseSetup,
@@ -23,18 +25,14 @@ from synapse.storage.engines._base import (
 from synapse.storage.types import Cursor
 
 if TYPE_CHECKING:
-    import psycopg2  # noqa: F401
-
     from synapse.storage.database import LoggingDatabaseConnection
 
 
 logger = logging.getLogger(__name__)
 
 
-class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]):
+class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]):
     def __init__(self, database_config: Mapping[str, Any]):
-        import psycopg2.extensions
-
         super().__init__(psycopg2, database_config)
         psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
 
@@ -69,7 +67,9 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]):
         return collation, ctype
 
     def check_database(
-        self, db_conn: "psycopg2.connection", allow_outdated_version: bool = False
+        self,
+        db_conn: psycopg2.extensions.connection,
+        allow_outdated_version: bool = False,
     ) -> None:
         # Get the version of PostgreSQL that we're using. As per the psycopg2
         # docs: The number is formed by converting the major, minor, and
@@ -176,8 +176,6 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]):
         return True
 
     def is_deadlock(self, error: Exception) -> bool:
-        import psycopg2.extensions
-
         if isinstance(error, psycopg2.DatabaseError):
             # https://www.postgresql.org/docs/current/static/errcodes-appendix.html
             # "40001" serialization_failure
@@ -185,7 +183,7 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]):
             return error.pgcode in ["40001", "40P01"]
         return False
 
-    def is_connection_closed(self, conn: "psycopg2.connection") -> bool:
+    def is_connection_closed(self, conn: psycopg2.extensions.connection) -> bool:
         return bool(conn.closed)
 
     def lock_table(self, txn: Cursor, table: str) -> None:
@@ -205,18 +203,16 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]):
         else:
             return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100)
 
-    def in_transaction(self, conn: "psycopg2.connection") -> bool:
-        import psycopg2.extensions
-
+    def in_transaction(self, conn: psycopg2.extensions.connection) -> bool:
         return conn.status != psycopg2.extensions.STATUS_READY
 
     def attempt_to_set_autocommit(
-        self, conn: "psycopg2.connection", autocommit: bool
+        self, conn: psycopg2.extensions.connection, autocommit: bool
     ) -> None:
         return conn.set_session(autocommit=autocommit)
 
     def attempt_to_set_isolation_level(
-        self, conn: "psycopg2.connection", isolation_level: Optional[int]
+        self, conn: psycopg2.extensions.connection, isolation_level: Optional[int]
     ) -> None:
         if isolation_level is None:
             isolation_level = self.default_isolation_level
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index c33df42084..09a2b58f4c 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -23,8 +23,7 @@ from typing_extensions import Counter as CounterType
 
 from synapse.config.homeserver import HomeServerConfig
 from synapse.storage.database import LoggingDatabaseConnection
-from synapse.storage.engines import BaseDatabaseEngine
-from synapse.storage.engines.postgres import PostgresEngine
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
 from synapse.storage.schema import SCHEMA_COMPAT_VERSION, SCHEMA_VERSION
 from synapse.storage.types import Cursor
 
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 96aaffb53c..af3bab2c15 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -546,6 +546,7 @@ class StateFilter:
         #  the sender of a piece of state wasn't actually in the room, then clearly that
         #  state shouldn't have been returned.
         #  We should at least add some tests around this to see what happens.
+        #  https://github.com/matrix-org/synapse/issues/13006
 
         # if we haven't requested membership events, then it depends on the value of
         # 'include_others'
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index d8046b7553..6323d452e7 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -19,6 +19,7 @@ from typing import Any, Callable, Dict, Generator, Optional
 
 import attr
 from frozendict import frozendict
+from matrix_common.versionstring import get_distribution_version_string
 
 from twisted.internet import defer, task
 from twisted.internet.defer import Deferred
@@ -183,3 +184,8 @@ def log_failure(
     if not consumeErrors:
         return failure
     return None
+
+
+# Version string with git info. Computed here once so that we don't invoke git multiple
+# times.
+SYNAPSE_VERSION = get_distribution_version_string("matrix-synapse", __file__)
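The same compute-once-at-import idea, sketched with only the standard library (`importlib.metadata` instead of `matrix_common`); `matrix-synapse` is the distribution name used above, everything else here is illustrative.

    from importlib.metadata import PackageNotFoundError, version

    try:
        # Looked up once per process, at import time, rather than on every call.
        MODULE_VERSION = version("matrix-synapse")
    except PackageNotFoundError:
        MODULE_VERSION = "unknown"

    def user_agent() -> str:
        # Callers reuse the module-level constant instead of re-querying
        # package metadata each time.
        return f"Synapse/{MODULE_VERSION}"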
diff --git a/tests/federation/transport/server/test__base.py b/tests/federation/transport/server/test__base.py
index e63885c1c9..d33e86db4c 100644
--- a/tests/federation/transport/server/test__base.py
+++ b/tests/federation/transport/server/test__base.py
@@ -24,7 +24,7 @@ from synapse.types import JsonDict
 from synapse.util.ratelimitutils import FederationRateLimiter
 
 from tests import unittest
-from tests.http.server._base import EndpointCancellationTestHelperMixin
+from tests.http.server._base import test_disconnect
 
 
 class CancellableFederationServlet(BaseFederationServlet):
@@ -54,9 +54,7 @@ class CancellableFederationServlet(BaseFederationServlet):
         return HTTPStatus.OK, {"result": True}
 
 
-class BaseFederationServletCancellationTests(
-    unittest.FederatingHomeserverTestCase, EndpointCancellationTestHelperMixin
-):
+class BaseFederationServletCancellationTests(unittest.FederatingHomeserverTestCase):
     """Tests for `BaseFederationServlet` cancellation."""
 
     skip = "`BaseFederationServlet` does not support cancellation yet."
@@ -86,7 +84,7 @@ class BaseFederationServletCancellationTests(
         # request won't be processed.
         self.pump()
 
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=True,
@@ -106,7 +104,7 @@ class BaseFederationServletCancellationTests(
         # request won't be processed.
         self.pump()
 
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=False,
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 01ea7d2a42..b8b465d35b 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -154,7 +154,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         self._record_users()
 
         # delete the device
-        self.get_success(self.handler.delete_device(user1, "abc"))
+        self.get_success(self.handler.delete_devices(user1, ["abc"]))
 
         # check the device was deleted
         self.get_failure(self.handler.get_device(user1, "abc"), NotFoundError)
@@ -179,7 +179,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         )
 
         # delete the device
-        self.get_success(self.handler.delete_device(user1, "abc"))
+        self.get_success(self.handler.delete_devices(user1, ["abc"]))
 
         # check that the device_inbox was deleted
         res = self.get_success(
diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py
index 0546655690..aa650756e4 100644
--- a/tests/handlers/test_room_summary.py
+++ b/tests/handlers/test_room_summary.py
@@ -178,7 +178,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
             result_room_ids.append(result_room["room_id"])
             result_children_ids.append(
                 [
-                    (cs["room_id"], cs["state_key"])
+                    (result_room["room_id"], cs["state_key"])
                     for cs in result_room["children_state"]
                 ]
             )
diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py
index b9f1a381aa..994d8880b0 100644
--- a/tests/http/server/_base.py
+++ b/tests/http/server/_base.py
@@ -12,89 +12,543 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import inspect
+import itertools
+import logging
 from http import HTTPStatus
-from typing import Any, Callable, Optional, Union
+from typing import (
+    Any,
+    Callable,
+    ContextManager,
+    Dict,
+    List,
+    Optional,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+)
 from unittest import mock
+from unittest.mock import Mock
 
+from twisted.internet.defer import Deferred
 from twisted.internet.error import ConnectionDone
+from twisted.python.failure import Failure
+from twisted.test.proto_helpers import MemoryReactorClock
+from twisted.web.server import Site
 
 from synapse.http.server import (
     HTTP_STATUS_REQUEST_CANCELLED,
     respond_with_html_bytes,
     respond_with_json,
 )
+from synapse.http.site import SynapseRequest
+from synapse.logging.context import LoggingContext, make_deferred_yieldable
 from synapse.types import JsonDict
 
-from tests import unittest
-from tests.server import FakeChannel, ThreadedMemoryReactorClock
+from tests.server import FakeChannel, make_request
+from tests.unittest import logcontext_clean
 
+logger = logging.getLogger(__name__)
 
-class EndpointCancellationTestHelperMixin(unittest.TestCase):
-    """Provides helper methods for testing cancellation of endpoints."""
 
-    def _test_disconnect(
-        self,
-        reactor: ThreadedMemoryReactorClock,
-        channel: FakeChannel,
-        expect_cancellation: bool,
-        expected_body: Union[bytes, JsonDict],
-        expected_code: Optional[int] = None,
-    ) -> None:
-        """Disconnects an in-flight request and checks the response.
+T = TypeVar("T")
 
-        Args:
-            reactor: The twisted reactor running the request handler.
-            channel: The `FakeChannel` for the request.
-            expect_cancellation: `True` if request processing is expected to be
-                cancelled, `False` if the request should run to completion.
-            expected_body: The expected response for the request.
-            expected_code: The expected status code for the request. Defaults to `200`
-                or `499` depending on `expect_cancellation`.
-        """
-        # Determine the expected status code.
-        if expected_code is None:
-            if expect_cancellation:
-                expected_code = HTTP_STATUS_REQUEST_CANCELLED
-            else:
-                expected_code = HTTPStatus.OK
-
-        request = channel.request
-        self.assertFalse(
-            channel.is_finished(),
+
+def test_disconnect(
+    reactor: MemoryReactorClock,
+    channel: FakeChannel,
+    expect_cancellation: bool,
+    expected_body: Union[bytes, JsonDict],
+    expected_code: Optional[int] = None,
+) -> None:
+    """Disconnects an in-flight request and checks the response.
+
+    Args:
+        reactor: The twisted reactor running the request handler.
+        channel: The `FakeChannel` for the request.
+        expect_cancellation: `True` if request processing is expected to be cancelled,
+            `False` if the request should run to completion.
+        expected_body: The expected response for the request.
+        expected_code: The expected status code for the request. Defaults to `200` or
+            `499` depending on `expect_cancellation`.
+    """
+    # Determine the expected status code.
+    if expected_code is None:
+        if expect_cancellation:
+            expected_code = HTTP_STATUS_REQUEST_CANCELLED
+        else:
+            expected_code = HTTPStatus.OK
+
+    request = channel.request
+    if channel.is_finished():
+        raise AssertionError(
             "Request finished before we could disconnect - "
-            "was `await_result=False` passed to `make_request`?",
+            "ensure `await_result=False` is passed to `make_request`.",
         )
 
-        # We're about to disconnect the request. This also disconnects the channel, so
-        # we have to rely on mocks to extract the response.
-        respond_method: Callable[..., Any]
-        if isinstance(expected_body, bytes):
-            respond_method = respond_with_html_bytes
+    # We're about to disconnect the request. This also disconnects the channel, so we
+    # have to rely on mocks to extract the response.
+    respond_method: Callable[..., Any]
+    if isinstance(expected_body, bytes):
+        respond_method = respond_with_html_bytes
+    else:
+        respond_method = respond_with_json
+
+    with mock.patch(
+        f"synapse.http.server.{respond_method.__name__}", wraps=respond_method
+    ) as respond_mock:
+        # Disconnect the request.
+        request.connectionLost(reason=ConnectionDone())
+
+        if expect_cancellation:
+            # An immediate cancellation is expected.
+            respond_mock.assert_called_once()
         else:
-            respond_method = respond_with_json
+            respond_mock.assert_not_called()
 
-        with mock.patch(
-            f"synapse.http.server.{respond_method.__name__}", wraps=respond_method
-        ) as respond_mock:
-            # Disconnect the request.
-            request.connectionLost(reason=ConnectionDone())
+            # The handler is expected to run to completion.
+            reactor.advance(1.0)
+            respond_mock.assert_called_once()
 
-            if expect_cancellation:
-                # An immediate cancellation is expected.
-                respond_mock.assert_called_once()
-                args, _kwargs = respond_mock.call_args
-                code, body = args[1], args[2]
-                self.assertEqual(code, expected_code)
-                self.assertEqual(request.code, expected_code)
-                self.assertEqual(body, expected_body)
-            else:
-                respond_mock.assert_not_called()
-
-                # The handler is expected to run to completion.
-                reactor.pump([1.0])
+        args, _kwargs = respond_mock.call_args
+        code, body = args[1], args[2]
+
+        if code != expected_code:
+            raise AssertionError(
+                f"{code} != {expected_code} : "
+                "Request did not finish with the expected status code."
+            )
+
+        if request.code != expected_code:
+            raise AssertionError(
+                f"{request.code} != {expected_code} : "
+                "Request did not finish with the expected status code."
+            )
+
+        if body != expected_body:
+            raise AssertionError(
+                f"{body!r} != {expected_body!r} : "
+                "Request did not finish with the expected status code."
+            )
+
+
+@logcontext_clean
+def make_request_with_cancellation_test(
+    test_name: str,
+    reactor: MemoryReactorClock,
+    site: Site,
+    method: str,
+    path: str,
+    content: Union[bytes, str, JsonDict] = b"",
+) -> FakeChannel:
+    """Performs a request repeatedly, disconnecting at successive `await`s, until
+    one completes.
+
+    Fails if:
+        * A logging context is lost during cancellation.
+        * A logging context gets restarted after it is marked as finished, eg. if
+            a request's logging context is used by some processing started by the
+            request, but the request neglects to cancel that processing or wait for it
+            to complete.
+
+            Note that "Re-starting finished log context" errors get raised within the
+            request handling code and may or may not get caught. These errors will
+            likely manifest as a different logging context error at a later point. When
+            debugging logging context failures, setting a breakpoint in
+            `logcontext_error` can prove useful.
+        * A request gets stuck, possibly due to a previous cancellation.
+        * The request does not return a 499 when the client disconnects.
+            This implies that a `CancelledError` was swallowed somewhere.
+
+    It is up to the caller to verify that the request returns the correct data when
+    it finally runs to completion.
+
+    Note that this function can only cover a single code path and does not guarantee
+    that an endpoint is compatible with cancellation on every code path.
+    To allow inspection of the code path that is being tested, this function will
+    log the stack trace at every `await` that gets cancelled. To view these log
+    lines, `trial` can be run with the `SYNAPSE_TEST_LOG_LEVEL=INFO` environment
+    variable, which will include the log lines in `_trial_temp/test.log`.
+    Alternatively, `_log_for_request` can be modified to write to `sys.stdout`.
+
+    Args:
+        test_name: The name of the test, which will be logged.
+        reactor: The twisted reactor running the request handler.
+        site: The twisted `Site` to use to render the request.
+        method: The HTTP request method ("verb").
+        path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and
+            such).
+        content: The body of the request.
+
+    Returns:
+        The `FakeChannel` object which stores the result of the final request that
+        runs to completion.
+    """
+    # To process a request, a coroutine run is created for the async method handling
+    # the request. That method may then start other coroutine runs, wrapped in
+    # `Deferred`s.
+    #
+    # We would like to trigger a cancellation at the first `await`, re-run the
+    # request and cancel at the second `await`, and so on. By patching
+    # `Deferred.__next__`, we can intercept `await`s, track which ones we have or
+    # have not seen, and force them to block when they wouldn't have.
+
+    # The set of previously seen `await`s.
+    # Each element is a stringified stack trace.
+    seen_awaits: Set[Tuple[str, ...]] = set()
+
+    _log_for_request(
+        0, f"Running make_request_with_cancellation_test for {test_name}..."
+    )
+
+    for request_number in itertools.count(1):
+        deferred_patch = Deferred__next__Patch(seen_awaits, request_number)
+
+        try:
+            with mock.patch(
+                "synapse.http.server.respond_with_json", wraps=respond_with_json
+            ) as respond_mock:
+                with deferred_patch.patch():
+                    # Start the request.
+                    channel = make_request(
+                        reactor, site, method, path, content, await_result=False
+                    )
+                    request = channel.request
+
+                    # Run the request until we see a new `await` which we have not
+                    # yet cancelled at, or it completes.
+                    while not respond_mock.called and not deferred_patch.new_await_seen:
+                        previous_awaits_seen = deferred_patch.awaits_seen
+
+                        reactor.advance(0.0)
+
+                        if deferred_patch.awaits_seen == previous_awaits_seen:
+                            # We didn't see any progress. Try advancing the clock.
+                            reactor.advance(1.0)
+
+                        if deferred_patch.awaits_seen == previous_awaits_seen:
+                            # We still didn't see any progress. The request might be
+                            # stuck.
+                            raise AssertionError(
+                                "Request appears to be stuck, possibly due to a "
+                                "previous cancelled request"
+                            )
+
+                if respond_mock.called:
+                    # The request ran to completion and we are done with testing it.
+
+                    # `respond_with_json` writes the response asynchronously, so we
+                    # might have to give the reactor a kick before the channel gets
+                    # the response.
+                    deferred_patch.unblock_awaits()
+                    channel.await_result()
+
+                    return channel
+
+                # Disconnect the client and wait for the response.
+                request.connectionLost(reason=ConnectionDone())
+
+                _log_for_request(request_number, "--- disconnected ---")
+
+                # Advance the reactor just enough to get a response.
+                # We don't want to advance the reactor too far, because we can only
+                # detect re-starts of finished logging contexts after we set the
+                # finished flag below.
+                for _ in range(2):
+                    # We may need to pump the reactor to allow `delay_cancellation`s to
+                    # finish.
+                    if not respond_mock.called:
+                        reactor.advance(0.0)
+
+                    # Try advancing the clock if that didn't work.
+                    if not respond_mock.called:
+                        reactor.advance(1.0)
+
+                    # `delay_cancellation`s may be waiting for processing that we've
+                    # forced to block. Try unblocking them, followed by another round of
+                    # pumping the reactor.
+                    if not respond_mock.called:
+                        deferred_patch.unblock_awaits()
+
+                # Mark the request's logging context as finished. If it gets
+                # activated again, an `AssertionError` will be raised and bubble up
+                # through request handling code. This `AssertionError` may or may not be
+                # caught. Eventually some other code will deactivate the logging
+                # context which will raise a different `AssertionError` because
+                # resource usage won't have been correctly tracked.
+                if isinstance(request, SynapseRequest) and request.logcontext:
+                    request.logcontext.finished = True
+
+                # Check that the request finished with a 499,
+                # ie. the `CancelledError` wasn't swallowed.
                 respond_mock.assert_called_once()
-                args, _kwargs = respond_mock.call_args
-                code, body = args[1], args[2]
-                self.assertEqual(code, expected_code)
-                self.assertEqual(request.code, expected_code)
-                self.assertEqual(body, expected_body)
+
+                if request.code != HTTP_STATUS_REQUEST_CANCELLED:
+                    raise AssertionError(
+                        f"{request.code} != {HTTP_STATUS_REQUEST_CANCELLED} : "
+                        "Cancelled request did not finish with the correct status code."
+                    )
+        finally:
+            # Unblock any processing that might be shared between requests, if we
+            # haven't already done so.
+            deferred_patch.unblock_awaits()
+
+    assert False, "unreachable"  # noqa: B011
+
+
+class Deferred__next__Patch:
+    """A `Deferred.__next__` patch that will intercept `await`s and force them
+    to block once it sees a new `await`.
+
+    When done with the patch, `unblock_awaits()` must be called to clean up after any
+    `await`s that were forced to block, otherwise processing shared between multiple
+    requests, such as database queries started by `@cached`, will become permanently
+    stuck.
+
+    Usage:
+        seen_awaits = set()
+        deferred_patch = Deferred__next__Patch(seen_awaits, 1)
+        try:
+            with deferred_patch.patch():
+                # do things
+                ...
+        finally:
+            deferred_patch.unblock_awaits()
+    """
+
+    def __init__(self, seen_awaits: Set[Tuple[str, ...]], request_number: int):
+        """
+        Args:
+            seen_awaits: The set of stack traces of `await`s that have been previously
+                seen. When the `Deferred.__next__` patch sees a new `await`, it will add
+                it to the set.
+            request_number: The request number to log against.
+        """
+        self._request_number = request_number
+        self._seen_awaits = seen_awaits
+
+        self._original_Deferred___next__ = Deferred.__next__
+
+        # The number of `await`s on `Deferred`s we have seen so far.
+        self.awaits_seen = 0
+
+        # Whether we have seen a new `await` not in `seen_awaits`.
+        self.new_await_seen = False
+
+        # To force `await`s on resolved `Deferred`s to block, we make up a new
+        # unresolved `Deferred` and return it out of `Deferred.__next__` /
+        # `coroutine.send()`. We have to resolve it later, in case the `await`ing
+        # coroutine is part of some shared processing, such as `@cached`.
+        self._to_unblock: Dict[Deferred, Union[object, Failure]] = {}
+
+        # The last stack we logged.
+        self._previous_stack: List[inspect.FrameInfo] = []
+
+    def patch(self) -> ContextManager[Mock]:
+        """Returns a context manager which patches `Deferred.__next__`."""
+
+        def Deferred___next__(
+            deferred: "Deferred[T]", value: object = None
+        ) -> "Deferred[T]":
+            """Intercepts `await`s on `Deferred`s and rigs them to block once we have
+            seen enough of them.
+
+            `Deferred.__next__` will normally:
+                * return `self` if the `Deferred` is unresolved, in which case
+                   `coroutine.send()` will return the `Deferred`, and
+                   `_defer.inlineCallbacks` will stop running the coroutine until the
+                   `Deferred` is resolved.
+                * raise a `StopIteration(result)`, containing the result of the `await`.
+                * raise another exception, which will come out of the `await`.
+            """
+            self.awaits_seen += 1
+
+            stack = _get_stack(skip_frames=1)
+            stack_hash = _hash_stack(stack)
+
+            if stack_hash not in self._seen_awaits:
+                # Block at the current `await` onwards.
+                self._seen_awaits.add(stack_hash)
+                self.new_await_seen = True
+
+            if not self.new_await_seen:
+                # This `await` isn't interesting. Let it proceed normally.
+
+                # Don't log the stack. It's been seen before in a previous run.
+                self._previous_stack = stack
+
+                return self._original_Deferred___next__(deferred, value)
+
+            # We want to block at the current `await`.
+            if deferred.called and not deferred.paused:
+                # This `Deferred` already has a result.
+                # We return a new, unresolved, `Deferred` for `_inlineCallbacks` to wait
+                # on. This blocks the coroutine that did this `await`.
+                # We queue it up for unblocking later.
+                new_deferred: "Deferred[T]" = Deferred()
+                self._to_unblock[new_deferred] = deferred.result
+
+                _log_await_stack(
+                    stack,
+                    self._previous_stack,
+                    self._request_number,
+                    "force-blocked await",
+                )
+                self._previous_stack = stack
+
+                return make_deferred_yieldable(new_deferred)
+
+            # This `Deferred` does not have a result yet.
+            # The `await` will block normally, so we don't have to do anything.
+            _log_await_stack(
+                stack,
+                self._previous_stack,
+                self._request_number,
+                "blocking await",
+            )
+            self._previous_stack = stack
+
+            return self._original_Deferred___next__(deferred, value)
+
+        return mock.patch.object(Deferred, "__next__", new=Deferred___next__)
+
+    def unblock_awaits(self) -> None:
+        """Unblocks any shared processing that we forced to block.
+
+        Must be called when done, otherwise processing shared between multiple requests,
+        such as database queries started by `@cached`, will become permanently stuck.
+        """
+        to_unblock = self._to_unblock
+        self._to_unblock = {}
+        for deferred, result in to_unblock.items():
+            deferred.callback(result)
+
+
+def _log_for_request(request_number: int, message: str) -> None:
+    """Logs a message for an iteration of `make_request_with_cancellation_test`."""
+    # We want consistent alignment when logging stack traces, so ensure the logging
+    # context has a fixed width name.
+    with LoggingContext(name=f"request-{request_number:<2}"):
+        logger.info(message)
+
+
+def _log_await_stack(
+    stack: List[inspect.FrameInfo],
+    previous_stack: List[inspect.FrameInfo],
+    request_number: int,
+    note: str,
+) -> None:
+    """Logs the stack for an `await` in `make_request_with_cancellation_test`.
+
+    Only logs the part of the stack that has changed since the previous call.
+
+    Example output looks like:
+    ```
+    delay_cancellation:750 (synapse/util/async_helpers.py:750)
+        DatabasePool._runInteraction:768 (synapse/storage/database.py:768)
+            > *blocked on await* at DatabasePool.runWithConnection:891 (synapse/storage/database.py:891)
+    ```
+
+    Args:
+        stack: The stack to log, as returned by `_get_stack()`.
+        previous_stack: The previous stack logged, with callers appearing before
+            callees.
+        request_number: The request number to log against.
+        note: A note to attach to the last stack frame, eg. "blocked on await".
+    """
+    for i, frame_info in enumerate(stack[:-1]):
+        # Skip any frames in common with the previous logging.
+        if i < len(previous_stack) and frame_info == previous_stack[i]:
+            continue
+
+        frame = _format_stack_frame(frame_info)
+        message = f"{'  ' * i}{frame}"
+        _log_for_request(request_number, message)
+
+    # Always print the final frame with the `await`.
+    # If the frame with the `await` started another coroutine run, we may have already
+    # printed a deeper stack which includes our final frame. We want to log where all
+    # `await`s happen, so we reprint the frame in this case.
+    i = len(stack) - 1
+    frame_info = stack[i]
+    frame = _format_stack_frame(frame_info)
+    message = f"{'  ' * i}> *{note}* at {frame}"
+    _log_for_request(request_number, message)
+
+
+def _format_stack_frame(frame_info: inspect.FrameInfo) -> str:
+    """Returns a string representation of a stack frame.
+
+    Used for debug logging.
+
+    Returns:
+        A string, formatted like
+        "JsonResource._async_render:559 (synapse/http/server.py:559)".
+    """
+    method_name = _get_stack_frame_method_name(frame_info)
+
+    return (
+        f"{method_name}:{frame_info.lineno} ({frame_info.filename}:{frame_info.lineno})"
+    )
+
+
+def _get_stack(skip_frames: int) -> List[inspect.FrameInfo]:
+    """Captures the stack for a request.
+
+    Skips any twisted frames and stops at `JsonResource.wrapped_async_request_handler`.
+
+    Used for debug logging.
+
+    Returns:
+        A list of `inspect.FrameInfo`s, with callers appearing before callees.
+    """
+    stack = []
+
+    skip_frames += 1  # Also skip `_get_stack` itself.
+
+    for frame_info in inspect.stack()[skip_frames:]:
+        # Skip any twisted `inlineCallbacks` gunk.
+        if "/twisted/" in frame_info.filename:
+            continue
+
+        # Exclude the reactor frame, upwards.
+        method_name = _get_stack_frame_method_name(frame_info)
+        if method_name == "ThreadedMemoryReactorClock.advance":
+            break
+
+        stack.append(frame_info)
+
+        # Stop at `JsonResource`'s `wrapped_async_request_handler`, which is the entry
+        # point for request handling.
+        if frame_info.function == "wrapped_async_request_handler":
+            break
+
+    return stack[::-1]
+
+
+def _get_stack_frame_method_name(frame_info: inspect.FrameInfo) -> str:
+    """Returns the name of a stack frame's method.
+
+    eg. "JsonResource._async_render".
+    """
+    method_name = frame_info.function
+
+    # Prefix the class name for instance methods.
+    frame_self = frame_info.frame.f_locals.get("self")
+    if frame_self:
+        method = getattr(frame_self, method_name, None)
+        if method:
+            method_name = method.__qualname__
+        else:
+            # We couldn't find the method on `self`.
+            # Make something up. It's useful to know which class "contains" a
+            # function anyway.
+            method_name = f"{type(frame_self).__name__} {method_name}"
+
+    return method_name
+
+
+def _hash_stack(stack: List[inspect.FrameInfo]) -> Tuple[str, ...]:
+    """Turns a stack into a hashable value that can be put into a set."""
+    return tuple(_format_stack_frame(frame) for frame in stack)
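To make the `Deferred.__next__` interception above more concrete, here is a minimal, self-contained sketch (not Synapse code) that patches the method just to count `await`s on already-resolved `Deferred`s; the counting stands in for the blocking and stack-hashing machinery in `Deferred__next__Patch`.

    from unittest import mock

    from twisted.internet.defer import Deferred, ensureDeferred, succeed

    intercepted = []
    original_next = Deferred.__next__

    def counting_next(self, value=None):
        # Record which Deferred was awaited, then fall through to the real
        # __next__, which raises StopIteration(result) for resolved Deferreds.
        intercepted.append(self)
        return original_next(self, value)

    async def handler():
        # Each of these awaits goes through Deferred.__next__ exactly once,
        # because the Deferreds are already resolved.
        await succeed(1)
        await succeed(2)

    with mock.patch.object(Deferred, "__next__", new=counting_next):
        ensureDeferred(handler())

    assert len(intercepted) == 2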
diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py
index b3655d7b44..bb966c80c6 100644
--- a/tests/http/test_servlet.py
+++ b/tests/http/test_servlet.py
@@ -30,7 +30,7 @@ from synapse.server import HomeServer
 from synapse.types import JsonDict
 
 from tests import unittest
-from tests.http.server._base import EndpointCancellationTestHelperMixin
+from tests.http.server._base import test_disconnect
 
 
 def make_request(content):
@@ -108,9 +108,7 @@ class CancellableRestServlet(RestServlet):
         return HTTPStatus.OK, {"result": True}
 
 
-class TestRestServletCancellation(
-    unittest.HomeserverTestCase, EndpointCancellationTestHelperMixin
-):
+class TestRestServletCancellation(unittest.HomeserverTestCase):
     """Tests for `RestServlet` cancellation."""
 
     servlets = [
@@ -120,7 +118,7 @@ class TestRestServletCancellation(
     def test_cancellable_disconnect(self) -> None:
         """Test that handlers with the `@cancellable` flag can be cancelled."""
         channel = self.make_request("GET", "/sleep", await_result=False)
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=True,
@@ -130,7 +128,7 @@ class TestRestServletCancellation(
     def test_uncancellable_disconnect(self) -> None:
         """Test that handlers without the `@cancellable` flag cannot be cancelled."""
         channel = self.make_request("POST", "/sleep", await_result=False)
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=False,
diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py
index a5ab093a27..822a957c3a 100644
--- a/tests/replication/http/test__base.py
+++ b/tests/replication/http/test__base.py
@@ -25,7 +25,7 @@ from synapse.server import HomeServer
 from synapse.types import JsonDict
 
 from tests import unittest
-from tests.http.server._base import EndpointCancellationTestHelperMixin
+from tests.http.server._base import test_disconnect
 
 
 class CancellableReplicationEndpoint(ReplicationEndpoint):
@@ -69,9 +69,7 @@ class UncancellableReplicationEndpoint(ReplicationEndpoint):
         return HTTPStatus.OK, {"result": True}
 
 
-class ReplicationEndpointCancellationTestCase(
-    unittest.HomeserverTestCase, EndpointCancellationTestHelperMixin
-):
+class ReplicationEndpointCancellationTestCase(unittest.HomeserverTestCase):
     """Tests for `ReplicationEndpoint` cancellation."""
 
     def create_test_resource(self):
@@ -87,7 +85,7 @@ class ReplicationEndpointCancellationTestCase(
         """Test that handlers with the `@cancellable` flag can be cancelled."""
         path = f"{REPLICATION_PREFIX}/{CancellableReplicationEndpoint.NAME}/"
         channel = self.make_request("POST", path, await_result=False)
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=True,
@@ -98,7 +96,7 @@ class ReplicationEndpointCancellationTestCase(
         """Test that handlers without the `@cancellable` flag cannot be cancelled."""
         path = f"{REPLICATION_PREFIX}/{UncancellableReplicationEndpoint.NAME}/"
         channel = self.make_request("POST", path, await_result=False)
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=False,
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index 62e4db23ef..aa84906548 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -728,6 +728,7 @@ class RelationsTestCase(BaseRelationsTestCase):
 
 
 class RelationPaginationTestCase(BaseRelationsTestCase):
+    @unittest.override_config({"experimental_features": {"msc3715_enabled": True}})
     def test_basic_paginate_relations(self) -> None:
         """Tests that calling pagination API correctly the latest relations."""
         channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index f523d89b8f..4be83dfd6d 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -42,6 +42,7 @@ from synapse.util import Clock
 from synapse.util.stringutils import random_string
 
 from tests import unittest
+from tests.http.server._base import make_request_with_cancellation_test
 from tests.test_utils import make_awaitable
 
 PATH_PREFIX = b"/_matrix/client/api/v1"
@@ -471,6 +472,49 @@ class RoomPermissionsTestCase(RoomBase):
         )
 
 
+class RoomStateTestCase(RoomBase):
+    """Tests /rooms/$room_id/state."""
+
+    user_id = "@sid1:red"
+
+    def test_get_state_cancellation(self) -> None:
+        """Test cancellation of a `/rooms/$room_id/state` request."""
+        room_id = self.helper.create_room_as(self.user_id)
+        channel = make_request_with_cancellation_test(
+            "test_state_cancellation",
+            self.reactor,
+            self.site,
+            "GET",
+            "/rooms/%s/state" % room_id,
+        )
+
+        self.assertEqual(200, channel.code, msg=channel.result["body"])
+        self.assertCountEqual(
+            [state_event["type"] for state_event in channel.json_body],
+            {
+                "m.room.create",
+                "m.room.power_levels",
+                "m.room.join_rules",
+                "m.room.member",
+                "m.room.history_visibility",
+            },
+        )
+
+    def test_get_state_event_cancellation(self) -> None:
+        """Test cancellation of a `/rooms/$room_id/state/$event_type` request."""
+        room_id = self.helper.create_room_as(self.user_id)
+        channel = make_request_with_cancellation_test(
+            "test_state_cancellation",
+            self.reactor,
+            self.site,
+            "GET",
+            "/rooms/%s/state/m.room.member/%s" % (room_id, self.user_id),
+        )
+
+        self.assertEqual(200, channel.code, msg=channel.result["body"])
+        self.assertEqual(channel.json_body, {"membership": "join"})
+
+
 class RoomsMemberListTestCase(RoomBase):
     """Tests /rooms/$room_id/members/list REST events."""
 
@@ -591,6 +635,62 @@ class RoomsMemberListTestCase(RoomBase):
         channel = self.make_request("GET", room_path)
         self.assertEqual(200, channel.code, msg=channel.result["body"])
 
+    def test_get_member_list_cancellation(self) -> None:
+        """Test cancellation of a `/rooms/$room_id/members` request."""
+        room_id = self.helper.create_room_as(self.user_id)
+        channel = make_request_with_cancellation_test(
+            "test_get_member_list_cancellation",
+            self.reactor,
+            self.site,
+            "GET",
+            "/rooms/%s/members" % room_id,
+        )
+
+        self.assertEqual(200, channel.code, msg=channel.result["body"])
+        self.assertEqual(len(channel.json_body["chunk"]), 1)
+        self.assertLessEqual(
+            {
+                "content": {"membership": "join"},
+                "room_id": room_id,
+                "sender": self.user_id,
+                "state_key": self.user_id,
+                "type": "m.room.member",
+                "user_id": self.user_id,
+            }.items(),
+            channel.json_body["chunk"][0].items(),
+        )
+
+    def test_get_member_list_with_at_token_cancellation(self) -> None:
+        """Test cancellation of a `/rooms/$room_id/members?at=<sync token>` request."""
+        room_id = self.helper.create_room_as(self.user_id)
+
+        # first sync to get an at token
+        channel = self.make_request("GET", "/sync")
+        self.assertEqual(200, channel.code)
+        sync_token = channel.json_body["next_batch"]
+
+        channel = make_request_with_cancellation_test(
+            "test_get_member_list_with_at_token_cancellation",
+            self.reactor,
+            self.site,
+            "GET",
+            "/rooms/%s/members?at=%s" % (room_id, sync_token),
+        )
+
+        self.assertEqual(200, channel.code, msg=channel.result["body"])
+        self.assertEqual(len(channel.json_body["chunk"]), 1)
+        self.assertLessEqual(
+            {
+                "content": {"membership": "join"},
+                "room_id": room_id,
+                "sender": self.user_id,
+                "state_key": self.user_id,
+                "type": "m.room.member",
+                "user_id": self.user_id,
+            }.items(),
+            channel.json_body["chunk"][0].items(),
+        )
+
 
 class RoomsCreateTestCase(RoomBase):
     """Tests /rooms and /rooms/$room_id REST events."""
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 8370a27195..78b83d97b6 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -13,7 +13,17 @@
 # limitations under the License.
 
 import itertools
-from typing import List
+from typing import (
+    Collection,
+    Dict,
+    Iterable,
+    List,
+    Mapping,
+    Optional,
+    Set,
+    Tuple,
+    TypeVar,
+)
 
 import attr
 
@@ -22,13 +32,13 @@ from twisted.internet import defer
 from synapse.api.constants import EventTypes, JoinRules, Membership
 from synapse.api.room_versions import RoomVersions
 from synapse.event_auth import auth_types_for_event
-from synapse.events import make_event_from_dict
+from synapse.events import EventBase, make_event_from_dict
 from synapse.state.v2 import (
     _get_auth_chain_difference,
     lexicographical_topological_sort,
     resolve_events_with_store,
 )
-from synapse.types import EventID
+from synapse.types import EventID, StateMap
 
 from tests import unittest
 
@@ -48,7 +58,7 @@ ORIGIN_SERVER_TS = 0
 
 
 class FakeClock:
-    def sleep(self, msec):
+    def sleep(self, msec: float) -> "defer.Deferred[None]":
         return defer.succeed(None)
 
 
@@ -60,7 +70,14 @@ class FakeEvent:
     as domain.
     """
 
-    def __init__(self, id, sender, type, state_key, content):
+    def __init__(
+        self,
+        id: str,
+        sender: str,
+        type: str,
+        state_key: Optional[str],
+        content: Mapping[str, object],
+    ):
         self.node_id = id
         self.event_id = EventID(id, "example.com").to_string()
         self.sender = sender
@@ -69,12 +86,12 @@ class FakeEvent:
         self.content = content
         self.room_id = ROOM_ID
 
-    def to_event(self, auth_events, prev_events):
+    def to_event(self, auth_events: List[str], prev_events: List[str]) -> EventBase:
         """Given the auth_events and prev_events, convert to a Frozen Event
 
         Args:
-            auth_events (list[str]): list of event_ids
-            prev_events (list[str]): list of event_ids
+            auth_events: list of event_ids
+            prev_events: list of event_ids
 
         Returns:
             FrozenEvent
@@ -164,7 +181,7 @@ INITIAL_EDGES = ["START", "IMZ", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE"]
 
 
 class StateTestCase(unittest.TestCase):
-    def test_ban_vs_pl(self):
+    def test_ban_vs_pl(self) -> None:
         events = [
             FakeEvent(
                 id="PA",
@@ -202,7 +219,7 @@ class StateTestCase(unittest.TestCase):
 
         self.do_check(events, edges, expected_state_ids)
 
-    def test_join_rule_evasion(self):
+    def test_join_rule_evasion(self) -> None:
         events = [
             FakeEvent(
                 id="JR",
@@ -226,7 +243,7 @@ class StateTestCase(unittest.TestCase):
 
         self.do_check(events, edges, expected_state_ids)
 
-    def test_offtopic_pl(self):
+    def test_offtopic_pl(self) -> None:
         events = [
             FakeEvent(
                 id="PA",
@@ -257,7 +274,7 @@ class StateTestCase(unittest.TestCase):
 
         self.do_check(events, edges, expected_state_ids)
 
-    def test_topic_basic(self):
+    def test_topic_basic(self) -> None:
         events = [
             FakeEvent(
                 id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={}
@@ -297,7 +314,7 @@ class StateTestCase(unittest.TestCase):
 
         self.do_check(events, edges, expected_state_ids)
 
-    def test_topic_reset(self):
+    def test_topic_reset(self) -> None:
         events = [
             FakeEvent(
                 id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={}
@@ -327,7 +344,7 @@ class StateTestCase(unittest.TestCase):
 
         self.do_check(events, edges, expected_state_ids)
 
-    def test_topic(self):
+    def test_topic(self) -> None:
         events = [
             FakeEvent(
                 id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={}
@@ -380,7 +397,7 @@ class StateTestCase(unittest.TestCase):
 
         self.do_check(events, edges, expected_state_ids)
 
-    def test_mainline_sort(self):
+    def test_mainline_sort(self) -> None:
         """Tests that the mainline ordering works correctly."""
 
         events = [
@@ -434,22 +451,26 @@ class StateTestCase(unittest.TestCase):
 
         self.do_check(events, edges, expected_state_ids)
 
-    def do_check(self, events, edges, expected_state_ids):
+    def do_check(
+        self,
+        events: List[FakeEvent],
+        edges: List[List[str]],
+        expected_state_ids: List[str],
+    ) -> None:
         """Take a list of events and edges and calculate the state of the
         graph at END, and asserts it matches `expected_state_ids`
 
         Args:
-            events (list[FakeEvent])
-            edges (list[list[str]]): A list of chains of event edges, e.g.
+            events
+            edges: A list of chains of event edges, e.g.
                 `[[A, B, C]]` are edges A->B and B->C.
-            expected_state_ids (list[str]): The expected state at END, (excluding
+            expected_state_ids: The expected state at END (excluding
                 the keys that haven't changed since START).
         """
         # We want to sort the events into topological order for processing.
-        graph = {}
+        graph: Dict[str, Set[str]] = {}
 
-        # node_id -> FakeEvent
-        fake_event_map = {}
+        fake_event_map: Dict[str, FakeEvent] = {}
 
         for ev in itertools.chain(INITIAL_EVENTS, events):
             graph[ev.node_id] = set()
@@ -462,10 +483,8 @@ class StateTestCase(unittest.TestCase):
             for a, b in pairwise(edge_list):
                 graph[a].add(b)
 
-        # event_id -> FrozenEvent
-        event_map = {}
-        # node_id -> state
-        state_at_event = {}
+        event_map: Dict[str, EventBase] = {}
+        state_at_event: Dict[str, StateMap[str]] = {}
 
         # We copy the map as the sort consumes the graph
         graph_copy = {k: set(v) for k, v in graph.items()}
@@ -496,7 +515,16 @@ class StateTestCase(unittest.TestCase):
             if fake_event.state_key is not None:
                 state_after[(fake_event.type, fake_event.state_key)] = event_id
 
-            auth_types = set(auth_types_for_event(RoomVersions.V6, fake_event))
+            # This type ignore is a bit sad. Things we have tried:
+            # 1. Define a `GenericEvent` Protocol satisfied by FakeEvent, EventBase and
+            #    EventBuilder. But this is Hard because the relevant attributes are
+            #    DictProperty[T] descriptors on EventBase but normal Ts on FakeEvent.
+            # 2. Define a `GenericEvent` Protocol describing `FakeEvent` only, and
+            #    change this function to accept Union[Event, EventBase, EventBuilder].
+            #    This seems reasonable to me, but mypy isn't happy. I think that's
+            #    a mypy bug, see https://github.com/python/mypy/issues/5570
+            # Instead, resort to a type-ignore.
+            auth_types = set(auth_types_for_event(RoomVersions.V6, fake_event))  # type: ignore[arg-type]
 
             auth_events = []
             for key in auth_types:
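For context on option 1 in the comment above, a hedged sketch of what a `GenericEvent` Protocol might look like; the attribute list is an assumption based on what `FakeEvent` exposes, and, as the comment says, `EventBase` would not satisfy it because its attributes are `DictProperty` descriptors rather than plain attributes.

    from typing import Mapping, Optional, Protocol

    class GenericEvent(Protocol):
        type: str
        sender: str
        state_key: Optional[str]
        content: Mapping[str, object]
        room_id: str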
@@ -530,8 +558,14 @@ class StateTestCase(unittest.TestCase):
 
 
 class LexicographicalTestCase(unittest.TestCase):
-    def test_simple(self):
-        graph = {"l": {"o"}, "m": {"n", "o"}, "n": {"o"}, "o": set(), "p": {"o"}}
+    def test_simple(self) -> None:
+        graph: Dict[str, Set[str]] = {
+            "l": {"o"},
+            "m": {"n", "o"},
+            "n": {"o"},
+            "o": set(),
+            "p": {"o"},
+        }
 
         res = list(lexicographical_topological_sort(graph, key=lambda x: x))
 
@@ -539,7 +573,7 @@ class LexicographicalTestCase(unittest.TestCase):
 
 
 class SimpleParamStateTestCase(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         # We build up a simple DAG.
 
         event_map = {}
@@ -627,7 +661,7 @@ class SimpleParamStateTestCase(unittest.TestCase):
             ]
         }
 
-    def test_event_map_none(self):
+    def test_event_map_none(self) -> None:
         # Test that we correctly handle passing `None` as the event_map
 
         state_d = resolve_events_with_store(
@@ -649,7 +683,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase):
     events.
     """
 
-    def test_simple(self):
+    def test_simple(self) -> None:
         # Test getting the auth difference for a simple chain with a single
         # unpersisted event:
         #
@@ -695,7 +729,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase):
 
         self.assertEqual(difference, {c.event_id})
 
-    def test_multiple_unpersisted_chain(self):
+    def test_multiple_unpersisted_chain(self) -> None:
         # Test getting the auth difference for a simple chain with multiple
         # unpersisted events:
         #
@@ -752,7 +786,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase):
 
         self.assertEqual(difference, {d.event_id, c.event_id})
 
-    def test_unpersisted_events_different_sets(self):
+    def test_unpersisted_events_different_sets(self) -> None:
         # Test getting the auth difference for with multiple unpersisted events
         # in different branches:
         #
@@ -820,7 +854,10 @@ class AuthChainDifferenceTestCase(unittest.TestCase):
         self.assertEqual(difference, {d.event_id, e.event_id})
 
 
-def pairwise(iterable):
+T = TypeVar("T")
+
+
+def pairwise(iterable: Iterable[T]) -> Iterable[Tuple[T, T]]:
     "s -> (s0,s1), (s1,s2), (s2, s3), ..."
     a, b = itertools.tee(iterable)
     next(b, None)
@@ -829,24 +866,26 @@ def pairwise(iterable):
 
 @attr.s
 class TestStateResolutionStore:
-    event_map = attr.ib()
+    event_map: Dict[str, EventBase] = attr.ib()
 
-    def get_events(self, event_ids, allow_rejected=False):
+    def get_events(
+        self, event_ids: Collection[str], allow_rejected: bool = False
+    ) -> "defer.Deferred[Dict[str, EventBase]]":
         """Get events from the database
 
         Args:
-            event_ids (list): The event_ids of the events to fetch
-            allow_rejected (bool): If True return rejected events.
+            event_ids: The event_ids of the events to fetch
+            allow_rejected: If True return rejected events.
 
         Returns:
-            Deferred[dict[str, FrozenEvent]]: Dict from event_id to event.
+            Dict from event_id to event.
         """
 
         return defer.succeed(
             {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map}
         )
 
-    def _get_auth_chain(self, event_ids: List[str]) -> List[str]:
+    def _get_auth_chain(self, event_ids: Iterable[str]) -> List[str]:
         """Gets the full auth chain for a set of events (including rejected
         events).
 
@@ -880,7 +919,9 @@ class TestStateResolutionStore:
 
         return list(result)
 
-    def get_auth_chain_difference(self, room_id, auth_sets):
+    def get_auth_chain_difference(
+        self, room_id: str, auth_sets: List[Set[str]]
+    ) -> "defer.Deferred[Set[str]]":
         chains = [frozenset(self._get_auth_chain(a)) for a in auth_sets]
 
         common = set(chains[0]).intersection(*chains[1:])
diff --git a/tests/test_server.py b/tests/test_server.py
index 0f1eb43cbc..847432f791 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -34,7 +34,7 @@ from synapse.types import JsonDict
 from synapse.util import Clock
 
 from tests import unittest
-from tests.http.server._base import EndpointCancellationTestHelperMixin
+from tests.http.server._base import test_disconnect
 from tests.server import (
     FakeSite,
     ThreadedMemoryReactorClock,
@@ -407,7 +407,7 @@ class CancellableDirectServeHtmlResource(DirectServeHtmlResource):
         return HTTPStatus.OK, b"ok"
 
 
-class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMixin):
+class DirectServeJsonResourceCancellationTests(unittest.TestCase):
     """Tests for `DirectServeJsonResource` cancellation."""
 
     def setUp(self):
@@ -421,7 +421,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix
         channel = make_request(
             self.reactor, self.site, "GET", "/sleep", await_result=False
         )
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=True,
@@ -433,7 +433,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix
         channel = make_request(
             self.reactor, self.site, "POST", "/sleep", await_result=False
         )
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=False,
@@ -441,7 +441,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix
         )
 
 
-class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMixin):
+class DirectServeHtmlResourceCancellationTests(unittest.TestCase):
     """Tests for `DirectServeHtmlResource` cancellation."""
 
     def setUp(self):
@@ -455,7 +455,7 @@ class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMix
         channel = make_request(
             self.reactor, self.site, "GET", "/sleep", await_result=False
         )
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=True,
@@ -467,6 +467,6 @@ class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMix
         channel = make_request(
             self.reactor, self.site, "POST", "/sleep", await_result=False
         )
-        self._test_disconnect(
+        test_disconnect(
             self.reactor, channel, expect_cancellation=False, expected_body=b"ok"
         )