Diffstat
-rw-r--r--  CHANGES.md | 135
-rw-r--r--  INSTALL.md | 109
-rw-r--r--  README.rst | 43
-rw-r--r--  UPGRADE.rst | 18
-rw-r--r--  changelog.d/6455.feature | 1
-rw-r--r--  changelog.d/7613.feature | 1
-rw-r--r--  changelog.d/7736.feature | 1
-rw-r--r--  changelog.d/7798.feature | 1
-rw-r--r--  changelog.d/7802.misc | 1
-rw-r--r--  changelog.d/7813.misc | 1
-rw-r--r--  changelog.d/7815.bugfix | 1
-rw-r--r--  changelog.d/7817.bugfix | 1
-rw-r--r--  changelog.d/7820.misc | 1
-rw-r--r--  changelog.d/7822.bugfix | 1
-rw-r--r--  changelog.d/7827.feature | 1
-rw-r--r--  changelog.d/7829.bugfix | 1
-rw-r--r--  changelog.d/7830.feature | 1
-rw-r--r--  changelog.d/7836.misc | 1
-rw-r--r--  changelog.d/7839.docker | 1
-rw-r--r--  changelog.d/7842.feature | 1
-rw-r--r--  changelog.d/7844.bugfix | 1
-rw-r--r--  changelog.d/7846.feature | 1
-rw-r--r--  changelog.d/7847.feature | 1
-rw-r--r--  changelog.d/7848.misc | 1
-rw-r--r--  changelog.d/7849.misc | 1
-rw-r--r--  changelog.d/7850.bugfix | 1
-rw-r--r--  changelog.d/7851.misc | 1
-rw-r--r--  changelog.d/7853.misc | 1
-rw-r--r--  changelog.d/7854.bugfix | 1
-rw-r--r--  changelog.d/7855.feature | 1
-rw-r--r--  changelog.d/7856.misc | 1
-rw-r--r--  changelog.d/7858.misc | 1
-rw-r--r--  changelog.d/7859.bugfix | 1
-rw-r--r--  changelog.d/7860.misc | 1
-rw-r--r--  changelog.d/7861.misc | 1
-rw-r--r--  changelog.d/7866.bugfix | 1
-rw-r--r--  changelog.d/7868.misc | 1
-rw-r--r--  changelog.d/7869.feature | 1
-rw-r--r--  changelog.d/7870.misc | 1
-rw-r--r--  changelog.d/7871.misc | 1
-rw-r--r--  changelog.d/7872.bugfix | 1
-rw-r--r--  changelog.d/7877.misc | 1
-rw-r--r--  changelog.d/7878.removal | 1
-rw-r--r--  changelog.d/7879.feature | 1
-rw-r--r--  changelog.d/7880.bugfix | 1
-rw-r--r--  changelog.d/7881.misc | 1
-rw-r--r--  changelog.d/7882.misc | 1
-rw-r--r--  changelog.d/7885.doc | 1
-rw-r--r--  changelog.d/7888.misc | 1
-rw-r--r--  changelog.d/7889.doc | 1
-rw-r--r--  changelog.d/7890.misc | 1
-rw-r--r--  changelog.d/7892.misc | 1
-rw-r--r--  changelog.d/7895.bugfix | 1
-rw-r--r--  changelog.d/7897.misc | 2
-rw-r--r--  changelog.d/7899.doc | 1
-rw-r--r--  changelog.d/7902.feature | 1
-rw-r--r--  changelog.d/7912.misc | 1
-rw-r--r--  changelog.d/7914.misc | 1
-rw-r--r--  changelog.d/7919.misc | 1
-rw-r--r--  changelog.d/7927.misc | 1
-rw-r--r--  changelog.d/7929.misc | 1
-rw-r--r--  changelog.d/7936.misc | 1
-rw-r--r--  changelog.d/7947.misc | 1
-rw-r--r--  changelog.d/7948.misc | 1
-rw-r--r--  changelog.d/7949.misc | 1
-rw-r--r--  changelog.d/7951.misc | 1
-rw-r--r--  changelog.d/7952.misc | 1
-rw-r--r--  changelog.d/7963.misc | 1
-rw-r--r--  changelog.d/7964.feature | 1
-rw-r--r--  changelog.d/7965.misc | 1
-rw-r--r--  changelog.d/7970.misc | 1
-rw-r--r--  changelog.d/7971.misc | 1
-rw-r--r--  changelog.d/7973.misc | 1
-rw-r--r--  changelog.d/7975.misc | 1
-rw-r--r--  changelog.d/7976.misc | 1
-rw-r--r--  changelog.d/7978.bugfix | 1
-rw-r--r--  changelog.d/7979.misc | 1
-rw-r--r--  changelog.d/7980.bugfix | 1
-rw-r--r--  changelog.d/7981.misc | 1
-rw-r--r--  changelog.d/7990.doc | 1
-rw-r--r--  changelog.d/7992.doc | 1
-rwxr-xr-x  contrib/cmdclient/console.py | 16
-rw-r--r--  debian/changelog | 16
-rw-r--r--  debian/matrix-synapse.default | 2
-rw-r--r--  debian/synctl.ronn | 27
-rw-r--r--  docs/.sample_config_header.yaml | 11
-rw-r--r--  docs/ACME.md | 5
-rw-r--r--  docs/admin_api/rooms.md | 13
-rw-r--r--  docs/metrics-howto.md | 2
-rw-r--r--  docs/password_auth_providers.md | 187
-rw-r--r--  docs/postgres.md | 3
-rw-r--r--  docs/sample_config.yaml | 87
-rw-r--r--  docs/synctl_workers.md | 32
-rw-r--r--  docs/workers.md | 480
-rwxr-xr-x  scripts-dev/check_line_terminators.sh | 34
-rwxr-xr-x  scripts/synapse_port_db | 2
-rw-r--r--  synapse/__init__.py | 14
-rw-r--r--  synapse/api/auth.py | 14
-rw-r--r--  synapse/app/generic_worker.py | 15
-rw-r--r--  synapse/appservice/__init__.py | 31
-rw-r--r--  synapse/appservice/api.py | 21
-rw-r--r--  synapse/appservice/scheduler.py | 49
-rw-r--r--  synapse/config/federation.py | 12
-rw-r--r--  synapse/config/homeserver.py | 2
-rw-r--r--  synapse/config/logger.py | 2
-rw-r--r--  synapse/config/redis.py | 23
-rw-r--r--  synapse/config/registration.py | 18
-rw-r--r--  synapse/config/server.py | 7
-rw-r--r--  synapse/config/workers.py | 49
-rw-r--r--  synapse/crypto/keyring.py | 60
-rw-r--r--  synapse/events/builder.py | 17
-rw-r--r--  synapse/events/snapshot.py | 46
-rw-r--r--  synapse/events/third_party_rules.py | 55
-rw-r--r--  synapse/events/utils.py | 15
-rw-r--r--  synapse/federation/federation_client.py | 8
-rw-r--r--  synapse/federation/federation_server.py | 13
-rw-r--r--  synapse/federation/send_queue.py | 2
-rw-r--r--  synapse/federation/sender/__init__.py | 17
-rw-r--r--  synapse/federation/sender/per_destination_queue.py | 2
-rw-r--r--  synapse/federation/transport/client.py | 96
-rw-r--r--  synapse/federation/transport/server.py | 6
-rw-r--r--  synapse/groups/attestations.py | 25
-rw-r--r--  synapse/handlers/admin.py | 2
-rw-r--r--  synapse/handlers/appservice.py | 10
-rw-r--r--  synapse/handlers/auth.py | 7
-rw-r--r--  synapse/handlers/deactivate_account.py | 22
-rw-r--r--  synapse/handlers/e2e_keys.py | 24
-rw-r--r--  synapse/handlers/federation.py | 24
-rw-r--r--  synapse/handlers/groups_local.py | 35
-rw-r--r--  synapse/handlers/identity.py | 271
-rw-r--r--  synapse/handlers/message.py | 307
-rw-r--r--  synapse/handlers/presence.py | 47
-rw-r--r--  synapse/handlers/room.py | 13
-rw-r--r--  synapse/handlers/room_member.py | 37
-rw-r--r--  synapse/handlers/saml_handler.py | 3
-rw-r--r--  synapse/handlers/search.py | 7
-rw-r--r--  synapse/handlers/sync.py | 10
-rw-r--r--  synapse/handlers/ui_auth/checkers.py | 35
-rw-r--r--  synapse/http/client.py | 16
-rw-r--r--  synapse/http/federation/matrix_federation_agent.py | 16
-rw-r--r--  synapse/http/federation/srv_resolver.py | 10
-rw-r--r--  synapse/http/matrixfederationclient.py | 79
-rw-r--r--  synapse/http/server.py | 24
-rw-r--r--  synapse/http/site.py | 4
-rw-r--r--  synapse/logging/opentracing.py | 13
-rw-r--r--  synapse/logging/scopecontextmanager.py | 2
-rw-r--r--  synapse/push/action_generator.py | 7
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py | 60
-rw-r--r--  synapse/push/httppusher.py | 58
-rw-r--r--  synapse/push/presentable_names.py | 15
-rw-r--r--  synapse/push/push_tools.py | 35
-rw-r--r--  synapse/push/pusherpool.py | 70
-rw-r--r--  synapse/python_dependencies.py | 2
-rw-r--r--  synapse/replication/http/__init__.py | 2
-rw-r--r--  synapse/replication/http/federation.py | 4
-rw-r--r--  synapse/replication/http/send_event.py | 2
-rw-r--r--  synapse/replication/tcp/client.py | 8
-rw-r--r--  synapse/replication/tcp/handler.py | 115
-rw-r--r--  synapse/replication/tcp/protocol.py | 45
-rw-r--r--  synapse/replication/tcp/redis.py | 37
-rw-r--r--  synapse/rest/admin/rooms.py | 11
-rw-r--r--  synapse/rest/client/v2_alpha/_base.py | 47
-rw-r--r--  synapse/rest/client/v2_alpha/sync.py | 1
-rw-r--r--  synapse/rest/media/v1/_base.py | 23
-rw-r--r--  synapse/rest/media/v1/media_repository.py | 105
-rw-r--r--  synapse/rest/media/v1/media_storage.py | 96
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py | 275
-rw-r--r--  synapse/rest/media/v1/storage_provider.py | 62
-rw-r--r--  synapse/server.pyi | 3
-rw-r--r--  synapse/state/__init__.py | 95
-rw-r--r--  synapse/state/v1.py | 15
-rw-r--r--  synapse/state/v2.py | 107
-rw-r--r--  synapse/storage/data_stores/main/cache.py | 1
-rw-r--r--  synapse/storage/data_stores/main/event_push_actions.py | 96
-rw-r--r--  synapse/storage/data_stores/main/events.py | 48
-rw-r--r--  synapse/storage/data_stores/main/events_worker.py | 86
-rw-r--r--  synapse/storage/data_stores/main/purge_events.py | 2
-rw-r--r--  synapse/storage/data_stores/main/push_rule.py | 2
-rw-r--r--  synapse/storage/data_stores/main/room.py | 98
-rw-r--r--  synapse/storage/data_stores/main/roommember.py | 2
-rw-r--r--  synapse/storage/data_stores/main/schema/delta/58/12unread_messages.sql | 18
-rw-r--r--  synapse/storage/data_stores/main/state.py | 57
-rw-r--r--  synapse/storage/data_stores/main/stats.py | 53
-rw-r--r--  synapse/storage/data_stores/main/stream.py | 2
-rw-r--r--  synapse/storage/data_stores/main/user_directory.py | 4
-rw-r--r--  synapse/storage/data_stores/main/user_erasure_store.py | 26
-rw-r--r--  synapse/storage/data_stores/state/store.py | 37
-rw-r--r--  synapse/storage/database.py | 18
-rw-r--r--  synapse/storage/persist_events.py | 45
-rw-r--r--  synapse/storage/purge_events.py | 38
-rw-r--r--  synapse/storage/state.py | 216
-rw-r--r--  synapse/visibility.py | 30
-rw-r--r--  tests/appservice/test_appservice.py | 89
-rw-r--r--  tests/appservice/test_scheduler.py | 19
-rw-r--r--  tests/crypto/test_keyring.py | 11
-rw-r--r--  tests/events/test_snapshot.py | 36
-rw-r--r--  tests/federation/test_complexity.py | 118
-rw-r--r--  tests/federation/test_federation_sender.py | 29
-rw-r--r--  tests/handlers/test_appservice.py | 5
-rw-r--r--  tests/handlers/test_directory.py | 5
-rw-r--r--  tests/handlers/test_profile.py | 3
-rw-r--r--  tests/http/federation/test_matrix_federation_agent.py | 51
-rw-r--r--  tests/http/federation/test_srv_resolver.py | 26
-rw-r--r--  tests/http/test_fedclient.py | 50
-rw-r--r--  tests/replication/slave/storage/test_events.py | 6
-rw-r--r--  tests/replication/tcp/streams/test_events.py | 76
-rw-r--r--  tests/replication/test_federation_sender_shard.py | 13
-rw-r--r--  tests/rest/admin/test_admin.py | 4
-rw-r--r--  tests/rest/admin/test_room.py | 2947
-rw-r--r--  tests/rest/client/v1/utils.py | 20
-rw-r--r--  tests/rest/client/v2_alpha/test_sync.py | 157
-rw-r--r--  tests/rest/key/v2/test_remote_key_resource.py | 4
-rw-r--r--  tests/rest/media/v1/test_media_storage.py | 5
-rw-r--r--  tests/rest/media/v1/test_url_preview.py | 142
-rw-r--r--  tests/state/test_v2.py | 17
-rw-r--r--  tests/storage/test_event_push_actions.py | 18
-rw-r--r--  tests/storage/test_purge.py | 8
-rw-r--r--  tests/storage/test_redaction.py | 4
-rw-r--r--  tests/storage/test_room.py | 38
-rw-r--r--  tests/storage/test_roommember.py | 56
-rw-r--r--  tests/storage/test_state.py | 80
-rw-r--r--  tests/test_federation.py | 2
-rw-r--r--  tests/test_server.py | 71
-rw-r--r--  tests/test_state.py | 86
-rw-r--r--  tests/test_utils/__init__.py | 7
-rw-r--r--  tests/test_utils/event_injection.py | 28
-rw-r--r--  tests/test_visibility.py | 40
-rw-r--r--  tests/unittest.py | 4
-rw-r--r--  tests/utils.py | 14
-rw-r--r--  tox.ini | 2
230 files changed, 5355 insertions, 4052 deletions
diff --git a/CHANGES.md b/CHANGES.md
index 6d4bd23e4e..6c986808eb 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,138 @@
+Synapse 1.18.0 (2020-07-30)
+===========================
+
+Deprecation Warnings
+--------------------
+
+### Docker Tags with `-py3` Suffix
+
+From 10th August 2020, we will no longer publish Docker images with the `-py3` tag suffix. The images tagged with the `-py3` suffix have been identical to the non-suffixed tags since release 0.99.0, and the suffix is obsolete.
+
+On 10th August, we will remove the `latest-py3` tag. Existing per-release tags (such as `v1.18.0-py3`) will not be removed, but no new `-py3` tags will be added.
+
+Scripts relying on the `-py3` suffix will need to be updated.
+
+
+### TCP-based Replication
+
+When setting up worker processes, we now recommend the use of a Redis server for replication. The old direct TCP connection method is deprecated and will be removed in a future release. See [docs/workers.md](https://github.com/matrix-org/synapse/blob/release-v1.18.0/docs/workers.md) for more details.
+
+
+Improved Documentation
+----------------------
+
+- Update worker docs with latest enhancements. ([\#7969](https://github.com/matrix-org/synapse/issues/7969))
+
+
+Synapse 1.18.0rc2 (2020-07-28)
+==============================
+
+Bugfixes
+--------
+
+- Fix an `AssertionError` exception introduced in v1.18.0rc1. ([\#7876](https://github.com/matrix-org/synapse/issues/7876))
+- Fix experimental support for moving typing off master when a worker is restarted, which was broken in v1.18.0rc1. ([\#7967](https://github.com/matrix-org/synapse/issues/7967))
+
+
+Internal Changes
+----------------
+
+- Further optimise queueing of inbound replication commands. ([\#7876](https://github.com/matrix-org/synapse/issues/7876))
+
+
+Synapse 1.18.0rc1 (2020-07-27)
+==============================
+
+Features
+--------
+
+- Include room states on invite events that are sent to application services. Contributed by @Sorunome. ([\#6455](https://github.com/matrix-org/synapse/issues/6455))
+- Add delete room admin endpoint (`POST /_synapse/admin/v1/rooms/<room_id>/delete`). Contributed by @dklimpel. ([\#7613](https://github.com/matrix-org/synapse/issues/7613), [\#7953](https://github.com/matrix-org/synapse/issues/7953))
+- Add experimental support for running multiple federation sender processes. ([\#7798](https://github.com/matrix-org/synapse/issues/7798))
+- Add the option to validate the `iss` and `aud` claims for JWT logins. ([\#7827](https://github.com/matrix-org/synapse/issues/7827))
+- Add support for handling registration requests across multiple client reader workers. ([\#7830](https://github.com/matrix-org/synapse/issues/7830))
+- Add an admin API to list the users in a room. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#7842](https://github.com/matrix-org/synapse/issues/7842))
+- Allow email subjects to be customised through Synapse's configuration. ([\#7846](https://github.com/matrix-org/synapse/issues/7846))
+- Add the ability to re-activate an account from the admin API. ([\#7847](https://github.com/matrix-org/synapse/issues/7847), [\#7908](https://github.com/matrix-org/synapse/issues/7908))
+- Add experimental support for running multiple pusher workers. ([\#7855](https://github.com/matrix-org/synapse/issues/7855))
+- Add experimental support for moving typing off master. ([\#7869](https://github.com/matrix-org/synapse/issues/7869), [\#7959](https://github.com/matrix-org/synapse/issues/7959))
+- Report CPU metrics to prometheus for time spent processing replication commands. ([\#7879](https://github.com/matrix-org/synapse/issues/7879))
+- Support oEmbed for media previews. ([\#7920](https://github.com/matrix-org/synapse/issues/7920))
+- Abort federation requests where the client disconnects before the ratelimiter expires. ([\#7930](https://github.com/matrix-org/synapse/issues/7930))
+- Cache responses to `/_matrix/federation/v1/state_ids` to reduce duplicated work. ([\#7931](https://github.com/matrix-org/synapse/issues/7931))
+
+
+Bugfixes
+--------
+
+- Fix detection of out of sync remote device lists when receiving events from remote users. ([\#7815](https://github.com/matrix-org/synapse/issues/7815))
+- Fix bug where Synapse fails to process an incoming event over federation if the server is missing too much of the event's auth chain. ([\#7817](https://github.com/matrix-org/synapse/issues/7817))
+- Fix a bug causing Synapse to misinterpret the value `off` for `encryption_enabled_by_default_for_room_type` in its configuration file(s) if that value isn't surrounded by quotes. This bug was introduced in v1.16.0. ([\#7822](https://github.com/matrix-org/synapse/issues/7822))
+- Fix bug where we did not always pass in `app_name` or `server_name` to email templates, including e.g. for registration emails. ([\#7829](https://github.com/matrix-org/synapse/issues/7829))
+- Errors which occur while using the non-standard JWT login now return the proper error: `403 Forbidden` with an error code of `M_FORBIDDEN`. ([\#7844](https://github.com/matrix-org/synapse/issues/7844))
+- Fix "AttributeError: 'str' object has no attribute 'get'" error message when applying per-room message retention policies. The bug was introduced in Synapse 1.7.0. ([\#7850](https://github.com/matrix-org/synapse/issues/7850))
+- Fix a bug introduced in Synapse 1.10.0 which could cause a "no create event in auth events" error during room creation. ([\#7854](https://github.com/matrix-org/synapse/issues/7854))
+- Fix a bug which allowed empty rooms to be rejoined over federation. ([\#7859](https://github.com/matrix-org/synapse/issues/7859))
+- Fix 'Unable to find a suitable guest user ID' error when using multiple client_reader workers. ([\#7866](https://github.com/matrix-org/synapse/issues/7866))
+- Fix a long standing bug where the tracing of async functions with opentracing was broken. ([\#7872](https://github.com/matrix-org/synapse/issues/7872), [\#7961](https://github.com/matrix-org/synapse/issues/7961))
+- Fix "TypeError in `synapse.notifier`" exceptions. ([\#7880](https://github.com/matrix-org/synapse/issues/7880))
+- Fix deprecation warning due to invalid escape sequences. ([\#7895](https://github.com/matrix-org/synapse/issues/7895))
+
+
+Updates to the Docker image
+---------------------------
+
+- Base docker image on Debian Buster rather than Alpine Linux. Contributed by @maquis196. ([\#7839](https://github.com/matrix-org/synapse/issues/7839))
+
+
+Improved Documentation
+----------------------
+
+- Provide instructions on using `register_new_matrix_user` via docker. ([\#7885](https://github.com/matrix-org/synapse/issues/7885))
+- Change the sample config postgres user section to use `synapse_user` instead of `synapse` to align with the documentation. ([\#7889](https://github.com/matrix-org/synapse/issues/7889))
+- Reorder database paragraphs to promote postgres over sqlite. ([\#7933](https://github.com/matrix-org/synapse/issues/7933))
+- Update the dates of ACME v1's end of life in [`ACME.md`](https://github.com/matrix-org/synapse/blob/master/docs/ACME.md). ([\#7934](https://github.com/matrix-org/synapse/issues/7934))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove unused `synapse_replication_tcp_resource_invalidate_cache` prometheus metric. ([\#7878](https://github.com/matrix-org/synapse/issues/7878))
+- Remove Ubuntu Eoan from the list of `.deb` packages that we build as it is now end-of-life. Contributed by @gary-kim. ([\#7888](https://github.com/matrix-org/synapse/issues/7888))
+
+
+Internal Changes
+----------------
+
+- Switch parts of the codebase from `simplejson` to the standard library `json`. ([\#7802](https://github.com/matrix-org/synapse/issues/7802))
+- Add type hints to the http server code and remove an unused parameter. ([\#7813](https://github.com/matrix-org/synapse/issues/7813))
+- Add type hints to synapse.api.errors module. ([\#7820](https://github.com/matrix-org/synapse/issues/7820))
+- Ensure that calls to `json.dumps` are compatible with the standard library json. ([\#7836](https://github.com/matrix-org/synapse/issues/7836))
+- Remove redundant `retry_on_integrity_error` wrapper for event persistence code. ([\#7848](https://github.com/matrix-org/synapse/issues/7848))
+- Consistently use `db_to_json` to convert from database values to JSON objects. ([\#7849](https://github.com/matrix-org/synapse/issues/7849))
+- Convert various parts of the codebase to async/await. ([\#7851](https://github.com/matrix-org/synapse/issues/7851), [\#7860](https://github.com/matrix-org/synapse/issues/7860), [\#7868](https://github.com/matrix-org/synapse/issues/7868), [\#7871](https://github.com/matrix-org/synapse/issues/7871), [\#7873](https://github.com/matrix-org/synapse/issues/7873), [\#7874](https://github.com/matrix-org/synapse/issues/7874), [\#7884](https://github.com/matrix-org/synapse/issues/7884), [\#7912](https://github.com/matrix-org/synapse/issues/7912), [\#7935](https://github.com/matrix-org/synapse/issues/7935), [\#7939](https://github.com/matrix-org/synapse/issues/7939), [\#7942](https://github.com/matrix-org/synapse/issues/7942), [\#7944](https://github.com/matrix-org/synapse/issues/7944))
+- Add support for handling registration requests across multiple client reader workers. ([\#7853](https://github.com/matrix-org/synapse/issues/7853))
+- Small performance improvement in typing processing. ([\#7856](https://github.com/matrix-org/synapse/issues/7856))
+- The default value of `filter_timeline_limit` was changed from -1 (no limit) to 100. ([\#7858](https://github.com/matrix-org/synapse/issues/7858))
+- Optimise queueing of inbound replication commands. ([\#7861](https://github.com/matrix-org/synapse/issues/7861))
+- Add some type annotations to `HomeServer` and `BaseHandler`. ([\#7870](https://github.com/matrix-org/synapse/issues/7870))
+- Clean up `PreserveLoggingContext`. ([\#7877](https://github.com/matrix-org/synapse/issues/7877))
+- Change "unknown room version" logging from 'error' to 'warning'. ([\#7881](https://github.com/matrix-org/synapse/issues/7881))
+- Stop using `device_max_stream_id` table and just use `device_inbox.stream_id`. ([\#7882](https://github.com/matrix-org/synapse/issues/7882))
+- Return an empty body for OPTIONS requests. ([\#7886](https://github.com/matrix-org/synapse/issues/7886))
+- Fix typo in generated config file. Contributed by @ThiefMaster. ([\#7890](https://github.com/matrix-org/synapse/issues/7890))
+- Import ABC from `collections.abc` for Python 3.10 compatibility. ([\#7892](https://github.com/matrix-org/synapse/issues/7892))
+- Remove unused functions `time_function`, `trace_function`, `get_previous_frames`
+  and `get_previous_frame` from `synapse.logging.utils` module. ([\#7897](https://github.com/matrix-org/synapse/issues/7897))
+- Lint the `contrib/` directory in CI and linting scripts, add `synctl` to the linting script for consistency with CI. ([\#7914](https://github.com/matrix-org/synapse/issues/7914))
+- Use Element CSS and logo in notification emails when app name is Element. ([\#7919](https://github.com/matrix-org/synapse/issues/7919))
+- Optimisation to /sync handling: skip serializing the response if the client has already disconnected. ([\#7927](https://github.com/matrix-org/synapse/issues/7927))
+- When a client disconnects, don't log it as 'Error processing request'. ([\#7928](https://github.com/matrix-org/synapse/issues/7928))
+- Add debugging to `/sync` response generation (disabled by default). ([\#7929](https://github.com/matrix-org/synapse/issues/7929))
+- Update comments that refer to Deferreds for async functions. ([\#7945](https://github.com/matrix-org/synapse/issues/7945))
+- Simplify error handling in federation handler. ([\#7950](https://github.com/matrix-org/synapse/issues/7950))
+
+
 Synapse 1.17.0 (2020-07-13)
 ===========================
 
diff --git a/INSTALL.md b/INSTALL.md
index b507de7442..22f7b7c029 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -1,10 +1,12 @@
 - [Choosing your server name](#choosing-your-server-name)
+- [Picking a database engine](#picking-a-database-engine)
 - [Installing Synapse](#installing-synapse)
   - [Installing from source](#installing-from-source)
     - [Platform-Specific Instructions](#platform-specific-instructions)
   - [Prebuilt packages](#prebuilt-packages)
 - [Setting up Synapse](#setting-up-synapse)
   - [TLS certificates](#tls-certificates)
+  - [Client Well-Known URI](#client-well-known-uri)
   - [Email](#email)
   - [Registering a user](#registering-a-user)
   - [Setting up a TURN server](#setting-up-a-turn-server)
@@ -27,6 +29,25 @@ that your email address is probably `user@example.com` rather than
 `user@email.example.com`) - but doing so may require more advanced setup: see
 [Setting up Federation](docs/federate.md).
 
+# Picking a database engine
+
+Synapse offers two database engines:
+ * [PostgreSQL](https://www.postgresql.org)
+ * [SQLite](https://sqlite.org/)
+
+Almost all installations should opt to use PostgreSQL. Advantages include:
+
+* significant performance improvements due to the superior threading and
+  caching model and a smarter query optimiser
+* allowing the DB to be run on separate hardware
+
+For information on how to install and use PostgreSQL, please see
+[docs/postgres.md](docs/postgres.md).
+
+By default Synapse uses SQLite and in doing so trades performance for convenience.
+SQLite is only recommended in Synapse for testing purposes or for servers with
+light workloads.
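+
+If you choose PostgreSQL, the `database` section of `homeserver.yaml` must
+point at it. A minimal sketch (assuming a local server and the `synapse_user`
+role described in [docs/postgres.md](docs/postgres.md)):
+
+```
+database:
+  name: psycopg2
+  args:
+    user: synapse_user
+    password: <secret_password>
+    database: synapse
+    host: localhost
+```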
+
 # Installing Synapse
 
 ## Installing from source
@@ -234,9 +255,9 @@ for a number of platforms.
 
 There is an official synapse image available at
 https://hub.docker.com/r/matrixdotorg/synapse which can be used with
-the docker-compose file available at [contrib/docker](contrib/docker). Further information on
-this including configuration options is available in the README on
-hub.docker.com.
+the docker-compose file available at [contrib/docker](contrib/docker). Further
+information on this including configuration options is available in the README
+on hub.docker.com.
 
 Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
 Dockerfile to automate a synapse server in a single Docker image, at
@@ -244,7 +265,8 @@ https://hub.docker.com/r/avhost/docker-matrix/tags/
 
 Slavi Pantaleev has created an Ansible playbook,
 which installs the official Docker image of Matrix Synapse
-along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
+along with many other Matrix-related services (Postgres database, Element, coturn,
+ma1sd, SSL support, etc.).
 For more details, see
 https://github.com/spantaleev/matrix-docker-ansible-deploy
 
@@ -277,22 +299,27 @@ The fingerprint of the repository signing key (as shown by `gpg
 /usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
 `AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
 
-#### Downstream Debian/Ubuntu packages
+#### Downstream Debian packages
 
-For `buster` and `sid`, Synapse is available in the Debian repositories and
-it should be possible to install it with simply:
+We do not recommend using the packages from the default Debian `buster`
+repository at this time, as they are old and suffer from known security
+vulnerabilities. You can install the latest version of Synapse from
+[our repository](#matrixorg-packages) or from `buster-backports`. Please
+see the [Debian documentation](https://backports.debian.org/Instructions/)
+for information on how to use backports.
+
+If you are using Debian `sid` or testing, Synapse is available in the default
+repositories and it should be possible to install it simply with:
 
 ```
 sudo apt install matrix-synapse
 ```
 
-There is also a version of `matrix-synapse` in `stretch-backports`. Please see
-the [Debian documentation on
-backports](https://backports.debian.org/Instructions/) for information on how
-to use them.
+#### Downstream Ubuntu packages
 
-We do not recommend using the packages in downstream Ubuntu at this time, as
-they are old and suffer from known security vulnerabilities.
+We do not recommend using the packages in the default Ubuntu repository
+at this time, as they are old and suffer from known security vulnerabilities.
+The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
 
 ### Fedora
 
@@ -419,6 +446,60 @@ so, you will need to edit `homeserver.yaml`, as follows:
 For a more detailed guide to configuring your server for federation, see
 [federate.md](docs/federate.md).
 
+## Client Well-Known URI
+
+Setting up the client Well-Known URI is optional, but if you set it up, it will
+allow users to enter their full username (e.g. `@user:<server_name>`) into clients
+which support well-known lookup to automatically configure the homeserver and
+identity server URLs. This is useful so that users don't have to memorize or think
+about the actual homeserver URL you are using.
+
+The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
+the following format.
+
+```
+{
+  "m.homeserver": {
+    "base_url": "https://<matrix.example.com>"
+  }
+}
+```
+
+It can optionally contain identity server information as well.
+
+```
+{
+  "m.homeserver": {
+    "base_url": "https://<matrix.example.com>"
+  },
+  "m.identity_server": {
+    "base_url": "https://<identity.example.com>"
+  }
+}
+```
+
+To work in browser-based clients, the file must be served with the appropriate
+Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
+`Access-Control-Allow-Origin: *`, which allows all browser-based clients to
+view it.
+
+In nginx this would be something like:
+```
+location /.well-known/matrix/client {
+    return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
+    add_header Content-Type application/json;
+    add_header Access-Control-Allow-Origin *;
+}
+```
+
+You should also ensure the `public_baseurl` option in `homeserver.yaml` is set
+correctly. `public_baseurl` should be set to the URL that clients will use to
+connect to your server. This is the same URL you put for the `m.homeserver`
+`base_url` above.
+
+```
+public_baseurl: "https://<matrix.example.com>"
+```
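+
+Once both are in place, you can sanity-check them (a sketch; substitute your
+actual server name):
+
+```
+curl https://<server_name>/.well-known/matrix/client
+# should print: {"m.homeserver": {"base_url": "https://<matrix.example.com>"}}
+```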
 
 ## Email
 
@@ -437,7 +518,7 @@ email will be disabled.
 
 ## Registering a user
 
-The easiest way to create a new user is to do so from a client like [Riot](https://riot.im).
+The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
 
 Alternatively you can do so from the command line if you have installed via pip.
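+
+For example, a registration from the command line might look like the
+following (a sketch; assumes `registration_shared_secret` is set in
+`homeserver.yaml` and Synapse is listening on the default port):
+
+```
+register_new_matrix_user -c homeserver.yaml http://localhost:8008
+```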
 
diff --git a/README.rst b/README.rst
index 38376e23c2..4a189c8bc4 100644
--- a/README.rst
+++ b/README.rst
@@ -45,7 +45,7 @@ which handle:
 - Eventually-consistent cryptographically secure synchronisation of room
   state across a global open network of federated servers and services
 - Sending and receiving extensible messages in a room with (optional)
-  end-to-end encryption[1]
+  end-to-end encryption
 - Inviting, joining, leaving, kicking, banning room members
 - Managing user accounts (registration, login, logout)
 - Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
@@ -82,9 +82,6 @@ at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
 
 Thanks for using Matrix!
 
-[1] End-to-end encryption is currently in beta: `blog post <https://matrix.org/blog/2016/11/21/matrixs-olm-end-to-end-encryption-security-assessment-released-and-implemented-cross-platform-on-riot-at-last>`_.
-
-
 Support
 =======
 
@@ -115,12 +112,11 @@ Unless you are running a test instance of Synapse on your local machine, in
 general, you will need to enable TLS support before you can successfully
 connect from a client: see `<INSTALL.md#tls-certificates>`_.
 
-An easy way to get started is to login or register via Riot at
-https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
+An easy way to get started is to login or register via Element at
+https://app.element.io/#/login or https://app.element.io/#/register respectively.
 You will need to change the server you are logging into from ``matrix.org``
 and instead specify a Homeserver URL of ``https://<server_name>:8448``
 (or just ``https://<server_name>`` if you are using a reverse proxy).
-(Leave the identity server as the default - see `Identity servers`_.)
 If you prefer to use another client, refer to our
 `client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
 
@@ -137,7 +133,7 @@ it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
 recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.md>`_.)
 
 Once ``enable_registration`` is set to ``true``, it is possible to register a
-user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
+user via a Matrix client.
 
 Your new user name will be formed partly from the ``server_name``, and partly
 from a localpart you specify when you create the account. Your name will take
@@ -183,30 +179,6 @@ versions of synapse.
 
 .. _UPGRADE.rst: UPGRADE.rst
 
-
-Using PostgreSQL
-================
-
-Synapse offers two database engines:
- * `SQLite <https://sqlite.org/>`_
- * `PostgreSQL <https://www.postgresql.org>`_
-
-By default Synapse uses SQLite in and doing so trades performance for convenience.
-SQLite is only recommended in Synapse for testing purposes or for servers with
-light workloads.
-
-Almost all installations should opt to use PostgreSQL. Advantages include:
-
-* significant performance improvements due to the superior threading and
-  caching model, smarter query optimiser
-* allowing the DB to be run on separate hardware
-* allowing basic active/backup high-availability with a "hot spare" synapse
-  pointing at the same DB master, as well as enabling DB replication in
-  synapse itself.
-
-For information on how to install and use PostgreSQL, please see
-`docs/postgres.md <docs/postgres.md>`_.
-
 .. _reverse-proxy:
 
 Using a reverse proxy with Synapse
@@ -255,10 +227,9 @@ email address.
 Password reset
 ==============
 
-If a user has registered an email address to their account using an identity
-server, they can request a password-reset token via clients such as Riot.
-
-A manual password reset can be done via direct database access as follows.
+Users can reset their password through their client. Alternatively, a server admin
+can reset a user's password using the `admin API <docs/admin_api/user_admin_api.rst#reset-password>`_
+or by directly editing the database as shown below.
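+
+For example, a reset via the admin API might look like the following (a sketch;
+assumes you hold an admin access token, and that the endpoint matches the
+admin API docs linked above)::
+
+    curl -X POST --header "Authorization: Bearer <admin_access_token>" \
+        --data '{"new_password": "<new_password>", "logout_devices": true}' \
+        "https://<server_name>/_synapse/admin/v1/reset_password/@user:<server_name>"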
 
 First calculate the hash of the new password::
 
diff --git a/UPGRADE.rst b/UPGRADE.rst
index 3b5627e852..6492fa011f 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -75,6 +75,24 @@ for example:
      wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
      dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+Upgrading to v1.18.0
+====================
+
+Docker `-py3` suffix will be removed in future versions
+-------------------------------------------------------
+
+From 10th August 2020, we will no longer publish Docker images with the `-py3` tag suffix. The images tagged with the `-py3` suffix have been identical to the non-suffixed tags since release 0.99.0, and the suffix is obsolete.
+
+On 10th August, we will remove the `latest-py3` tag. Existing per-release tags (such as `v1.18.0-py3`) will not be removed, but no new `-py3` tags will be added.
+
+Scripts relying on the `-py3` suffix will need to be updated.
+
+Redis replication is now recommended in lieu of TCP replication
+---------------------------------------------------------------
+
+When setting up worker processes, we now recommend the use of a Redis server for replication. **The old direct TCP connection method is deprecated and will be removed in a future release.**
+See `docs/workers.md <docs/workers.md>`_ for more details.
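+
+A minimal ``redis`` section in ``homeserver.yaml`` might look like the
+following (a sketch; see the sample config and ``docs/workers.md`` for the
+authoritative options)::
+
+    redis:
+      enabled: true
+      host: localhost
+      port: 6379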
+
 Upgrading to v1.14.0
 ====================
 
diff --git a/changelog.d/6455.feature b/changelog.d/6455.feature
deleted file mode 100644
index eb286cb70f..0000000000
--- a/changelog.d/6455.feature
+++ /dev/null
@@ -1 +0,0 @@
-Include room states on invite events that are sent to application services. Contributed by @Sorunome.
diff --git a/changelog.d/7613.feature b/changelog.d/7613.feature
deleted file mode 100644
index b671dc2fcc..0000000000
--- a/changelog.d/7613.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add delete room admin endpoint (`POST /_synapse/admin/v1/rooms/<room_id>/delete`). Contributed by @dklimpel.
diff --git a/changelog.d/7736.feature b/changelog.d/7736.feature
new file mode 100644
index 0000000000..feb02be234
--- /dev/null
+++ b/changelog.d/7736.feature
@@ -0,0 +1 @@
+Add unread messages count to sync responses, as specified in [MSC2654](https://github.com/matrix-org/matrix-doc/pull/2654).
diff --git a/changelog.d/7798.feature b/changelog.d/7798.feature
deleted file mode 100644
index 56ffaf0d4a..0000000000
--- a/changelog.d/7798.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental support for running multiple federation sender processes.
diff --git a/changelog.d/7802.misc b/changelog.d/7802.misc
deleted file mode 100644
index d81f8875c5..0000000000
--- a/changelog.d/7802.misc
+++ /dev/null
@@ -1 +0,0 @@
- Switch from simplejson to the standard library json.
diff --git a/changelog.d/7813.misc b/changelog.d/7813.misc
deleted file mode 100644
index f3005cfd27..0000000000
--- a/changelog.d/7813.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type hints to the http server code and remove an unused parameter.
diff --git a/changelog.d/7815.bugfix b/changelog.d/7815.bugfix
deleted file mode 100644
index 3e7c7d412e..0000000000
--- a/changelog.d/7815.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix detection of out of sync remote device lists when receiving events from remote users.
diff --git a/changelog.d/7817.bugfix b/changelog.d/7817.bugfix
deleted file mode 100644
index 1c001070d5..0000000000
--- a/changelog.d/7817.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where Synapse fails to process an incoming event over federation if the server is missing too much of the event's auth chain.
diff --git a/changelog.d/7820.misc b/changelog.d/7820.misc
deleted file mode 100644
index b77b5672e3..0000000000
--- a/changelog.d/7820.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type hints to synapse.api.errors module.
diff --git a/changelog.d/7822.bugfix b/changelog.d/7822.bugfix
deleted file mode 100644
index faf249a678..0000000000
--- a/changelog.d/7822.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug causing Synapse to misinterpret the value `off` for `encryption_enabled_by_default_for_room_type` in its configuration file(s) if that value isn't surrounded by quotes. This bug was introduced in v1.16.0.
diff --git a/changelog.d/7827.feature b/changelog.d/7827.feature
deleted file mode 100644
index 0fd116e198..0000000000
--- a/changelog.d/7827.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add the option to validate the `iss` and `aud` claims for JWT logins.
diff --git a/changelog.d/7829.bugfix b/changelog.d/7829.bugfix
deleted file mode 100644
index dcbf385de6..0000000000
--- a/changelog.d/7829.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where we did not always pass in `app_name` or `server_name` to email templates, including e.g. for registration emails.
diff --git a/changelog.d/7830.feature b/changelog.d/7830.feature
deleted file mode 100644
index b4f614084d..0000000000
--- a/changelog.d/7830.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for handling registration requests across multiple client reader workers.
diff --git a/changelog.d/7836.misc b/changelog.d/7836.misc
deleted file mode 100644
index a3a97c7590..0000000000
--- a/changelog.d/7836.misc
+++ /dev/null
@@ -1 +0,0 @@
-Ensure that calls to `json.dumps` are compatible with the standard library json.
diff --git a/changelog.d/7839.docker b/changelog.d/7839.docker
deleted file mode 100644
index cdf3c9631c..0000000000
--- a/changelog.d/7839.docker
+++ /dev/null
@@ -1 +0,0 @@
-Base docker image on Debian Buster rather than Alpine Linux. Contributed by @maquis196.
diff --git a/changelog.d/7842.feature b/changelog.d/7842.feature
deleted file mode 100644
index 727deb01c9..0000000000
--- a/changelog.d/7842.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add an admin API to list the users in a room. Contributed by Awesome Technologies Innovationslabor GmbH.
diff --git a/changelog.d/7844.bugfix b/changelog.d/7844.bugfix
deleted file mode 100644
index ad296f1b3c..0000000000
--- a/changelog.d/7844.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Errors which occur while using the non-standard JWT login now return the proper error: `403 Forbidden` with an error code of `M_FORBIDDEN`.
diff --git a/changelog.d/7846.feature b/changelog.d/7846.feature
deleted file mode 100644
index 997376fe42..0000000000
--- a/changelog.d/7846.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow email subjects to be customised through Synapse's configuration.
diff --git a/changelog.d/7847.feature b/changelog.d/7847.feature
deleted file mode 100644
index 4b9a8d8569..0000000000
--- a/changelog.d/7847.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add the ability to re-activate an account from the admin API.
diff --git a/changelog.d/7848.misc b/changelog.d/7848.misc
deleted file mode 100644
index d9db1d8357..0000000000
--- a/changelog.d/7848.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove redundant `retry_on_integrity_error` wrapper for event persistence code.
diff --git a/changelog.d/7849.misc b/changelog.d/7849.misc
deleted file mode 100644
index e3296418c1..0000000000
--- a/changelog.d/7849.misc
+++ /dev/null
@@ -1 +0,0 @@
-Consistently use `db_to_json` to convert from database values to JSON objects.
diff --git a/changelog.d/7850.bugfix b/changelog.d/7850.bugfix
deleted file mode 100644
index 5f19a89043..0000000000
--- a/changelog.d/7850.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix "AttributeError: 'str' object has no attribute 'get'" error message when applying per-room message retention policies. The bug was introduced in Synapse 1.7.0.
diff --git a/changelog.d/7851.misc b/changelog.d/7851.misc
deleted file mode 100644
index e5cf540edf..0000000000
--- a/changelog.d/7851.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert E2E keys and room keys handlers to async/await.
diff --git a/changelog.d/7853.misc b/changelog.d/7853.misc
deleted file mode 100644
index b4f614084d..0000000000
--- a/changelog.d/7853.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add support for handling registration requests across multiple client reader workers.
diff --git a/changelog.d/7854.bugfix b/changelog.d/7854.bugfix
deleted file mode 100644
index b11f9dedfe..0000000000
--- a/changelog.d/7854.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 1.10.0 which could cause a "no create event in auth events" error during room creation.
diff --git a/changelog.d/7855.feature b/changelog.d/7855.feature
deleted file mode 100644
index 2b6a9f0e71..0000000000
--- a/changelog.d/7855.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental support for running multiple pusher workers.
diff --git a/changelog.d/7856.misc b/changelog.d/7856.misc
deleted file mode 100644
index 7d99fb67be..0000000000
--- a/changelog.d/7856.misc
+++ /dev/null
@@ -1 +0,0 @@
-Small performance improvement in typing processing.
diff --git a/changelog.d/7858.misc b/changelog.d/7858.misc
deleted file mode 100644
index 8f0fc2de74..0000000000
--- a/changelog.d/7858.misc
+++ /dev/null
@@ -1 +0,0 @@
-The default value of `filter_timeline_limit` was changed from -1 (no limit) to 100.
diff --git a/changelog.d/7859.bugfix b/changelog.d/7859.bugfix
deleted file mode 100644
index 19cff4b061..0000000000
--- a/changelog.d/7859.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug which allowed empty rooms to be rejoined over federation.
diff --git a/changelog.d/7860.misc b/changelog.d/7860.misc
deleted file mode 100644
index fdd48b955c..0000000000
--- a/changelog.d/7860.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert _base, profile, and _receipts handlers to async/await.
diff --git a/changelog.d/7861.misc b/changelog.d/7861.misc
deleted file mode 100644
index ada616c62f..0000000000
--- a/changelog.d/7861.misc
+++ /dev/null
@@ -1 +0,0 @@
-Optimise queueing of inbound replication commands.
diff --git a/changelog.d/7866.bugfix b/changelog.d/7866.bugfix
deleted file mode 100644
index 6b5c3c4eca..0000000000
--- a/changelog.d/7866.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix 'Unable to find a suitable guest user ID' error when using multiple client_reader workers.
diff --git a/changelog.d/7868.misc b/changelog.d/7868.misc
deleted file mode 100644
index eadef5e4c2..0000000000
--- a/changelog.d/7868.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert synapse.app and federation client to async/await.
diff --git a/changelog.d/7869.feature b/changelog.d/7869.feature
deleted file mode 100644
index 1982049a52..0000000000
--- a/changelog.d/7869.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental support for moving typing off master.
diff --git a/changelog.d/7870.misc b/changelog.d/7870.misc
deleted file mode 100644
index 27cce2f2f9..0000000000
--- a/changelog.d/7870.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some type annotations to `HomeServer` and `BaseHandler`.
diff --git a/changelog.d/7871.misc b/changelog.d/7871.misc
deleted file mode 100644
index 4d398a9f3a..0000000000
--- a/changelog.d/7871.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert device handler to async/await.
diff --git a/changelog.d/7872.bugfix b/changelog.d/7872.bugfix
deleted file mode 100644
index b21f8e1f14..0000000000
--- a/changelog.d/7872.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long standing bug where the tracing of async functions with opentracing was broken.
diff --git a/changelog.d/7877.misc b/changelog.d/7877.misc
deleted file mode 100644
index a62aa0329c..0000000000
--- a/changelog.d/7877.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up `PreserveLoggingContext`.
diff --git a/changelog.d/7878.removal b/changelog.d/7878.removal
deleted file mode 100644
index d5a4066624..0000000000
--- a/changelog.d/7878.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove unused `synapse_replication_tcp_resource_invalidate_cache` prometheus metric.
diff --git a/changelog.d/7879.feature b/changelog.d/7879.feature
deleted file mode 100644
index c89655f000..0000000000
--- a/changelog.d/7879.feature
+++ /dev/null
@@ -1 +0,0 @@
-Report CPU metrics to prometheus for time spent processing replication commands.
diff --git a/changelog.d/7880.bugfix b/changelog.d/7880.bugfix
deleted file mode 100644
index 356add0996..0000000000
--- a/changelog.d/7880.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix "TypeError in `synapse.notifier`" exceptions.
diff --git a/changelog.d/7881.misc b/changelog.d/7881.misc
deleted file mode 100644
index 6799117099..0000000000
--- a/changelog.d/7881.misc
+++ /dev/null
@@ -1 +0,0 @@
-Change "unknown room version" logging from 'error' to 'warning'.
diff --git a/changelog.d/7882.misc b/changelog.d/7882.misc
deleted file mode 100644
index 9002749335..0000000000
--- a/changelog.d/7882.misc
+++ /dev/null
@@ -1 +0,0 @@
-Stop using `device_max_stream_id` table and just use `device_inbox.stream_id`.
diff --git a/changelog.d/7885.doc b/changelog.d/7885.doc
deleted file mode 100644
index cbe9de4082..0000000000
--- a/changelog.d/7885.doc
+++ /dev/null
@@ -1 +0,0 @@
-Provide instructions on using `register_new_matrix_user` via docker.
diff --git a/changelog.d/7888.misc b/changelog.d/7888.misc
deleted file mode 100644
index 5328d2dcca..0000000000
--- a/changelog.d/7888.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove Ubuntu Eoan from the list of `.deb` packages that we build as it is now end-of-life. Contributed by @gary-kim.
diff --git a/changelog.d/7889.doc b/changelog.d/7889.doc
deleted file mode 100644
index d91f62fd39..0000000000
--- a/changelog.d/7889.doc
+++ /dev/null
@@ -1 +0,0 @@
-Change the sample config postgres user section to use `synapse_user` instead of `synapse` to align with the documentation.
\ No newline at end of file
diff --git a/changelog.d/7890.misc b/changelog.d/7890.misc
deleted file mode 100644
index 8c127084bc..0000000000
--- a/changelog.d/7890.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix typo in generated config file. Contributed by @ThiefMaster.
diff --git a/changelog.d/7892.misc b/changelog.d/7892.misc
deleted file mode 100644
index ef4cfa04fd..0000000000
--- a/changelog.d/7892.misc
+++ /dev/null
@@ -1 +0,0 @@
-Import ABC from `collections.abc` for Python 3.10 compatibility.
diff --git a/changelog.d/7895.bugfix b/changelog.d/7895.bugfix
deleted file mode 100644
index 1ae7f8ca7c..0000000000
--- a/changelog.d/7895.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix deprecation warning due to invalid escape sequences.
\ No newline at end of file
diff --git a/changelog.d/7897.misc b/changelog.d/7897.misc
deleted file mode 100644
index 77772533fd..0000000000
--- a/changelog.d/7897.misc
+++ /dev/null
@@ -1,2 +0,0 @@
-Remove unused functions `time_function`, `trace_function`, `get_previous_frames`
-and `get_previous_frame` from `synapse.logging.utils` module.
\ No newline at end of file
diff --git a/changelog.d/7899.doc b/changelog.d/7899.doc
new file mode 100644
index 0000000000..847c2cb62c
--- /dev/null
+++ b/changelog.d/7899.doc
@@ -0,0 +1 @@
+Document how to set up a Client Well-Known file and fix several pieces of outdated documentation.
diff --git a/changelog.d/7902.feature b/changelog.d/7902.feature
new file mode 100644
index 0000000000..4feae8cc29
--- /dev/null
+++ b/changelog.d/7902.feature
@@ -0,0 +1 @@
+Add option to allow server admins to join rooms which fail complexity checks. Contributed by @lugino-emeritus.
diff --git a/changelog.d/7912.misc b/changelog.d/7912.misc
deleted file mode 100644
index d619590070..0000000000
--- a/changelog.d/7912.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert `RoomListHandler` to async/await.
diff --git a/changelog.d/7914.misc b/changelog.d/7914.misc
deleted file mode 100644
index 710553249c..0000000000
--- a/changelog.d/7914.misc
+++ /dev/null
@@ -1 +0,0 @@
-Lint the `contrib/` directory in CI and linting scripts, add `synctl` to the linting script for consistency with CI.
diff --git a/changelog.d/7919.misc b/changelog.d/7919.misc
deleted file mode 100644
index addaa35183..0000000000
--- a/changelog.d/7919.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use Element CSS and logo in notification emails when app name is Element.
diff --git a/changelog.d/7927.misc b/changelog.d/7927.misc
deleted file mode 100644
index 3b864da03d..0000000000
--- a/changelog.d/7927.misc
+++ /dev/null
@@ -1 +0,0 @@
-Optimisation to /sync handling: skip serializing the response if the client has already disconnected.
diff --git a/changelog.d/7929.misc b/changelog.d/7929.misc
deleted file mode 100644
index d72856fe03..0000000000
--- a/changelog.d/7929.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add debugging to `/sync` response generation (disabled by default).
diff --git a/changelog.d/7936.misc b/changelog.d/7936.misc
new file mode 100644
index 0000000000..4304bbdd25
--- /dev/null
+++ b/changelog.d/7936.misc
@@ -0,0 +1 @@
+Switch to the JSON implementation from the standard library and bump the minimum version of the canonicaljson library to 1.2.0.
diff --git a/changelog.d/7947.misc b/changelog.d/7947.misc
new file mode 100644
index 0000000000..dfe4c03171
--- /dev/null
+++ b/changelog.d/7947.misc
@@ -0,0 +1 @@
+Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7948.misc b/changelog.d/7948.misc
new file mode 100644
index 0000000000..dfe4c03171
--- /dev/null
+++ b/changelog.d/7948.misc
@@ -0,0 +1 @@
+Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7949.misc b/changelog.d/7949.misc
new file mode 100644
index 0000000000..dfe4c03171
--- /dev/null
+++ b/changelog.d/7949.misc
@@ -0,0 +1 @@
+Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7951.misc b/changelog.d/7951.misc
new file mode 100644
index 0000000000..dfe4c03171
--- /dev/null
+++ b/changelog.d/7951.misc
@@ -0,0 +1 @@
+Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7952.misc b/changelog.d/7952.misc
new file mode 100644
index 0000000000..93c25cb386
--- /dev/null
+++ b/changelog.d/7952.misc
@@ -0,0 +1 @@
+Move some database-related log lines from the default logger to the database/transaction loggers.
\ No newline at end of file
diff --git a/changelog.d/7963.misc b/changelog.d/7963.misc
new file mode 100644
index 0000000000..dfe4c03171
--- /dev/null
+++ b/changelog.d/7963.misc
@@ -0,0 +1 @@
+Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7964.feature b/changelog.d/7964.feature
new file mode 100644
index 0000000000..ffe861650c
--- /dev/null
+++ b/changelog.d/7964.feature
@@ -0,0 +1 @@
+Add an option to the delete room admin endpoint (`POST /_synapse/admin/v1/rooms/<room_id>/delete`) to control whether the room is purged. Contributed by @dklimpel.
\ No newline at end of file
diff --git a/changelog.d/7965.misc b/changelog.d/7965.misc
new file mode 100644
index 0000000000..ee9f1a7114
--- /dev/null
+++ b/changelog.d/7965.misc
@@ -0,0 +1 @@
+Add a script to detect source code files using non-unix line terminators.
\ No newline at end of file
diff --git a/changelog.d/7970.misc b/changelog.d/7970.misc
new file mode 100644
index 0000000000..ee9f1a7114
--- /dev/null
+++ b/changelog.d/7970.misc
@@ -0,0 +1 @@
+Add a script to detect source code files using non-unix line terminators.
\ No newline at end of file
diff --git a/changelog.d/7971.misc b/changelog.d/7971.misc
new file mode 100644
index 0000000000..87a4eb1f4d
--- /dev/null
+++ b/changelog.d/7971.misc
@@ -0,0 +1 @@
+Log the SAML session ID during creation.
diff --git a/changelog.d/7973.misc b/changelog.d/7973.misc
new file mode 100644
index 0000000000..dfe4c03171
--- /dev/null
+++ b/changelog.d/7973.misc
@@ -0,0 +1 @@
+Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7975.misc b/changelog.d/7975.misc
new file mode 100644
index 0000000000..dfe4c03171
--- /dev/null
+++ b/changelog.d/7975.misc
@@ -0,0 +1 @@
+Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7976.misc b/changelog.d/7976.misc
new file mode 100644
index 0000000000..dfe4c03171
--- /dev/null
+++ b/changelog.d/7976.misc
@@ -0,0 +1 @@
+Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7978.bugfix b/changelog.d/7978.bugfix
new file mode 100644
index 0000000000..247b18db20
--- /dev/null
+++ b/changelog.d/7978.bugfix
@@ -0,0 +1 @@
+Fix a long standing bug: 'Duplicate key value violates unique constraint "event_relations_id"' when message retention is configured.
diff --git a/changelog.d/7979.misc b/changelog.d/7979.misc
new file mode 100644
index 0000000000..4304bbdd25
--- /dev/null
+++ b/changelog.d/7979.misc
@@ -0,0 +1 @@
+Switch to the JSON implementation from the standard library and bump the minimum version of the canonicaljson library to 1.2.0.
diff --git a/changelog.d/7980.bugfix b/changelog.d/7980.bugfix
new file mode 100644
index 0000000000..fa351b4b77
--- /dev/null
+++ b/changelog.d/7980.bugfix
@@ -0,0 +1 @@
+Fix "no create event in auth events" when trying to reject invitation after inviter leaves. Bug introduced in Synapse v1.10.0.
diff --git a/changelog.d/7981.misc b/changelog.d/7981.misc
new file mode 100644
index 0000000000..dfe4c03171
--- /dev/null
+++ b/changelog.d/7981.misc
@@ -0,0 +1 @@
+Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7990.doc b/changelog.d/7990.doc
new file mode 100644
index 0000000000..8d8fd926e9
--- /dev/null
+++ b/changelog.d/7990.doc
@@ -0,0 +1 @@
+Improve workers docs.
diff --git a/changelog.d/7992.doc b/changelog.d/7992.doc
new file mode 100644
index 0000000000..3368fb5912
--- /dev/null
+++ b/changelog.d/7992.doc
@@ -0,0 +1 @@
+Fix typo in `docs/workers.md`.
diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py
index 77422f5e5d..dfc1d294dc 100755
--- a/contrib/cmdclient/console.py
+++ b/contrib/cmdclient/console.py
@@ -609,13 +609,15 @@ class SynapseCmd(cmd.Cmd):
 
     @defer.inlineCallbacks
     def _do_event_stream(self, timeout):
-        res = yield self.http_client.get_json(
-            self._url() + "/events",
-            {
-                "access_token": self._tok(),
-                "timeout": str(timeout),
-                "from": self.event_stream_token,
-            },
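+        # get_json is now an async function, so wrap the coroutine in
+        # ensureDeferred to make it yieldable from this inlineCallbacks
+        # generator.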
+        res = yield defer.ensureDeferred(
+            self.http_client.get_json(
+                self._url() + "/events",
+                {
+                    "access_token": self._tok(),
+                    "timeout": str(timeout),
+                    "from": self.event_stream_token,
+                },
+            )
         )
         print(json.dumps(res, indent=4))
 
diff --git a/debian/changelog b/debian/changelog
index 3825603ae4..a0af2b78a8 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,19 @@
+matrix-synapse-py3 (1.xx.0) stable; urgency=medium
+
+  [ Synapse Packaging team ]
+  * New synapse release 1.xx.0.
+
+  [ Aaron Raimist ]
+  * Fix outdated documentation for SYNAPSE_CACHE_FACTOR
+
+ -- Synapse Packaging team <packages@matrix.org>  XXXXX
+
+matrix-synapse-py3 (1.18.0) stable; urgency=medium
+
+  * New synapse release 1.18.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 30 Jul 2020 10:55:53 +0100
+
 matrix-synapse-py3 (1.17.0) stable; urgency=medium
 
   * New synapse release 1.17.0.
diff --git a/debian/matrix-synapse.default b/debian/matrix-synapse.default
index 65dc2f33d8..f402d73bbf 100644
--- a/debian/matrix-synapse.default
+++ b/debian/matrix-synapse.default
@@ -1,2 +1,2 @@
 # Specify environment variables used when running Synapse
-# SYNAPSE_CACHE_FACTOR=1 (default)
+# SYNAPSE_CACHE_FACTOR=0.5 (default)
diff --git a/debian/synctl.ronn b/debian/synctl.ronn
index a73c832f62..1bad6094f3 100644
--- a/debian/synctl.ronn
+++ b/debian/synctl.ronn
@@ -46,19 +46,20 @@ Configuration file may be generated as follows:
 ## ENVIRONMENT
 
   * `SYNAPSE_CACHE_FACTOR`:
-    Synapse's architecture is quite RAM hungry currently - a lot of
-    recent room data and metadata is deliberately cached in RAM in
-    order to speed up common requests.  This will be improved in
-    future, but for now the easiest way to either reduce the RAM usage
-    (at the risk of slowing things down) is to set the
-    SYNAPSE_CACHE_FACTOR environment variable. Roughly speaking, a
-    SYNAPSE_CACHE_FACTOR of 1.0 will max out at around 3-4GB of
-    resident memory - this is what we currently run the matrix.org
-    on. The default setting is currently 0.1, which is probably around
-    a ~700MB footprint. You can dial it down further to 0.02 if
-    desired, which targets roughly ~512MB. Conversely you can dial it
-    up if you need performance for lots of users and have a box with a
-    lot of RAM.
+    Synapse's architecture is quite RAM hungry currently - we deliberately
+    cache a lot of recent room data and metadata in RAM in order to speed up
+    common requests. We'll improve this in the future, but for now the easiest
+    way to reduce the RAM usage (at the risk of slowing things down)
+    is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
+    variable. The default is 0.5, which can be decreased to reduce RAM usage
+    in memory-constrained environments, or increased if performance starts to
+    degrade.
+
+    However, degraded performance due to a low cache factor, common on
+    machines with slow disks, often leads to explosions in memory use due to
+    backlogged requests. In this case, reducing the cache factor will make
+    things worse. Instead, try increasing it drastically. 2.0 is a good
+    starting value.
 
 ## COPYRIGHT
 
diff --git a/docs/.sample_config_header.yaml b/docs/.sample_config_header.yaml
index 35a591d042..8c9b31acdb 100644
--- a/docs/.sample_config_header.yaml
+++ b/docs/.sample_config_header.yaml
@@ -10,5 +10,16 @@
 # homeserver.yaml. Instead, if you are starting from scratch, please generate
 # a fresh config using Synapse by following the instructions in INSTALL.md.
 
+# Configuration options that take a time period can be set using a number
+# followed by a letter. Letters have the following meanings:
+# s = second
+# m = minute
+# h = hour
+# d = day
+# w = week
+# y = year
+# For example, setting redaction_retention_period: 5m would remove redacted
+# messages from the database after 5 minutes, rather than 5 months.
+
 ################################################################################
 
diff --git a/docs/ACME.md b/docs/ACME.md
index f4c4740476..a7a498f575 100644
--- a/docs/ACME.md
+++ b/docs/ACME.md
@@ -12,13 +12,14 @@ introduced support for automatically provisioning certificates through
 In [March 2019](https://community.letsencrypt.org/t/end-of-life-plan-for-acmev1/88430),
 Let's Encrypt announced that they were deprecating version 1 of the ACME
 protocol, with the plan to disable the use of it for new accounts in
-November 2019, and for existing accounts in June 2020.
+November 2019, for new domains in June 2020, and for existing accounts and
+domains in June 2021.
 
 Synapse doesn't currently support version 2 of the ACME protocol, which
 means that:
 
 * for existing installs, Synapse's built-in ACME support will continue
-  to work until June 2020.
+  to work until June 2021.
 * for new installs, this feature will not work at all.
 
 Either way, it is recommended to move from Synapse's ACME support
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index 15b83e9824..0f267d2b7b 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -369,7 +369,9 @@ to the new room will have power level `-10` by default, and thus be unable to sp
 If `block` is `True` it prevents new joins to the old room.
 
 This API will remove all trace of the old room from your database after removing
-all local users.
+all local users, provided `purge` is `true` (the default). If you do not want
+the room removed from your database, set `purge` to `false`.
 Depending on the amount of history being purged a call to the API may take
 several minutes or longer.
 
@@ -388,7 +390,8 @@ with a body of:
     "new_room_user_id": "@someuser:example.com",
     "room_name": "Content Violation Notification",
     "message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service.",
-    "block": true
+    "block": true,
+    "purge": true
 }
 ```
 
@@ -430,8 +433,10 @@ The following JSON body parameters are available:
               `new_room_user_id` in the new room. Ideally this will clearly convey why the
                original room was shut down. Defaults to `Sharing illegal content on this server
                is not permitted and rooms in violation will be blocked.`
-* `block` - Optional. If set to `true`, this room will be added to a blocking list, preventing future attempts to
-  join the room. Defaults to `false`.
+* `block` - Optional. If set to `true`, this room will be added to a blocking list, preventing
+            future attempts to join the room. Defaults to `false`.
+* `purge` - Optional. If set to `true`, it will remove all traces of the room from your database.
+            Defaults to `true`.
 
 The JSON body must not be empty. The body must be at least `{}`.
 
diff --git a/docs/metrics-howto.md b/docs/metrics-howto.md
index cf69938a2a..b386ec91c1 100644
--- a/docs/metrics-howto.md
+++ b/docs/metrics-howto.md
@@ -27,7 +27,7 @@
     different thread to Synapse. This can make it more resilient to
     heavy load meaning metrics cannot be retrieved, and can be exposed
     to just internal networks easier. The served metrics are available
-    over HTTP only, and will be available at `/`.
+    over HTTP only, and will be available at `/_synapse/metrics`.
 
     Add a new listener to homeserver.yaml:
 
diff --git a/docs/password_auth_providers.md b/docs/password_auth_providers.md
index 5d9ae67041..fef1d47e85 100644
--- a/docs/password_auth_providers.md
+++ b/docs/password_auth_providers.md
@@ -19,102 +19,103 @@ password auth provider module implementations:
 
 Password auth provider classes must provide the following methods:
 
-*class* `SomeProvider.parse_config`(*config*)
+* `parse_config(config)`
+
+  This method is passed the `config` object for this module from the
+  homeserver configuration file.
 
-> This method is passed the `config` object for this module from the
-> homeserver configuration file.
->
-> It should perform any appropriate sanity checks on the provided
-> configuration, and return an object which is then passed into
-> `__init__`.
+  It should perform any appropriate sanity checks on the provided
+  configuration, and return an object which is then passed into `__init__`.
 
-*class* `SomeProvider`(*config*, *account_handler*)
+  This method should be decorated with `@staticmethod`.
 
-> The constructor is passed the config object returned by
-> `parse_config`, and a `synapse.module_api.ModuleApi` object which
-> allows the password provider to check if accounts exist and/or create
-> new ones.
+* `__init__(self, config, account_handler)`
+
+  The constructor is passed the config object returned by
+  `parse_config`, and a `synapse.module_api.ModuleApi` object which
+  allows the password provider to check if accounts exist and/or create
+  new ones.
 
 ## Optional methods
 
-Password auth provider classes may optionally provide the following
-methods.
-
-*class* `SomeProvider.get_db_schema_files`()
-
-> This method, if implemented, should return an Iterable of
-> `(name, stream)` pairs of database schema files. Each file is applied
-> in turn at initialisation, and a record is then made in the database
-> so that it is not re-applied on the next start.
-
-`someprovider.get_supported_login_types`()
-
-> This method, if implemented, should return a `dict` mapping from a
-> login type identifier (such as `m.login.password`) to an iterable
-> giving the fields which must be provided by the user in the submission
-> to the `/login` api. These fields are passed in the `login_dict`
-> dictionary to `check_auth`.
->
-> For example, if a password auth provider wants to implement a custom
-> login type of `com.example.custom_login`, where the client is expected
-> to pass the fields `secret1` and `secret2`, the provider should
-> implement this method and return the following dict:
->
->     {"com.example.custom_login": ("secret1", "secret2")}
-
-`someprovider.check_auth`(*username*, *login_type*, *login_dict*)
-
-> This method is the one that does the real work. If implemented, it
-> will be called for each login attempt where the login type matches one
-> of the keys returned by `get_supported_login_types`.
->
-> It is passed the (possibly UNqualified) `user` provided by the client,
-> the login type, and a dictionary of login secrets passed by the
-> client.
->
-> The method should return a Twisted `Deferred` object, which resolves
-> to the canonical `@localpart:domain` user id if authentication is
-> successful, and `None` if not.
->
-> Alternatively, the `Deferred` can resolve to a `(str, func)` tuple, in
-> which case the second field is a callback which will be called with
-> the result from the `/login` call (including `access_token`,
-> `device_id`, etc.)
-
-`someprovider.check_3pid_auth`(*medium*, *address*, *password*)
-
-> This method, if implemented, is called when a user attempts to
-> register or log in with a third party identifier, such as email. It is
-> passed the medium (ex. "email"), an address (ex.
-> "<jdoe@example.com>") and the user's password.
->
-> The method should return a Twisted `Deferred` object, which resolves
-> to a `str` containing the user's (canonical) User ID if
-> authentication was successful, and `None` if not.
->
-> As with `check_auth`, the `Deferred` may alternatively resolve to a
-> `(user_id, callback)` tuple.
-
-`someprovider.check_password`(*user_id*, *password*)
-
-> This method provides a simpler interface than
-> `get_supported_login_types` and `check_auth` for password auth
-> providers that just want to provide a mechanism for validating
-> `m.login.password` logins.
->
-> Iif implemented, it will be called to check logins with an
-> `m.login.password` login type. It is passed a qualified
-> `@localpart:domain` user id, and the password provided by the user.
->
-> The method should return a Twisted `Deferred` object, which resolves
-> to `True` if authentication is successful, and `False` if not.
-
-`someprovider.on_logged_out`(*user_id*, *device_id*, *access_token*)
-
-> This method, if implemented, is called when a user logs out. It is
-> passed the qualified user ID, the ID of the deactivated device (if
-> any: access tokens are occasionally created without an associated
-> device ID), and the (now deactivated) access token.
->
-> It may return a Twisted `Deferred` object; the logout request will
-> wait for the deferred to complete but the result is ignored.
+Password auth provider classes may optionally provide the following methods
+(a worked example follows the list):
+
+* `get_db_schema_files(self)`
+
+  This method, if implemented, should return an Iterable of
+  `(name, stream)` pairs of database schema files. Each file is applied
+  in turn at initialisation, and a record is then made in the database
+  so that it is not re-applied on the next start.
+
+* `get_supported_login_types(self)`
+
+  This method, if implemented, should return a `dict` mapping from a
+  login type identifier (such as `m.login.password`) to an iterable
+  giving the fields which must be provided by the user in the submission
+  to [the `/login` API](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-login).
+  These fields are passed in the `login_dict` dictionary to `check_auth`.
+
+  For example, if a password auth provider wants to implement a custom
+  login type of `com.example.custom_login`, where the client is expected
+  to pass the fields `secret1` and `secret2`, the provider should
+  implement this method and return the following dict:
+
+  ```python
+  {"com.example.custom_login": ("secret1", "secret2")}
+  ```
+
+* `check_auth(self, username, login_type, login_dict)`
+
+  This method does the real work. If implemented, it
+  will be called for each login attempt where the login type matches one
+  of the keys returned by `get_supported_login_types`.
+
+  It is passed the (possibly unqualified) `user` field provided by the client,
+  the login type, and a dictionary of login secrets passed by the
+  client.
+
+  The method should return an `Awaitable` object, which resolves
+  to the canonical `@localpart:domain` user ID if authentication is
+  successful, and `None` if not.
+
+  Alternatively, the `Awaitable` can resolve to a `(str, func)` tuple, in
+  which case the second field is a callback which will be called with
+  the result from the `/login` call (including `access_token`,
+  `device_id`, etc.)
+
+* `check_3pid_auth(self, medium, address, password)`
+
+  This method, if implemented, is called when a user attempts to
+  register or log in with a third party identifier, such as email. It is
+  passed the medium (ex. "email"), an address (ex.
+  "<jdoe@example.com>") and the user's password.
+
+  The method should return an `Awaitable` object, which resolves
+  to a `str` containing the user's (canonical) user ID if
+  authentication was successful, and `None` if not.
+
+  As with `check_auth`, the `Awaitable` may alternatively resolve to a
+  `(user_id, callback)` tuple.
+
+* `check_password(self, user_id, password)`
+
+  This method provides a simpler interface than
+  `get_supported_login_types` and `check_auth` for password auth
+  providers that just want to provide a mechanism for validating
+  `m.login.password` logins.
+
+  If implemented, it will be called to check logins with an
+  `m.login.password` login type. It is passed a qualified
+  `@localpart:domain` user id, and the password provided by the user.
+
+  The method should return an `Awaitable` object, which resolves
+  to `True` if authentication is successful, and `False` if not.
+
+* `on_logged_out(self, user_id, device_id, access_token)`
+
+  This method, if implemented, is called when a user logs out. It is
+  passed the qualified user ID, the ID of the deactivated device (if
+  any: access tokens are occasionally created without an associated
+  device ID), and the (now deactivated) access token.
+
+  It may return an `Awaitable` object; the logout request will
+  wait for the `Awaitable` to complete, but the result is ignored.
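+
+To illustrate how these methods fit together, here is a minimal sketch of a
+provider which validates `m.login.password` logins against a fixed map of
+users taken from its config. It is purely illustrative: the class name and
+config shape are invented for this example, and storing plaintext passwords
+like this is not suitable for production use.
+
+```python
+class ExampleProvider:
+    """A toy password provider: config maps user IDs to passwords."""
+
+    def __init__(self, config, account_handler):
+        # `config` is whatever parse_config returned; `account_handler` is a
+        # synapse.module_api.ModuleApi instance.
+        self._users = config
+        self._account_handler = account_handler
+
+    @staticmethod
+    def parse_config(config):
+        # Sanity-check the config; the return value is passed to __init__.
+        if not isinstance(config, dict):
+            raise Exception("config must map user IDs to passwords")
+        return config
+
+    async def check_password(self, user_id, password):
+        # Called for m.login.password logins with a qualified user ID.
+        # Resolve to True to accept the login, or False to reject it.
+        return password == self._users.get(user_id)
+```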
diff --git a/docs/postgres.md b/docs/postgres.md
index 70fe29cdcc..e71a1975d8 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -188,6 +188,9 @@ to do step 2.
 
 It is safe to at any time kill the port script and restart it.
 
+Note that the database may take up significantly more (25% - 100% more)
+space on disk after porting to Postgres.
+
 ### Using the port script
 
 Firstly, shut down the currently running synapse server and copy its
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 3227294e0b..341bd2f858 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -10,6 +10,17 @@
 # homeserver.yaml. Instead, if you are starting from scratch, please generate
 # a fresh config using Synapse by following the instructions in INSTALL.md.
 
+# Configuration options that take a time period can be set using a number
+# followed by a letter. Letters have the following meanings:
+# s = second
+# m = minute
+# h = hour
+# d = day
+# w = week
+# y = year
+# For example, setting redaction_retention_period: 5m would remove redacted
+# messages from the database after 5 minutes, rather than 5 months.
+
 ################################################################################
 
 # Configuration file for Synapse.
@@ -314,6 +325,10 @@ limit_remote_rooms:
   #
   #complexity_error: "This room is too complex."
 
+  # Allow server admins to join complex rooms. Default is false.
+  #
+  #admins_can_join: true
+
 # Whether to require a user to be in the room to add an alias to it.
 # Defaults to 'true'.
 #
@@ -1145,24 +1160,6 @@ account_validity:
 #
 #default_identity_server: https://matrix.org
 
-# The list of identity servers trusted to verify third party
-# identifiers by this server.
-#
-# Also defines the ID server which will be called when an account is
-# deactivated (one will be picked arbitrarily).
-#
-# Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity
-# server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a
-# background migration script, informing itself that the identity server all of its
-# 3PIDs have been bound to is likely one of the below.
-#
-# As of Synapse v1.4.0, all other functionality of this option has been deprecated, and
-# it is now solely used for the purposes of the background migration script, and can be
-# removed once it has run.
-#trusted_third_party_id_servers:
-#  - matrix.org
-#  - vector.im
-
 # Handle threepid (email/phone etc) registration and password resets through a set of
 # *trusted* identity servers. Note that this allows the configured identity server to
 # reset passwords for accounts!
@@ -2398,3 +2395,57 @@ opentracing:
     #
     #  logging:
     #    false
+
+
+## Workers ##
+
+# Disables sending of outbound federation transactions on the main process.
+# Uncomment if using a federation sender worker.
+#
+#send_federation: false
+
+# It is possible to run multiple federation sender workers, in which case the
+# work is balanced across them.
+#
+# This configuration must be shared between all federation sender workers, and if
+# changed all federation sender workers must be stopped at the same time and then
+# started, to ensure that all instances are running with the same config (otherwise
+# events may be dropped).
+#
+#federation_sender_instances:
+#  - federation_sender1
+
+# When using workers this should be a map from `worker_name` to the
+# HTTP replication listener of the worker, if configured.
+#
+#instance_map:
+#  worker1:
+#    host: localhost
+#    port: 8034
+
+# Experimental: When using workers you can define which workers should
+# handle event persistence and typing notifications. Any worker
+# specified here must also be in the `instance_map`.
+#
+#stream_writers:
+#  events: worker1
+#  typing: worker1
+
+
+# Configuration for Redis when using workers. This *must* be enabled when
+# using workers (unless using old style direct TCP configuration).
+#
+redis:
+  # Uncomment the below to enable Redis support.
+  #
+  #enabled: true
+
+  # Optional host and port to use to connect to redis. Defaults to
+  # localhost and 6379
+  #
+  #host: localhost
+  #port: 6379
+
+  # Optional password if configured on the Redis instance
+  #
+  #password: <secret_password>
diff --git a/docs/synctl_workers.md b/docs/synctl_workers.md
new file mode 100644
index 0000000000..8da4a31852
--- /dev/null
+++ b/docs/synctl_workers.md
@@ -0,0 +1,32 @@
+### Using synctl with workers
+
+If you want to use `synctl` to manage your synapse processes, you will need to
+create an additional configuration file for the main synapse process. That
+configuration should look like this:
+
+```yaml
+worker_app: synapse.app.homeserver
+```
+
+Additionally, each worker app must be configured with the name of a "pid file",
+to which it will write its process ID when it starts. For example, for a
+synchrotron, you might write:
+
+```yaml
+worker_pid_file: /home/matrix/synapse/worker1.pid
+```
+
+Finally, to actually run your worker-based synapse, you must pass synctl the `-a`
+commandline option to tell it to operate on all the worker configurations found
+in the given directory, e.g.:
+
+    synctl -a $CONFIG/workers start
+
+Currently you should always restart all workers when restarting or upgrading
+synapse, unless you explicitly know it's safe not to.  For instance, restarting
+synapse without restarting all the synchrotrons may result in broken typing
+notifications.
+
+To manipulate a specific worker, you pass the `-w` option to synctl:
+
+    synctl -w $CONFIG/workers/worker1.yaml restart
diff --git a/docs/workers.md b/docs/workers.md
index f4cbbc0400..80b65a0cec 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -1,10 +1,10 @@
 # Scaling synapse via workers
 
-For small instances it recommended to run Synapse in monolith mode (the
-default). For larger instances where performance is a concern it can be helpful
-to split out functionality into multiple separate python processes. These
-processes are called 'workers', and are (eventually) intended to scale
-horizontally independently.
+For small instances it is recommended to run Synapse in the default monolith mode.
+For larger instances where performance is a concern it can be helpful to split
+out functionality into multiple separate python processes. These processes are
+called 'workers', and are (eventually) intended to scale horizontally
+independently.
 
 Synapse's worker support is under active development and subject to change as
 we attempt to rapidly scale ever larger Synapse instances. However we are
@@ -16,69 +16,115 @@ workers only work with PostgreSQL-based Synapse deployments. SQLite should only
 be used for demo purposes and any admin considering workers should already be
 running PostgreSQL.
 
-## Master/worker communication
+## Main process/worker communication
+
+The processes communicate with each other via a Synapse-specific protocol called
+'replication' (analogous to MySQL- or Postgres-style database replication) which
+feeds streams of newly written data between processes so they can be kept in
+sync with the database state.
+
+When configured to do so, Synapse uses a 
+[Redis pub/sub channel](https://redis.io/topics/pubsub) to send the replication
+stream between all configured Synapse processes. Additionally, processes may
+make HTTP requests to each other, primarily for operations which need to wait
+for a reply ─ such as sending an event.
+
+Redis support was added in v1.13.0 and became the recommended method in
+v1.18.0. It replaces the old direct TCP connections (deprecated as of v1.18.0)
+to the main process: with Redis, rather than all the workers connecting to the
+main process, all the workers and the main process connect to Redis, which
+relays replication commands between processes. This can give a significant CPU
+saving on the main process and will be a prerequisite for upcoming performance
+improvements.
+
+See the [Architectural diagram](#architectural-diagram) section at the end for
+a visualisation of what this looks like.
+
 
-The workers communicate with the master process via a Synapse-specific protocol
-called 'replication' (analogous to MySQL- or Postgres-style database
-replication) which feeds a stream of relevant data from the master to the
-workers so they can be kept in sync with the master process and database state.
+## Setting up workers
 
-Additionally, workers may make HTTP requests to the master, to send information
-in the other direction. Typically this is used for operations which need to
-wait for a reply - such as sending an event.
+A Redis server is required to manage the communication between the processes.
+The Redis server should be installed following the normal procedure for your
+distribution (e.g. `apt install redis-server` on Debian). It is safe to use an
+existing Redis deployment if you have one.
 
-## Configuration
+Once installed, check that Redis is running and accessible from the host running
+Synapse, for example by executing `echo PING | nc -q1 localhost 6379` and seeing
+a response of `+PONG`.
+
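+The same check can be made from Python if preferred. This snippet is merely a
+hypothetical convenience, not something Synapse itself requires, and assumes
+the `redis` Python package is installed:
+
+```python
+import redis
+
+# Adjust host/port to match your Redis deployment.
+client = redis.Redis(host="localhost", port=6379)
+assert client.ping()  # raises if Redis is unreachable
+print("Redis is reachable")
+```
+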
+The appropriate dependencies must also be installed for Synapse. If using a
+virtualenv, these can be installed with:
+
+```sh
+pip install matrix-synapse[redis]
+```
+
+Note that these dependencies are included when synapse is installed with `pip
+install matrix-synapse[all]`. They are also included in the debian packages from
+`matrix.org` and in the docker images at
+https://hub.docker.com/r/matrixdotorg/synapse/.
 
 To make effective use of the workers, you will need to configure an HTTP
 reverse-proxy such as nginx or haproxy, which will direct incoming requests to
-the correct worker, or to the main synapse instance. Note that this includes
-requests made to the federation port. See [reverse_proxy.md](reverse_proxy.md)
-for information on setting up a reverse proxy.
+the correct worker, or to the main synapse instance. See 
+[reverse_proxy.md](reverse_proxy.md) for information on setting up a reverse
+proxy.
+
+To enable workers you should create a configuration file for each worker
+process. Each worker configuration file inherits the configuration of the shared
+homeserver configuration file.  You can then override configuration specific to
+that worker, e.g. the HTTP listener that it provides (if any); logging
+configuration; etc.  You should minimise the number of overrides though to
+maintain a usable config.
+
+
+### Shared Configuration
 
-To enable workers, you need to add *two* replication listeners to the
-main Synapse configuration file (`homeserver.yaml`). For example:
+Next you need to add both an HTTP replication listener, used for HTTP requests
+between processes, and Redis config to the shared Synapse configuration file
+(`homeserver.yaml`). For example:
 
 ```yaml
+# extend the existing `listeners` section. This defines the ports that the
+# main process will listen on.
 listeners:
-  # The TCP replication port
-  - port: 9092
-    bind_address: '127.0.0.1'
-    type: replication
-
   # The HTTP replication port
   - port: 9093
     bind_address: '127.0.0.1'
     type: http
     resources:
      - names: [replication]
+
+redis:
+    enabled: true
 ```
 
-Under **no circumstances** should these replication API listeners be exposed to
-the public internet; they have no authentication and are unencrypted.
+See the sample config for the full documentation of each option.
 
-You should then create a set of configs for the various worker processes.  Each
-worker configuration file inherits the configuration of the main homeserver
-configuration file.  You can then override configuration specific to that
-worker, e.g. the HTTP listener that it provides (if any); logging
-configuration; etc.  You should minimise the number of overrides though to
-maintain a usable config.
+Under **no circumstances** should the replication listener be exposed to the
+public internet; it has no authentication and is unencrypted.
+
+
+### Worker Configuration
 
 In the config file for each worker, you must specify the type of worker
-application (`worker_app`). The currently available worker applications are
-listed below. You must also specify the replication endpoints that it should
-talk to on the main synapse process.  `worker_replication_host` should specify
-the host of the main synapse, `worker_replication_port` should point to the TCP
-replication listener port and `worker_replication_http_port` should point to
-the HTTP replication port.
+application (`worker_app`), and you should specify a unique name for the worker
+(`worker_name`). The currently available worker applications are listed below.
+You must also specify the HTTP replication endpoint that it should talk to on
+the main synapse process.  `worker_replication_host` should specify the host of
+the main synapse and `worker_replication_http_port` should point to the HTTP
+replication port. If the worker will handle HTTP requests then the
+`worker_listeners` option should be set with an `http` listener, in the same way
+as the `listeners` option in the shared config.
 
 For example:
 
 ```yaml
-worker_app: synapse.app.synchrotron
+worker_app: synapse.app.generic_worker
+worker_name: worker1
 
-# The replication listener on the synapse to talk to.
+# The replication listener on the main synapse process.
 worker_replication_host: 127.0.0.1
-worker_replication_port: 9092
 worker_replication_http_port: 9093
 
 worker_listeners:
@@ -87,13 +133,14 @@ worker_listeners:
    resources:
      - names:
        - client
+       - federation
 
-worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml
+worker_log_config: /home/matrix/synapse/config/worker1_log_config.yaml
 ```
 
-...is a full configuration for a synchrotron worker instance, which will expose a
-plain HTTP `/sync` endpoint on port 8083 separately from the `/sync` endpoint provided
-by the main synapse.
+...is a full configuration for a generic worker instance, which will expose a
+plain HTTP endpoint on port 8083, separate from the main synapse process,
+serving the various endpoints listed below, e.g. `/sync`.
 
 Obviously you should configure your reverse-proxy to route the relevant
 endpoints to the worker (`localhost:8083` in the above example).
@@ -102,127 +149,24 @@ Finally, you need to start your worker processes. This can be done with either
 `synctl` or your distribution's preferred service manager such as `systemd`. We
 recommend the use of `systemd` where available: for information on setting up
 `systemd` to start synapse workers, see
-[systemd-with-workers](systemd-with-workers). To use `synctl`, see below.
+[systemd-with-workers](systemd-with-workers). To use `synctl`, see
+[synctl_workers.md](synctl_workers.md).
 
-### **Experimental** support for replication over redis
-
-As of Synapse v1.13.0, it is possible to configure Synapse to send replication
-via a [Redis pub/sub channel](https://redis.io/topics/pubsub). This is an
-alternative to direct TCP connections to the master: rather than all the
-workers connecting to the master, all the workers and the master connect to
-Redis, which relays replication commands between processes. This can give a
-significant cpu saving on the master and will be a prerequisite for upcoming
-performance improvements.
-
-Note that this support is currently experimental; you may experience lost
-messages and similar problems! It is strongly recommended that admins setting
-up workers for the first time use direct TCP replication as above.
-
-To configure Synapse to use Redis:
-
-1. Install Redis following the normal procedure for your distribution - for
-   example, on Debian, `apt install redis-server`. (It is safe to use an
-   existing Redis deployment if you have one: we use a pub/sub stream named
-   according to the `server_name` of your synapse server.)
-2. Check Redis is running and accessible: you should be able to `echo PING | nc -q1
-   localhost 6379` and get a response of `+PONG`.
-3. Install the python prerequisites. If you installed synapse into a
-   virtualenv, this can be done with:
-   ```sh
-   pip install matrix-synapse[redis]
-   ```
-   The debian packages from matrix.org already include the required
-   dependencies.
-4. Add config to the shared configuration (`homeserver.yaml`):
-    ```yaml
-    redis:
-      enabled: true
-    ```
-    Optional parameters which can go alongside `enabled` are `host`, `port`,
-    `password`. Normally none of these are required.
-5. Restart master and all workers.
-
-Once redis replication is in use, `worker_replication_port` is redundant and
-can be removed from the worker configuration files. Similarly, the
-configuration for the `listener` for the TCP replication port can be removed
-from the main configuration file. Note that the HTTP replication port is
-still required.
-
-### Using synctl
-
-If you want to use `synctl` to manage your synapse processes, you will need to
-create an an additional configuration file for the master synapse process. That
-configuration should look like this:
-
-```yaml
-worker_app: synapse.app.homeserver
-```
-
-Additionally, each worker app must be configured with the name of a "pid file",
-to which it will write its process ID when it starts. For example, for a
-synchrotron, you might write:
-
-```yaml
-worker_pid_file: /home/matrix/synapse/synchrotron.pid
-```
-
-Finally, to actually run your worker-based synapse, you must pass synctl the `-a`
-commandline option to tell it to operate on all the worker configurations found
-in the given directory, e.g.:
-
-    synctl -a $CONFIG/workers start
-
-Currently one should always restart all workers when restarting or upgrading
-synapse, unless you explicitly know it's safe not to.  For instance, restarting
-synapse without restarting all the synchrotrons may result in broken typing
-notifications.
-
-To manipulate a specific worker, you pass the -w option to synctl:
-
-    synctl -w $CONFIG/workers/synchrotron.yaml restart
 
 ## Available worker applications
 
-### `synapse.app.pusher`
-
-Handles sending push notifications to sygnal and email. Doesn't handle any
-REST endpoints itself, but you should set `start_pushers: False` in the
-shared configuration file to stop the main synapse sending these notifications.
-
-Note this worker cannot be load-balanced: only one instance should be active.
-
-### `synapse.app.synchrotron`
+### `synapse.app.generic_worker`
 
-The synchrotron handles `sync` requests from clients. In particular, it can
-handle REST endpoints matching the following regular expressions:
+This worker can handle API requests matching the following regular
+expressions:
 
+    # Sync requests
     ^/_matrix/client/(v2_alpha|r0)/sync$
     ^/_matrix/client/(api/v1|v2_alpha|r0)/events$
     ^/_matrix/client/(api/v1|r0)/initialSync$
     ^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$
 
-The above endpoints should all be routed to the synchrotron worker by the
-reverse-proxy configuration.
-
-It is possible to run multiple instances of the synchrotron to scale
-horizontally. In this case the reverse-proxy should be configured to
-load-balance across the instances, though it will be more efficient if all
-requests from a particular user are routed to a single instance. Extracting
-a userid from the access token is currently left as an exercise for the reader.
-
-### `synapse.app.appservice`
-
-Handles sending output traffic to Application Services. Doesn't handle any
-REST endpoints itself, but you should set `notify_appservices: False` in the
-shared configuration file to stop the main synapse sending these notifications.
-
-Note this worker cannot be load-balanced: only one instance should be active.
-
-### `synapse.app.federation_reader`
-
-Handles a subset of federation endpoints. In particular, it can handle REST
-endpoints matching the following regular expressions:
-
+    # Federation requests
     ^/_matrix/federation/v1/event/
     ^/_matrix/federation/v1/state/
     ^/_matrix/federation/v1/state_ids/
@@ -242,40 +186,145 @@ endpoints matching the following regular expressions:
     ^/_matrix/federation/v1/event_auth/
     ^/_matrix/federation/v1/exchange_third_party_invite/
     ^/_matrix/federation/v1/user/devices/
-    ^/_matrix/federation/v1/send/
     ^/_matrix/federation/v1/get_groups_publicised$
     ^/_matrix/key/v2/query
 
+    # Inbound federation transaction request
+    ^/_matrix/federation/v1/send/
+
+    # Client API requests
+    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
+    ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
+    ^/_matrix/client/(api/v1|r0|unstable)/keys/query$
+    ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
+    ^/_matrix/client/versions$
+    ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
+    ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
+    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
+    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/
+
+    # Registration/login requests
+    ^/_matrix/client/(api/v1|r0|unstable)/login$
+    ^/_matrix/client/(r0|unstable)/register$
+    ^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$
+
+    # Event sending requests
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
+    ^/_matrix/client/(api/v1|r0|unstable)/join/
+    ^/_matrix/client/(api/v1|r0|unstable)/profile/
+
+
 Additionally, the following REST endpoints can be handled for GET requests:
 
     ^/_matrix/federation/v1/groups/
 
-The above endpoints should all be routed to the federation_reader worker by the
-reverse-proxy configuration.
+Pagination requests can also be handled, but all requests for a given
+room must be routed to the same instance. Additionally, care must be taken to
+ensure that the purge history admin API is not used while pagination requests
+for the room are in flight:
+
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/messages$
+
+Note that an HTTP listener with `client` and `federation` resources must be
+configured in the `worker_listeners` option in the worker config.
+
+
+#### Load balancing
+
+It is possible to run multiple instances of this worker app, with incoming requests
+being load-balanced between them by the reverse-proxy. However, different endpoints
+have different characteristics and so admins
+may wish to run multiple groups of workers handling different endpoints so that
+load balancing can be done in different ways.
+
+For `/sync` and `/initialSync` requests it will be more efficient if all
+requests from a particular user are routed to a single instance. Extracting a
+user ID from the access token or `Authorization` header is currently left as an
+exercise for the reader. Admins may additionally wish to separate out `/sync`
+requests that have a `since` query parameter from those that don't (and from
+`/initialSync`): requests without a `since` parameter are "initial sync"
+requests, which happen when a user logs in on a new device and can be *very*
+resource intensive. Isolating these requests will stop them from interfering
+with other users' ongoing syncs.
+
+Federation and client requests can be balanced via simple round robin.
+
+The inbound federation transaction request `^/_matrix/federation/v1/send/`
+should be balanced by source IP so that transactions from the same remote server
+go to the same process.
 
-The `^/_matrix/federation/v1/send/` endpoint must only be handled by a single
-instance.
+Registration/login requests can be handled separately purely to help ensure that
+unexpected load doesn't affect new logins and sign ups.
 
-Note that `federation` must be added to the listener resources in the worker config:
+Finally, event sending requests can be balanced by the room ID in the URI (or
+the full URI, or even just round robin); the room ID is the path component
+after `/rooms/`, as the sketch below illustrates. If there is a large bridge
+connected that is sending, or may send, lots of events, then a dedicated set of
+workers can be provisioned to limit the effects of bursts of events from that
+bridge on events sent by normal users.
+
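+As an illustration of balancing by room ID, the following hypothetical helper
+picks a worker by hashing the room ID extracted from the request path. A real
+deployment would express the same idea in the reverse proxy's own configuration
+language; the worker names and regular expression here are invented for the
+example:
+
+```python
+import hashlib
+import re
+
+# Hypothetical pool of event-sending workers.
+WORKERS = ["event_sender1", "event_sender2", "event_sender3"]
+
+# The room ID is the path component after /rooms/.
+ROOM_RE = re.compile(r"^/_matrix/client/(api/v1|r0|unstable)/rooms/([^/]+)/")
+
+
+def pick_worker(path: str) -> str:
+    """Route requests for the same room to the same worker."""
+    match = ROOM_RE.match(path)
+    # Fall back to hashing the full URI if no room ID is present.
+    key = match.group(2) if match else path
+    digest = hashlib.sha256(key.encode("utf-8")).digest()
+    return WORKERS[digest[0] % len(WORKERS)]
+```
+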
+#### Stream writers
+
+Additionally, there is *experimental* support for moving the writing of
+specific streams (such as events) off the main process to a particular worker.
+(This is only supported with Redis-based replication.)
+
+The currently supported streams are `events` and `typing`.
+
+To enable this, the worker must have an HTTP replication listener configured,
+have a `worker_name` and be listed in the `instance_map` config. For example, to
+move event persistence off to a dedicated worker, the shared configuration would
+include:
 
 ```yaml
-worker_app: synapse.app.federation_reader
-...
-worker_listeners:
- - type: http
-   port: <port>
-   resources:
-     - names:
-       - federation
+instance_map:
+    event_persister1:
+        host: localhost
+        port: 8034
+
+stream_writers:
+    events: event_persister1
 ```
 
+
+### `synapse.app.pusher`
+
+Handles sending push notifications to sygnal and email. Doesn't handle any
+REST endpoints itself, but you should set `start_pushers: False` in the
+shared configuration file to stop the main synapse sending push notifications.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+### `synapse.app.appservice`
+
+Handles sending output traffic to Application Services. Doesn't handle any
+REST endpoints itself, but you should set `notify_appservices: False` in the
+shared configuration file to stop the main synapse sending appservice notifications.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+
 ### `synapse.app.federation_sender`
 
 Handles sending federation traffic to other servers. Doesn't handle any
 REST endpoints itself, but you should set `send_federation: False` in the
 shared configuration file to stop the main synapse sending this traffic.
 
-Note this worker cannot be load-balanced: only one instance should be active.
+If running multiple federation senders, you must list each
+instance in the `federation_sender_instances` option by its `worker_name`.
+All instances must be stopped and started when adding or removing instances.
+For example:
+
+```yaml
+federation_sender_instances:
+    - federation_sender1
+    - federation_sender2
+```
 
 ### `synapse.app.media_repository`
 
@@ -314,46 +363,6 @@ and you must configure a single instance to run the background tasks, e.g.:
     media_instance_running_background_jobs: "media-repository-1"
 ```
 
-### `synapse.app.client_reader`
-
-Handles client API endpoints. It can handle REST endpoints matching the
-following regular expressions:
-
-    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
-    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
-    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
-    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
-    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
-    ^/_matrix/client/(api/v1|r0|unstable)/login$
-    ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
-    ^/_matrix/client/(api/v1|r0|unstable)/keys/query$
-    ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
-    ^/_matrix/client/versions$
-    ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
-    ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
-    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
-    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/
-
-Additionally, the following REST endpoints can be handled for GET requests:
-
-    ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$
-    ^/_matrix/client/(api/v1|r0|unstable)/groups/.*$
-    ^/_matrix/client/(api/v1|r0|unstable)/user/[^/]*/account_data/
-    ^/_matrix/client/(api/v1|r0|unstable)/user/[^/]*/rooms/[^/]*/account_data/
-
-Additionally, the following REST endpoints can be handled, but all requests must
-be routed to the same instance:
-
-    ^/_matrix/client/(r0|unstable)/register$
-    ^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$
-
-Pagination requests can also be handled, but all requests with the same path
-room must be routed to the same instance. Additionally, care must be taken to
-ensure that the purge history admin API is not used while pagination requests
-for the room are in flight:
-
-    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/messages$
-
 ### `synapse.app.user_dir`
 
 Handles searches in the user directory. It can handle REST endpoints matching
@@ -388,15 +397,48 @@ file. For example:
 
     worker_main_http_uri: http://127.0.0.1:8008
 
-### `synapse.app.event_creator`
+### Historical apps
 
-Handles some event creation. It can handle REST endpoints matching:
+*Note:* Historically there used to be more apps; however, they have been
+amalgamated into a single `synapse.app.generic_worker` app. The remaining apps
+are ones that do specific processing unrelated to requests, e.g. the `pusher`
+that handles sending out push notifications for new events. The intention is for
+all these to be folded into the `generic_worker` app and to use config to define
+which processes handle the various processing tasks, such as push notifications.
 
-    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
-    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/
-    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
-    ^/_matrix/client/(api/v1|r0|unstable)/join/
-    ^/_matrix/client/(api/v1|r0|unstable)/profile/
 
-It will create events locally and then send them on to the main synapse
-instance to be persisted and handled.
+## Architectural diagram
+
+The following shows an example setup using Redis and a reverse proxy:
+
+```
+                     Clients & Federation
+                              |
+                              v
+                        +-----------+
+                        |           |
+                        |  Reverse  |
+                        |  Proxy    |
+                        |           |
+                        +-----------+
+                            | | |
+                            | | | HTTP requests
+        +-------------------+ | +-----------+
+        |                 +---+             |
+        |                 |                 |
+        v                 v                 v
++--------------+  +--------------+  +--------------+  +--------------+
+|   Main       |  |   Generic    |  |   Generic    |  |  Event       |
+|   Process    |  |   Worker 1   |  |   Worker 2   |  |  Persister   |
++--------------+  +--------------+  +--------------+  +--------------+
+      ^    ^          |   ^   |         |   ^   |          ^    ^
+      |    |          |   |   |         |   |   |          |    |
+      |    |          |   |   |  HTTP   |   |   |          |    |
+      |    +----------+<--|---|---------+   |   |          |    |
+      |                   |   +-------------|-->+----------+    |
+      |                   |                 |                   |
+      |                   |                 |                   |
+      v                   v                 v                   v
+====================================================================
+                                                         Redis pub/sub channel
+```
diff --git a/scripts-dev/check_line_terminators.sh b/scripts-dev/check_line_terminators.sh
new file mode 100755
index 0000000000..c983956231
--- /dev/null
+++ b/scripts-dev/check_line_terminators.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This script checks that all repository files (excluding those in the .git
+# directory) use unix line terminators.
+#
+# Usage:
+#
+# ./check_line_terminators.sh
+#
+# The script will emit exit code 1 if any files that do not use unix line
+# terminators are found, 0 otherwise.
+
+# cd to the root of the repository
+cd "$(dirname "$0")/.."
+
+# Find and print files with non-unix line terminators
+if find . -path './.git/*' -prune -o -type f -print0 | xargs -0 grep -I -l $'\r$'; then
+    echo -e '\e[31mERROR: found files with CRLF line endings. See above.\e[39m'
+    exit 1
+fi
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 22a6abd7d2..bee525197f 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -69,7 +69,7 @@ logger = logging.getLogger("synapse_port_db")
 
 
 BOOLEAN_COLUMNS = {
-    "events": ["processed", "outlier", "contains_url"],
+    "events": ["processed", "outlier", "contains_url", "count_as_unread"],
     "rooms": ["is_public"],
     "event_edges": ["is_state"],
     "presence_list": ["accepted"],
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 8592dee179..f70381bc71 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -17,6 +17,7 @@
 """ This is a reference implementation of a Matrix homeserver.
 """
 
+import json
 import os
 import sys
 
@@ -25,6 +26,9 @@ if sys.version_info < (3, 5):
     print("Synapse requires Python 3.5 or above.")
     sys.exit(1)
 
+# Twisted and canonicaljson will fail to import when this file is executed to
+# get the __version__ during a fresh install. That's OK and subsequent calls to
+# actually start Synapse will import these libraries fine.
 try:
     from twisted.internet import protocol
     from twisted.internet.protocol import Factory
@@ -36,7 +40,15 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.17.0"
+# Use the standard library json implementation instead of simplejson.
+try:
+    from canonicaljson import set_json_library
+
+    set_json_library(json)
+except ImportError:
+    pass
+
+__version__ = "1.18.0"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 40dc62ef6c..2178e623da 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -82,7 +82,7 @@ class Auth(object):
 
     @defer.inlineCallbacks
     def check_from_context(self, room_version: str, event, context, do_sig_check=True):
-        prev_state_ids = yield context.get_prev_state_ids()
+        prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids())
         auth_events_ids = yield self.compute_auth_events(
             event, prev_state_ids, for_verification=True
         )
@@ -127,8 +127,10 @@ class Auth(object):
         if current_state:
             member = current_state.get((EventTypes.Member, user_id), None)
         else:
-            member = yield self.state.get_current_state(
-                room_id=room_id, event_type=EventTypes.Member, state_key=user_id
+            member = yield defer.ensureDeferred(
+                self.state.get_current_state(
+                    room_id=room_id, event_type=EventTypes.Member, state_key=user_id
+                )
             )
         membership = member.membership if member else None
 
@@ -665,8 +667,10 @@ class Auth(object):
             )
             return member_event.membership, member_event.event_id
         except AuthError:
-            visibility = yield self.state.get_current_state(
-                room_id, EventTypes.RoomHistoryVisibility, ""
+            visibility = yield defer.ensureDeferred(
+                self.state.get_current_state(
+                    room_id, EventTypes.RoomHistoryVisibility, ""
+                )
             )
             if (
                 visibility
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index c1b76d827b..c478df53be 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -87,7 +87,6 @@ from synapse.replication.tcp.streams import (
     ReceiptsStream,
     TagAccountDataStream,
     ToDeviceStream,
-    TypingStream,
 )
 from synapse.rest.admin import register_servlets_for_media_repo
 from synapse.rest.client.v1 import events
@@ -629,7 +628,7 @@ class GenericWorkerServer(HomeServer):
 
         self.get_tcp_replication().start_replication(self)
 
-    def remove_pusher(self, app_id, push_key, user_id):
+    async def remove_pusher(self, app_id, push_key, user_id):
         self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
 
     def build_replication_data_handler(self):
@@ -644,7 +643,6 @@ class GenericWorkerReplicationHandler(ReplicationDataHandler):
         super(GenericWorkerReplicationHandler, self).__init__(hs)
 
         self.store = hs.get_datastore()
-        self.typing_handler = hs.get_typing_handler()
         self.presence_handler = hs.get_presence_handler()  # type: GenericWorkerPresence
         self.notifier = hs.get_notifier()
 
@@ -681,11 +679,6 @@ class GenericWorkerReplicationHandler(ReplicationDataHandler):
                 await self.pusher_pool.on_new_receipts(
                     token, token, {row.room_id for row in rows}
                 )
-            elif stream_name == TypingStream.NAME:
-                self.typing_handler.process_replication_rows(token, rows)
-                self.notifier.on_new_event(
-                    "typing_key", token, rooms=[row.room_id for row in rows]
-                )
             elif stream_name == ToDeviceStream.NAME:
                 entities = [row.entity for row in rows if row.entity.startswith("@")]
                 if entities:
@@ -947,7 +940,7 @@ def start(config_options):
         config.server.update_user_directory = False
 
     if config.worker_app == "synapse.app.federation_sender":
-        if config.federation.send_federation:
+        if config.worker.send_federation:
             sys.stderr.write(
                 "\nThe send_federation must be disabled in the main synapse process"
                 "\nbefore they can be run in a separate worker."
@@ -957,10 +950,10 @@ def start(config_options):
             sys.exit(1)
 
         # Force the pushers to start since they will be disabled in the main config
-        config.federation.send_federation = True
+        config.worker.send_federation = True
     else:
         # For other worker types we force this to off.
-        config.federation.send_federation = False
+        config.worker.send_federation = False
 
     synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 0323256472..1ffdc1ed95 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -15,11 +15,9 @@
 import logging
 import re
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes
 from synapse.types import GroupID, get_domain_from_id
-from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.util.caches.descriptors import cached
 
 logger = logging.getLogger(__name__)
 
@@ -43,7 +41,7 @@ class AppServiceTransaction(object):
         Args:
             as_api(ApplicationServiceApi): The API to use to send.
         Returns:
-            A Deferred which resolves to True if the transaction was sent.
+            An Awaitable which resolves to True if the transaction was sent.
         """
         return as_api.push_bulk(
             service=self.service, events=self.events, txn_id=self.id
@@ -172,8 +170,7 @@ class ApplicationService(object):
             return regex_obj["exclusive"]
         return False
 
-    @defer.inlineCallbacks
-    def _matches_user(self, event, store):
+    async def _matches_user(self, event, store):
         if not event:
             return False
 
@@ -188,12 +185,12 @@ class ApplicationService(object):
         if not store:
             return False
 
-        does_match = yield self._matches_user_in_member_list(event.room_id, store)
+        does_match = await self._matches_user_in_member_list(event.room_id, store)
         return does_match
 
-    @cachedInlineCallbacks(num_args=1, cache_context=True)
-    def _matches_user_in_member_list(self, room_id, store, cache_context):
-        member_list = yield store.get_users_in_room(
+    @cached(num_args=1, cache_context=True)
+    async def _matches_user_in_member_list(self, room_id, store, cache_context):
+        member_list = await store.get_users_in_room(
             room_id, on_invalidate=cache_context.invalidate
         )
 
@@ -208,35 +205,33 @@ class ApplicationService(object):
             return self.is_interested_in_room(event.room_id)
         return False
 
-    @defer.inlineCallbacks
-    def _matches_aliases(self, event, store):
+    async def _matches_aliases(self, event, store):
         if not store or not event:
             return False
 
-        alias_list = yield store.get_aliases_for_room(event.room_id)
+        alias_list = await store.get_aliases_for_room(event.room_id)
         for alias in alias_list:
             if self.is_interested_in_alias(alias):
                 return True
         return False
 
-    @defer.inlineCallbacks
-    def is_interested(self, event, store=None):
+    async def is_interested(self, event, store=None) -> bool:
         """Check if this service is interested in this event.
 
         Args:
             event(Event): The event to check.
             store(DataStore)
         Returns:
-            bool: True if this service would like to know about this event.
+            True if this service would like to know about this event.
         """
         # Do cheap checks first
         if self._matches_room_id(event):
             return True
 
-        if (yield self._matches_aliases(event, store)):
+        if await self._matches_aliases(event, store):
             return True
 
-        if (yield self._matches_user(event, store)):
+        if await self._matches_user(event, store):
             return True
 
         return False
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 1e0e4d497d..db578bda79 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -93,13 +93,12 @@ class ApplicationServiceApi(SimpleHttpClient):
             hs, "as_protocol_meta", timeout_ms=HOUR_IN_MS
         )
 
-    @defer.inlineCallbacks
-    def query_user(self, service, user_id):
+    async def query_user(self, service, user_id):
         if service.url is None:
             return False
         uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
         try:
-            response = yield self.get_json(uri, {"access_token": service.hs_token})
+            response = await self.get_json(uri, {"access_token": service.hs_token})
             if response is not None:  # just an empty json object
                 return True
         except CodeMessageException as e:
@@ -110,14 +109,12 @@ class ApplicationServiceApi(SimpleHttpClient):
             logger.warning("query_user to %s threw exception %s", uri, ex)
         return False
 
-    @defer.inlineCallbacks
-    def query_alias(self, service, alias):
+    async def query_alias(self, service, alias):
         if service.url is None:
             return False
         uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
-        response = None
         try:
-            response = yield self.get_json(uri, {"access_token": service.hs_token})
+            response = await self.get_json(uri, {"access_token": service.hs_token})
             if response is not None:  # just an empty json object
                 return True
         except CodeMessageException as e:
@@ -128,8 +125,7 @@ class ApplicationServiceApi(SimpleHttpClient):
             logger.warning("query_alias to %s threw exception %s", uri, ex)
         return False
 
-    @defer.inlineCallbacks
-    def query_3pe(self, service, kind, protocol, fields):
+    async def query_3pe(self, service, kind, protocol, fields):
         if kind == ThirdPartyEntityKind.USER:
             required_field = "userid"
         elif kind == ThirdPartyEntityKind.LOCATION:
@@ -146,7 +142,7 @@ class ApplicationServiceApi(SimpleHttpClient):
             urllib.parse.quote(protocol),
         )
         try:
-            response = yield self.get_json(uri, fields)
+            response = await self.get_json(uri, fields)
             if not isinstance(response, list):
                 logger.warning(
                     "query_3pe to %s returned an invalid response %r", uri, response
@@ -202,8 +198,7 @@ class ApplicationServiceApi(SimpleHttpClient):
         key = (service.id, protocol)
         return self.protocol_meta_cache.wrap(key, _get)
 
-    @defer.inlineCallbacks
-    def push_bulk(self, service, events, txn_id=None):
+    async def push_bulk(self, service, events, txn_id=None):
         if service.url is None:
             return True
 
@@ -218,7 +213,7 @@ class ApplicationServiceApi(SimpleHttpClient):
 
         uri = service.url + ("/transactions/%s" % urllib.parse.quote(txn_id))
         try:
-            yield self.put_json(
+            await self.put_json(
                 uri=uri,
                 json_body={"events": events},
                 args={"access_token": service.hs_token},
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 9998f822f1..d5204b1314 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -50,8 +50,6 @@ components.
 """
 import logging
 
-from twisted.internet import defer
-
 from synapse.appservice import ApplicationServiceState
 from synapse.logging.context import run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -73,12 +71,11 @@ class ApplicationServiceScheduler(object):
         self.txn_ctrl = _TransactionController(self.clock, self.store, self.as_api)
         self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock)
 
-    @defer.inlineCallbacks
-    def start(self):
+    async def start(self):
         logger.info("Starting appservice scheduler")
 
         # check for any DOWN ASes and start recoverers for them.
-        services = yield self.store.get_appservices_by_state(
+        services = await self.store.get_appservices_by_state(
             ApplicationServiceState.DOWN
         )
 
@@ -117,8 +114,7 @@ class _ServiceQueuer(object):
             "as-sender-%s" % (service.id,), self._send_request, service
         )
 
-    @defer.inlineCallbacks
-    def _send_request(self, service):
+    async def _send_request(self, service):
         # sanity-check: we shouldn't get here if this service already has a sender
         # running.
         assert service.id not in self.requests_in_flight
@@ -130,7 +126,7 @@ class _ServiceQueuer(object):
                 if not events:
                     return
                 try:
-                    yield self.txn_ctrl.send(service, events)
+                    await self.txn_ctrl.send(service, events)
                 except Exception:
                     logger.exception("AS request failed")
         finally:
@@ -162,36 +158,33 @@ class _TransactionController(object):
         # for UTs
         self.RECOVERER_CLASS = _Recoverer
 
-    @defer.inlineCallbacks
-    def send(self, service, events):
+    async def send(self, service, events):
         try:
-            txn = yield self.store.create_appservice_txn(service=service, events=events)
-            service_is_up = yield self._is_service_up(service)
+            txn = await self.store.create_appservice_txn(service=service, events=events)
+            service_is_up = await self._is_service_up(service)
             if service_is_up:
-                sent = yield txn.send(self.as_api)
+                sent = await txn.send(self.as_api)
                 if sent:
-                    yield txn.complete(self.store)
+                    await txn.complete(self.store)
                 else:
                     run_in_background(self._on_txn_fail, service)
         except Exception:
             logger.exception("Error creating appservice transaction")
             run_in_background(self._on_txn_fail, service)
 
-    @defer.inlineCallbacks
-    def on_recovered(self, recoverer):
+    async def on_recovered(self, recoverer):
         logger.info(
             "Successfully recovered application service AS ID %s", recoverer.service.id
         )
         self.recoverers.pop(recoverer.service.id)
         logger.info("Remaining active recoverers: %s", len(self.recoverers))
-        yield self.store.set_appservice_state(
+        await self.store.set_appservice_state(
             recoverer.service, ApplicationServiceState.UP
         )
 
-    @defer.inlineCallbacks
-    def _on_txn_fail(self, service):
+    async def _on_txn_fail(self, service):
         try:
-            yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
+            await self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
             self.start_recoverer(service)
         except Exception:
             logger.exception("Error starting AS recoverer")
@@ -211,9 +204,8 @@ class _TransactionController(object):
         recoverer.recover()
         logger.info("Now %i active recoverers", len(self.recoverers))
 
-    @defer.inlineCallbacks
-    def _is_service_up(self, service):
-        state = yield self.store.get_appservice_state(service)
+    async def _is_service_up(self, service):
+        state = await self.store.get_appservice_state(service)
         return state == ApplicationServiceState.UP or state is None
 
 
@@ -254,25 +246,24 @@ class _Recoverer(object):
             self.backoff_counter += 1
         self.recover()
 
-    @defer.inlineCallbacks
-    def retry(self):
+    async def retry(self):
         logger.info("Starting retries on %s", self.service.id)
         try:
             while True:
-                txn = yield self.store.get_oldest_unsent_txn(self.service)
+                txn = await self.store.get_oldest_unsent_txn(self.service)
                 if not txn:
                     # nothing left: we're done!
-                    self.callback(self)
+                    await self.callback(self)
                     return
 
                 logger.info(
                     "Retrying transaction %s for AS ID %s", txn.id, txn.service.id
                 )
-                sent = yield txn.send(self.as_api)
+                sent = await txn.send(self.as_api)
                 if not sent:
                     break
 
-                yield txn.complete(self.store)
+                await txn.complete(self.store)
 
                 # reset the backoff counter and then process the next transaction
                 self.backoff_counter = 1
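
The `_Recoverer.retry` loop drains unsent transactions one at a time, resetting the backoff counter after each success; the surrounding context shows the counter being incremented when a send fails, before `recover()` schedules another attempt. A self-contained sketch of that shape (Synapse schedules the next pass via the clock rather than sleeping in-line, and the cap below is chosen for the sketch):

    import asyncio

    async def recover(get_oldest_unsent, send_txn, on_recovered):
        backoff = 1
        while True:
            txn = await get_oldest_unsent()
            if txn is None:
                await on_recovered()          # nothing left: we're done
                return
            if await send_txn(txn):
                backoff = 1                   # success: reset, take the next txn
            else:
                await asyncio.sleep(2 ** backoff)   # failure: wait, then retry
                backoff = min(backoff + 1, 9)
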
diff --git a/synapse/config/federation.py b/synapse/config/federation.py
index 82ff9664de..2c77d8f85b 100644
--- a/synapse/config/federation.py
+++ b/synapse/config/federation.py
@@ -17,23 +17,13 @@ from typing import Optional
 
 from netaddr import IPSet
 
-from ._base import Config, ConfigError, ShardedWorkerHandlingConfig
+from ._base import Config, ConfigError
 
 
 class FederationConfig(Config):
     section = "federation"
 
     def read_config(self, config, **kwargs):
-        # Whether to send federation traffic out in this process. This only
-        # applies to some federation traffic, and so shouldn't be used to
-        # "disable" federation
-        self.send_federation = config.get("send_federation", True)
-
-        federation_sender_instances = config.get("federation_sender_instances") or []
-        self.federation_shard_config = ShardedWorkerHandlingConfig(
-            federation_sender_instances
-        )
-
         # FIXME: federation_domain_whitelist needs sytests
         self.federation_domain_whitelist = None  # type: Optional[dict]
         federation_domain_whitelist = config.get("federation_domain_whitelist", None)
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 8e93d31394..556e291495 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -78,7 +78,6 @@ class HomeServerConfig(RootConfig):
         JWTConfig,
         PasswordConfig,
         EmailConfig,
-        WorkerConfig,
         PasswordAuthProviderConfig,
         PushConfig,
         SpamCheckerConfig,
@@ -91,6 +90,7 @@ class HomeServerConfig(RootConfig):
         RoomDirectoryConfig,
         ThirdPartyRulesConfig,
         TracerConfig,
+        WorkerConfig,
         RedisConfig,
         FederationConfig,
     ]
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 49f6c32beb..dd775a97e8 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -214,7 +214,7 @@ def setup_logging(
     Set up the logging subsystem.
 
     Args:
-        config (LoggingConfig | synapse.config.workers.WorkerConfig):
+        config (LoggingConfig | synapse.config.worker.WorkerConfig):
             configuration data
 
         use_worker_options (bool): True to use the 'worker_log_config' option
diff --git a/synapse/config/redis.py b/synapse/config/redis.py
index d5d3ca1c9e..1373302335 100644
--- a/synapse/config/redis.py
+++ b/synapse/config/redis.py
@@ -21,7 +21,7 @@ class RedisConfig(Config):
     section = "redis"
 
     def read_config(self, config, **kwargs):
-        redis_config = config.get("redis", {})
+        redis_config = config.get("redis") or {}
         self.redis_enabled = redis_config.get("enabled", False)
 
         if not self.redis_enabled:
@@ -32,3 +32,24 @@ class RedisConfig(Config):
         self.redis_host = redis_config.get("host", "localhost")
         self.redis_port = redis_config.get("port", 6379)
         self.redis_password = redis_config.get("password")
+
+    def generate_config_section(self, config_dir_path, server_name, **kwargs):
+        return """\
+        # Configuration for Redis when using workers. This *must* be enabled when
+        # using workers (unless using old style direct TCP configuration).
+        #
+        redis:
+          # Uncomment the below to enable Redis support.
+          #
+          #enabled: true
+
+          # Optional host and port to use to connect to redis. Defaults to
+          # localhost and 6379
+          #
+          #host: localhost
+          #port: 6379
+
+          # Optional password if configured on the Redis instance
+          #
+          #password: <secret_password>
+        """
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 6badf4e75d..a185655774 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -333,24 +333,6 @@ class RegistrationConfig(Config):
         #
         #default_identity_server: https://matrix.org
 
-        # The list of identity servers trusted to verify third party
-        # identifiers by this server.
-        #
-        # Also defines the ID server which will be called when an account is
-        # deactivated (one will be picked arbitrarily).
-        #
-        # Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity
-        # server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a
-        # background migration script, informing itself that the identity server all of its
-        # 3PIDs have been bound to is likely one of the below.
-        #
-        # As of Synapse v1.4.0, all other functionality of this option has been deprecated, and
-        # it is now solely used for the purposes of the background migration script, and can be
-        # removed once it has run.
-        #trusted_third_party_id_servers:
-        #  - matrix.org
-        #  - vector.im
-
         # Handle threepid (email/phone etc) registration and password resets through a set of
         # *trusted* identity servers. Note that this allows the configured identity server to
         # reset passwords for accounts!
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 3747a01ca7..848587d232 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -439,6 +439,9 @@ class ServerConfig(Config):
                 validator=attr.validators.instance_of(str),
                 default=ROOM_COMPLEXITY_TOO_GREAT,
             )
+            admins_can_join = attr.ib(
+                validator=attr.validators.instance_of(bool), default=False
+            )
 
         self.limit_remote_rooms = LimitRemoteRoomsConfig(
             **(config.get("limit_remote_rooms") or {})
@@ -893,6 +896,10 @@ class ServerConfig(Config):
           #
           #complexity_error: "This room is too complex."
 
+          # Allow server admins to join complex rooms. Default is false.
+          #
+          #admins_can_join: true
+
         # Whether to require a user to be in the room to add an alias to it.
         # Defaults to 'true'.
         #
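
The new `admins_can_join` field follows the idiom of its neighbours in `LimitRemoteRoomsConfig`: an `attr.ib` with an `instance_of` validator and a default, so a mistyped YAML value fails at config-parse time instead of deep inside the join path. A standalone sketch of that idiom:

    import attr

    @attr.s
    class LimitRemoteRoomsConfig:
        enabled = attr.ib(validator=attr.validators.instance_of(bool), default=False)
        admins_can_join = attr.ib(
            validator=attr.validators.instance_of(bool), default=False
        )

    LimitRemoteRoomsConfig(admins_can_join=True)      # ok
    # LimitRemoteRoomsConfig(admins_can_join="yes")   # raises TypeError
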
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 2574cd3aa1..c784a71508 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -15,7 +15,7 @@
 
 import attr
 
-from ._base import Config, ConfigError
+from ._base import Config, ConfigError, ShardedWorkerHandlingConfig
 from .server import ListenerConfig, parse_listener_def
 
 
@@ -85,6 +85,16 @@ class WorkerConfig(Config):
                 )
             )
 
+        # Whether to send federation traffic out in this process. This only
+        # applies to some federation traffic, and so shouldn't be used to
+        # "disable" federation
+        self.send_federation = config.get("send_federation", True)
+
+        federation_sender_instances = config.get("federation_sender_instances") or []
+        self.federation_shard_config = ShardedWorkerHandlingConfig(
+            federation_sender_instances
+        )
+
         # A map from instance name to host/port of their HTTP replication endpoint.
         instance_map = config.get("instance_map") or {}
         self.instance_map = {
@@ -105,6 +115,43 @@ class WorkerConfig(Config):
                     % (instance, stream)
                 )
 
+    def generate_config_section(self, config_dir_path, server_name, **kwargs):
+        return """\
+        ## Workers ##
+
+        # Disables sending of outbound federation transactions on the main process.
+        # Uncomment if using a federation sender worker.
+        #
+        #send_federation: false
+
+        # It is possible to run multiple federation sender workers, in which case the
+        # work is balanced across them.
+        #
+        # This configuration must be shared between all federation sender workers, and if
+        # changed all federation sender workers must be stopped at the same time and then
+        # started, to ensure that all instances are running with the same config (otherwise
+        # events may be dropped).
+        #
+        #federation_sender_instances:
+        #  - federation_sender1
+
+        # When using workers this should be a map from `worker_name` to the
+        # HTTP replication listener of the worker, if configured.
+        #
+        #instance_map:
+        #  worker1:
+        #    host: localhost
+        #    port: 8034
+
+        # Experimental: When using workers you can define which workers should
+        # handle event persistence and typing notifications. Any worker
+        # specified here must also be in the `instance_map`.
+        #
+        #stream_writers:
+        #  events: worker1
+        #  typing: worker1
+        """
+
     def read_arguments(self, args):
         # We support a bunch of command line arguments that override options in
         # the config. A lot of these options have a worker_* prefix when running
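
With `send_federation` and `federation_sender_instances` now read here, callers reach the shard config via `hs.config.worker.federation_shard_config` (the `send_queue.py` and `sender/` hunks below update those references). Conceptually, `ShardedWorkerHandlingConfig` maps a key such as a destination server to exactly one configured instance, so every worker agrees on who owns what. A hedged sketch of that idea -- the hash below is illustrative, not Synapse's actual implementation:

    import binascii

    class ShardedHandlingSketch:
        def __init__(self, instances):
            self.instances = instances

        def should_handle(self, instance_name, key):
            if not self.instances:
                # No dedicated senders configured: the main process handles all.
                return instance_name == "master"
            # Deterministic assignment: every process computes the same owner.
            owner = self.instances[binascii.crc32(key.encode()) % len(self.instances)]
            return owner == instance_name

    shard = ShardedHandlingSketch(["federation_sender1", "federation_sender2"])
    print(shard.should_handle("federation_sender1", "remote.example.org"))
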
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index dbfc3e8972..443cde0b6d 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -632,18 +632,20 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
         )
 
         try:
-            query_response = yield self.client.post_json(
-                destination=perspective_name,
-                path="/_matrix/key/v2/query",
-                data={
-                    "server_keys": {
-                        server_name: {
-                            key_id: {"minimum_valid_until_ts": min_valid_ts}
-                            for key_id, min_valid_ts in server_keys.items()
+            query_response = yield defer.ensureDeferred(
+                self.client.post_json(
+                    destination=perspective_name,
+                    path="/_matrix/key/v2/query",
+                    data={
+                        "server_keys": {
+                            server_name: {
+                                key_id: {"minimum_valid_until_ts": min_valid_ts}
+                                for key_id, min_valid_ts in server_keys.items()
+                            }
+                            for server_name, server_keys in keys_to_fetch.items()
                         }
-                        for server_name, server_keys in keys_to_fetch.items()
-                    }
-                },
+                    },
+                )
             )
         except (NotRetryingDestination, RequestSendFailed) as e:
             # these both have str() representations which we can't really improve upon
@@ -792,23 +794,25 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
 
             time_now_ms = self.clock.time_msec()
             try:
-                response = yield self.client.get_json(
-                    destination=server_name,
-                    path="/_matrix/key/v2/server/"
-                    + urllib.parse.quote(requested_key_id),
-                    ignore_backoff=True,
-                    # we only give the remote server 10s to respond. It should be an
-                    # easy request to handle, so if it doesn't reply within 10s, it's
-                    # probably not going to.
-                    #
-                    # Furthermore, when we are acting as a notary server, we cannot
-                    # wait all day for all of the origin servers, as the requesting
-                    # server will otherwise time out before we can respond.
-                    #
-                    # (Note that get_json may make 4 attempts, so this can still take
-                    # almost 45 seconds to fetch the headers, plus up to another 60s to
-                    # read the response).
-                    timeout=10000,
+                response = yield defer.ensureDeferred(
+                    self.client.get_json(
+                        destination=server_name,
+                        path="/_matrix/key/v2/server/"
+                        + urllib.parse.quote(requested_key_id),
+                        ignore_backoff=True,
+                        # we only give the remote server 10s to respond. It should be an
+                        # easy request to handle, so if it doesn't reply within 10s, it's
+                        # probably not going to.
+                        #
+                        # Furthermore, when we are acting as a notary server, we cannot
+                        # wait all day for all of the origin servers, as the requesting
+                        # server will otherwise time out before we can respond.
+                        #
+                        # (Note that get_json may make 4 attempts, so this can still take
+                        # almost 45 seconds to fetch the headers, plus up to another 60s to
+                        # read the response).
+                        timeout=10000,
+                    )
                 )
             except (NotRetryingDestination, RequestSendFailed) as e:
                 # these both have str() representations which we can't really improve
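
These keyring hunks run in the opposite direction to the rest of the commit: the callers are still `@defer.inlineCallbacks` generators, but `post_json`/`get_json` have become coroutine functions, and a generator cannot `yield` a coroutine directly. `defer.ensureDeferred` adapts the coroutine into a Deferred that the old-style code can keep yielding. A minimal standalone illustration:

    from twisted.internet import defer

    async def fetch():            # new-style coroutine, standing in for get_json
        return {"ok": True}

    @defer.inlineCallbacks
    def old_style_caller():
        # Yielding fetch() directly fails: inlineCallbacks expects a Deferred,
        # not a coroutine. ensureDeferred bridges the two worlds.
        result = yield defer.ensureDeferred(fetch())
        return result

    old_style_caller().addCallback(print)   # fires synchronously: {'ok': True}
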
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index 92aadfe7ef..69b53ca2bc 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -17,8 +17,6 @@ from typing import Optional
 import attr
 from nacl.signing import SigningKey
 
-from twisted.internet import defer
-
 from synapse.api.constants import MAX_DEPTH
 from synapse.api.errors import UnsupportedRoomVersionError
 from synapse.api.room_versions import (
@@ -95,31 +93,30 @@ class EventBuilder(object):
     def is_state(self):
         return self._state_key is not None
 
-    @defer.inlineCallbacks
-    def build(self, prev_event_ids):
+    async def build(self, prev_event_ids):
         """Transform into a fully signed and hashed event
 
         Args:
             prev_event_ids (list[str]): The event IDs to use as the prev events
 
         Returns:
-            Deferred[FrozenEvent]
+            FrozenEvent
         """
 
-        state_ids = yield self._state.get_current_state_ids(
+        state_ids = await self._state.get_current_state_ids(
             self.room_id, prev_event_ids
         )
-        auth_ids = yield self._auth.compute_auth_events(self, state_ids)
+        auth_ids = await self._auth.compute_auth_events(self, state_ids)
 
         format_version = self.room_version.event_format
         if format_version == EventFormatVersions.V1:
-            auth_events = yield self._store.add_event_hashes(auth_ids)
-            prev_events = yield self._store.add_event_hashes(prev_event_ids)
+            auth_events = await self._store.add_event_hashes(auth_ids)
+            prev_events = await self._store.add_event_hashes(prev_event_ids)
         else:
             auth_events = auth_ids
             prev_events = prev_event_ids
 
-        old_depth = yield self._store.get_max_depth_of(prev_event_ids)
+        old_depth = await self._store.get_max_depth_of(prev_event_ids)
         depth = old_depth + 1
 
         # we cap depth of generated events, to ensure that they are not
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index f94cdcbaba..cca93e3a46 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -12,17 +12,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Optional, Union
+from typing import TYPE_CHECKING, Optional, Union
 
 import attr
 from frozendict import frozendict
 
-from twisted.internet import defer
-
 from synapse.appservice import ApplicationService
+from synapse.events import EventBase
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.types import StateMap
 
+if TYPE_CHECKING:
+    from synapse.storage.data_stores.main import DataStore
+
 
 @attr.s(slots=True)
 class EventContext:
@@ -129,8 +131,7 @@ class EventContext:
             delta_ids=delta_ids,
         )
 
-    @defer.inlineCallbacks
-    def serialize(self, event, store):
+    async def serialize(self, event: EventBase, store: "DataStore") -> dict:
         """Converts self to a type that can be serialized as JSON, and then
         deserialized by `deserialize`
 
@@ -146,7 +147,7 @@ class EventContext:
         # the prev_state_ids, so if we're a state event we include the event
         # id that we replaced in the state.
         if event.is_state():
-            prev_state_ids = yield self.get_prev_state_ids()
+            prev_state_ids = await self.get_prev_state_ids()
             prev_state_id = prev_state_ids.get((event.type, event.state_key))
         else:
             prev_state_id = None
@@ -214,8 +215,7 @@ class EventContext:
 
         return self._state_group
 
-    @defer.inlineCallbacks
-    def get_current_state_ids(self):
+    async def get_current_state_ids(self) -> Optional[StateMap[str]]:
         """
         Gets the room state map, including this event - ie, the state in ``state_group``
 
@@ -224,32 +224,31 @@ class EventContext:
         ``rejected`` is set.
 
         Returns:
-            Deferred[dict[(str, str), str]|None]: Returns None if state_group
-                is None, which happens when the associated event is an outlier.
+            Returns None if state_group is None, which happens when the associated
+            event is an outlier.
 
-                Maps a (type, state_key) to the event ID of the state event matching
-                this tuple.
+            Maps a (type, state_key) to the event ID of the state event matching
+            this tuple.
         """
         if self.rejected:
             raise RuntimeError("Attempt to access state_ids of rejected event")
 
-        yield self._ensure_fetched()
+        await self._ensure_fetched()
         return self._current_state_ids
 
-    @defer.inlineCallbacks
-    def get_prev_state_ids(self):
+    async def get_prev_state_ids(self):
         """
         Gets the room state map, excluding this event.
 
         For a non-state event, this will be the same as get_current_state_ids().
 
         Returns:
-            Deferred[dict[(str, str), str]|None]: Returns None if state_group
+            dict[(str, str), str]|None: Returns None if state_group
                 is None, which happens when the associated event is an outlier.
                 Maps a (type, state_key) to the event ID of the state event matching
                 this tuple.
         """
-        yield self._ensure_fetched()
+        await self._ensure_fetched()
         return self._prev_state_ids
 
     def get_cached_current_state_ids(self):
@@ -269,8 +268,8 @@ class EventContext:
 
         return self._current_state_ids
 
-    def _ensure_fetched(self):
-        return defer.succeed(None)
+    async def _ensure_fetched(self):
+        return None
 
 
 @attr.s(slots=True)
@@ -303,21 +302,20 @@ class _AsyncEventContextImpl(EventContext):
     _event_state_key = attr.ib(default=None)
     _fetching_state_deferred = attr.ib(default=None)
 
-    def _ensure_fetched(self):
+    async def _ensure_fetched(self):
         if not self._fetching_state_deferred:
             self._fetching_state_deferred = run_in_background(self._fill_out_state)
 
-        return make_deferred_yieldable(self._fetching_state_deferred)
+        return await make_deferred_yieldable(self._fetching_state_deferred)
 
-    @defer.inlineCallbacks
-    def _fill_out_state(self):
+    async def _fill_out_state(self):
         """Called to populate the _current_state_ids and _prev_state_ids
         attributes by loading from the database.
         """
         if self.state_group is None:
             return
 
-        self._current_state_ids = yield self._storage.state.get_state_ids_for_group(
+        self._current_state_ids = await self._storage.state.get_state_ids_for_group(
             self.state_group
         )
         if self._event_state_key is not None:
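
`_AsyncEventContextImpl._ensure_fetched` keeps its lazy-load semantics across the conversion: the first caller kicks off `_fill_out_state` in the background and stashes the resulting Deferred, and every caller then awaits that one shared Deferred, so the state is loaded from the database at most once however many coroutines race on it. The same shape in plain asyncio, as a sketch:

    import asyncio

    class LazyState:
        def __init__(self):
            self._fetching = None        # shared in-flight task, set on first use
            self.state = None

        async def _fill_out_state(self):
            await asyncio.sleep(0)       # stands in for the database round-trip
            self.state = {("m.room.create", ""): "$create_event_id"}

        async def ensure_fetched(self):
            if self._fetching is None:
                self._fetching = asyncio.ensure_future(self._fill_out_state())
            await self._fetching         # everyone awaits the one shared fetch

    async def main():
        ctx = LazyState()
        await asyncio.gather(ctx.ensure_fetched(), ctx.ensure_fetched())
        print(ctx.state)

    asyncio.run(main())
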
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 459132d388..2956a64234 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -13,7 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
+from synapse.types import Requester
 
 
 class ThirdPartyEventRules(object):
@@ -39,76 +41,79 @@ class ThirdPartyEventRules(object):
                 config=config, http_client=hs.get_simple_http_client()
             )
 
-    @defer.inlineCallbacks
-    def check_event_allowed(self, event, context):
+    async def check_event_allowed(
+        self, event: EventBase, context: EventContext
+    ) -> bool:
         """Check if a provided event should be allowed in the given context.
 
         Args:
-            event (synapse.events.EventBase): The event to be checked.
-            context (synapse.events.snapshot.EventContext): The context of the event.
+            event: The event to be checked.
+            context: The context of the event.
 
         Returns:
-            defer.Deferred[bool]: True if the event should be allowed, False if not.
+            True if the event should be allowed, False if not.
         """
         if self.third_party_rules is None:
             return True
 
-        prev_state_ids = yield context.get_prev_state_ids()
+        prev_state_ids = await context.get_prev_state_ids()
 
         # Retrieve the state events from the database.
         state_events = {}
         for key, event_id in prev_state_ids.items():
-            state_events[key] = yield self.store.get_event(event_id, allow_none=True)
+            state_events[key] = await self.store.get_event(event_id, allow_none=True)
 
-        ret = yield self.third_party_rules.check_event_allowed(event, state_events)
+        ret = await self.third_party_rules.check_event_allowed(event, state_events)
         return ret
 
-    @defer.inlineCallbacks
-    def on_create_room(self, requester, config, is_requester_admin):
+    async def on_create_room(
+        self, requester: Requester, config: dict, is_requester_admin: bool
+    ) -> bool:
         """Intercept requests to create room to allow, deny or update the
         request config.
 
         Args:
-            requester (Requester)
-            config (dict): The creation config from the client.
-            is_requester_admin (bool): If the requester is an admin
+            requester
+            config: The creation config from the client.
+            is_requester_admin: If the requester is an admin
 
         Returns:
-            defer.Deferred[bool]: Whether room creation is allowed or denied.
+            Whether room creation is allowed or denied.
         """
 
         if self.third_party_rules is None:
             return True
 
-        ret = yield self.third_party_rules.on_create_room(
+        ret = await self.third_party_rules.on_create_room(
             requester, config, is_requester_admin
         )
         return ret
 
-    @defer.inlineCallbacks
-    def check_threepid_can_be_invited(self, medium, address, room_id):
+    async def check_threepid_can_be_invited(
+        self, medium: str, address: str, room_id: str
+    ) -> bool:
         """Check if a provided 3PID can be invited in the given room.
 
         Args:
-            medium (str): The 3PID's medium.
-            address (str): The 3PID's address.
-            room_id (str): The room we want to invite the threepid to.
+            medium: The 3PID's medium.
+            address: The 3PID's address.
+            room_id: The room we want to invite the threepid to.
 
         Returns:
-            defer.Deferred[bool], True if the 3PID can be invited, False if not.
+            True if the 3PID can be invited, False if not.
         """
 
         if self.third_party_rules is None:
             return True
 
-        state_ids = yield self.store.get_filtered_current_state_ids(room_id)
-        room_state_events = yield self.store.get_events(state_ids.values())
+        state_ids = await self.store.get_filtered_current_state_ids(room_id)
+        room_state_events = await self.store.get_events(state_ids.values())
 
         state_events = {}
         for key, event_id in state_ids.items():
             state_events[key] = room_state_events[event_id]
 
-        ret = yield self.third_party_rules.check_threepid_can_be_invited(
+        ret = await self.third_party_rules.check_threepid_can_be_invited(
             medium, address, state_events
         )
         return ret
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 11f0d34ec8..2d42e268c6 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -18,8 +18,6 @@ from typing import Any, Mapping, Union
 
 from frozendict import frozendict
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes, RelationTypes
 from synapse.api.errors import Codes, SynapseError
 from synapse.api.room_versions import RoomVersion
@@ -337,8 +335,9 @@ class EventClientSerializer(object):
             hs.config.experimental_msc1849_support_enabled
         )
 
-    @defer.inlineCallbacks
-    def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs):
+    async def serialize_event(
+        self, event, time_now, bundle_aggregations=True, **kwargs
+    ):
         """Serializes a single event.
 
         Args:
@@ -348,7 +347,7 @@ class EventClientSerializer(object):
             **kwargs: Arguments to pass to `serialize_event`
 
         Returns:
-            Deferred[dict]: The serialized event
+            dict: The serialized event
         """
         # To handle the case of presence events and the like
         if not isinstance(event, EventBase):
@@ -363,8 +362,8 @@ class EventClientSerializer(object):
         if not event.internal_metadata.is_redacted() and (
             self.experimental_msc1849_support_enabled and bundle_aggregations
         ):
-            annotations = yield self.store.get_aggregation_groups_for_event(event_id)
-            references = yield self.store.get_relations_for_event(
+            annotations = await self.store.get_aggregation_groups_for_event(event_id)
+            references = await self.store.get_relations_for_event(
                 event_id, RelationTypes.REFERENCE, direction="f"
             )
 
@@ -378,7 +377,7 @@ class EventClientSerializer(object):
 
             edit = None
             if event.type == EventTypes.Message:
-                edit = yield self.store.get_applicable_edit(event_id)
+                edit = await self.store.get_applicable_edit(event_id)
 
             if edit:
                 # If there is an edit replace the content, preserving existing
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 994e6c8d5a..38ac7ec699 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -135,7 +135,7 @@ class FederationClient(FederationBase):
                 and try the request anyway.
 
         Returns:
-            a Deferred which will eventually yield a JSON object from the
+            an Awaitable which will eventually yield a JSON object from the
             response
         """
         sent_queries_counter.labels(query_type).inc()
@@ -157,7 +157,7 @@ class FederationClient(FederationBase):
             content (dict): The query content.
 
         Returns:
-            a Deferred which will eventually yield a JSON object from the
+            an Awaitable which will eventually yield a JSON object from the
             response
         """
         sent_queries_counter.labels("client_device_keys").inc()
@@ -180,7 +180,7 @@ class FederationClient(FederationBase):
             content (dict): The query content.
 
         Returns:
-            a Deferred which will eventually yield a JSON object from the
+            an Awaitable which will eventually yield a JSON object from the
             response
         """
         sent_queries_counter.labels("client_one_time_keys").inc()
@@ -900,7 +900,7 @@ class FederationClient(FederationBase):
                 party instance
 
         Returns:
-            Deferred[Dict[str, Any]]: The response from the remote server, or None if
+            Awaitable[Dict[str, Any]]: The response from the remote server, or None if
             `remote_server` is the same as the local server_name
 
         Raises:
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 23625ba995..11c5d63298 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -109,6 +109,9 @@ class FederationServer(FederationBase):
         # We cache responses to state queries, as they take a while and often
         # come in waves.
         self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000)
+        self._state_ids_resp_cache = ResponseCache(
+            hs, "state_ids_resp", timeout_ms=30000
+        )
 
     async def on_backfill_request(
         self, origin: str, room_id: str, versions: List[str], limit: int
@@ -376,10 +379,16 @@ class FederationServer(FederationBase):
         if not in_room:
             raise AuthError(403, "Host not in room.")
 
+        resp = await self._state_ids_resp_cache.wrap(
+            (room_id, event_id), self._on_state_ids_request_compute, room_id, event_id,
+        )
+
+        return 200, resp
+
+    async def _on_state_ids_request_compute(self, room_id, event_id):
         state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
         auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
-
-        return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
+        return {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
 
     async def _on_context_state_request_compute(
         self, room_id: str, event_id: str
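
The new `_state_ids_resp_cache` applies the trick already used for full state responses: `/state_ids` requests for a busy room arrive in bursts for the same `(room_id, event_id)`, and `ResponseCache.wrap` lets the first request do the expensive computation while concurrent duplicates simply await its result (hits within the 30s window reuse it). A stripped-down sketch of the wrap pattern, assuming nothing about the real `ResponseCache` beyond the call shape above:

    import asyncio

    class ResponseCacheSketch:
        """Deduplicate concurrent calls sharing a key; keep results briefly."""

        def __init__(self, timeout: float = 30.0):
            self._entries = {}           # key -> task for the computation
            self._timeout = timeout

        async def wrap(self, key, callback, *args):
            loop = asyncio.get_running_loop()
            entry = self._entries.get(key)
            if entry is None:
                # First caller: start the real computation...
                entry = loop.create_task(callback(*args))
                self._entries[key] = entry
                # ...and evict the entry once the reuse window has passed.
                entry.add_done_callback(
                    lambda _: loop.call_later(
                        self._timeout, self._entries.pop, key, None
                    )
                )
            # First caller and duplicates alike await the same task.
            return await entry
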
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 4fc9ff92e5..2b0ab2dcbf 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -57,7 +57,7 @@ class FederationRemoteSendQueue(object):
 
         # We may have multiple federation sender instances, so we need to track
         # their positions separately.
-        self._sender_instances = hs.config.federation.federation_shard_config.instances
+        self._sender_instances = hs.config.worker.federation_shard_config.instances
         self._sender_positions = {}
 
         # Pending presence map user_id -> UserPresenceState
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index b328a4df09..94cc63001e 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -70,7 +70,7 @@ class FederationSender(object):
         self._transaction_manager = TransactionManager(hs)
 
         self._instance_name = hs.get_instance_name()
-        self._federation_shard_config = hs.config.federation.federation_shard_config
+        self._federation_shard_config = hs.config.worker.federation_shard_config
 
         # map from destination to PerDestinationQueue
         self._per_destination_queues = {}  # type: Dict[str, PerDestinationQueue]
@@ -288,8 +288,7 @@ class FederationSender(object):
         for destination in destinations:
             self._get_per_destination_queue(destination).send_pdu(pdu, order)
 
-    @defer.inlineCallbacks
-    def send_read_receipt(self, receipt: ReadReceipt):
+    async def send_read_receipt(self, receipt: ReadReceipt) -> None:
         """Send a RR to any other servers in the room
 
         Args:
@@ -330,7 +329,7 @@ class FederationSender(object):
         room_id = receipt.room_id
 
         # Work out which remote servers should be poked and poke them.
-        domains = yield self.state.get_current_hosts_in_room(room_id)
+        domains = await self.state.get_current_hosts_in_room(room_id)
         domains = [
             d
             for d in domains
@@ -385,8 +384,7 @@ class FederationSender(object):
             queue.flush_read_receipts_for_room(room_id)
 
     @preserve_fn  # the caller should not yield on this
-    @defer.inlineCallbacks
-    def send_presence(self, states: List[UserPresenceState]):
+    async def send_presence(self, states: List[UserPresenceState]):
         """Send the new presence states to the appropriate destinations.
 
         This actually queues up the presence states ready for sending and
@@ -421,7 +419,7 @@ class FederationSender(object):
                 if not states_map:
                     break
 
-                yield self._process_presence_inner(list(states_map.values()))
+                await self._process_presence_inner(list(states_map.values()))
         except Exception:
             logger.exception("Error sending presence states to servers")
         finally:
@@ -448,12 +446,11 @@ class FederationSender(object):
             self._get_per_destination_queue(destination).send_presence(states)
 
     @measure_func("txnqueue._process_presence")
-    @defer.inlineCallbacks
-    def _process_presence_inner(self, states: List[UserPresenceState]):
+    async def _process_presence_inner(self, states: List[UserPresenceState]):
         """Given a list of states populate self.pending_presence_by_dest and
         poke to send a new transaction to each destination
         """
-        hosts_and_states = yield get_interested_remotes(self.store, states, self.state)
+        hosts_and_states = await get_interested_remotes(self.store, states, self.state)
 
         for destinations, states in hosts_and_states:
             for destination in destinations:
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index 3436741783..dd150f89a6 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -75,7 +75,7 @@ class PerDestinationQueue(object):
         self._store = hs.get_datastore()
         self._transaction_manager = transaction_manager
         self._instance_name = hs.get_instance_name()
-        self._federation_shard_config = hs.config.federation.federation_shard_config
+        self._federation_shard_config = hs.config.worker.federation_shard_config
 
         self._should_send_on_this_instance = True
         if not self._federation_shard_config.should_handle(
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index cfdf23d366..9ea821dbb2 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -18,8 +18,6 @@ import logging
 import urllib
 from typing import Any, Dict, Optional
 
-from twisted.internet import defer
-
 from synapse.api.constants import Membership
 from synapse.api.errors import Codes, HttpResponseException, SynapseError
 from synapse.api.urls import (
@@ -51,7 +49,7 @@ class TransportLayerClient(object):
             event_id (str): The event we want the context at.
 
         Returns:
-            Deferred: Results in a dict received from the remote homeserver.
+            Awaitable: Results in a dict received from the remote homeserver.
         """
         logger.debug("get_room_state_ids dest=%s, room=%s", destination, room_id)
 
@@ -75,7 +73,7 @@ class TransportLayerClient(object):
                 giving up. None indicates no timeout.
 
         Returns:
-            Deferred: Results in a dict received from the remote homeserver.
+            Awaitable: Results in a dict received from the remote homeserver.
         """
         logger.debug("get_pdu dest=%s, event_id=%s", destination, event_id)
 
@@ -96,7 +94,7 @@ class TransportLayerClient(object):
             limit (int)
 
         Returns:
-            Deferred: Results in a dict received from the remote homeserver.
+            Awaitable: Results in a dict received from the remote homeserver.
         """
         logger.debug(
             "backfill dest=%s, room_id=%s, event_tuples=%r, limit=%s",
@@ -118,16 +116,15 @@ class TransportLayerClient(object):
             destination, path=path, args=args, try_trailing_slash_on_400=True
         )
 
-    @defer.inlineCallbacks
     @log_function
-    def send_transaction(self, transaction, json_data_callback=None):
+    async def send_transaction(self, transaction, json_data_callback=None):
         """ Sends the given Transaction to its destination
 
         Args:
             transaction (Transaction)
 
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
+            Succeeds when we get a 2xx HTTP response. The result
             will be the decoded JSON body.
 
             Fails with ``HTTPRequestException`` if we get an HTTP response
@@ -154,7 +151,7 @@ class TransportLayerClient(object):
 
         path = _create_v1_path("/send/%s", transaction.transaction_id)
 
-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             transaction.destination,
             path=path,
             data=json_data,
@@ -166,14 +163,13 @@ class TransportLayerClient(object):
 
         return response
 
-    @defer.inlineCallbacks
     @log_function
-    def make_query(
+    async def make_query(
         self, destination, query_type, args, retry_on_dns_fail, ignore_backoff=False
     ):
         path = _create_v1_path("/query/%s", query_type)
 
-        content = yield self.client.get_json(
+        content = await self.client.get_json(
             destination=destination,
             path=path,
             args=args,
@@ -184,9 +180,10 @@ class TransportLayerClient(object):
 
         return content
 
-    @defer.inlineCallbacks
     @log_function
-    def make_membership_event(self, destination, room_id, user_id, membership, params):
+    async def make_membership_event(
+        self, destination, room_id, user_id, membership, params
+    ):
         """Asks a remote server to build and sign us a membership event
 
         Note that this does not append any events to any graphs.
@@ -200,7 +197,7 @@ class TransportLayerClient(object):
                 request.
 
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
+            Succeeds when we get a 2xx HTTP response. The result
             will be the decoded JSON body (ie, the new event).
 
             Fails with ``HTTPRequestException`` if we get an HTTP response
@@ -231,7 +228,7 @@ class TransportLayerClient(object):
             ignore_backoff = True
             retry_on_dns_fail = True
 
-        content = yield self.client.get_json(
+        content = await self.client.get_json(
             destination=destination,
             path=path,
             args=params,
@@ -242,34 +239,31 @@ class TransportLayerClient(object):
 
         return content
 
-    @defer.inlineCallbacks
     @log_function
-    def send_join_v1(self, destination, room_id, event_id, content):
+    async def send_join_v1(self, destination, room_id, event_id, content):
         path = _create_v1_path("/send_join/%s/%s", room_id, event_id)
 
-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination, path=path, data=content
         )
 
         return response
 
-    @defer.inlineCallbacks
     @log_function
-    def send_join_v2(self, destination, room_id, event_id, content):
+    async def send_join_v2(self, destination, room_id, event_id, content):
         path = _create_v2_path("/send_join/%s/%s", room_id, event_id)
 
-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination, path=path, data=content
         )
 
         return response
 
-    @defer.inlineCallbacks
     @log_function
-    def send_leave_v1(self, destination, room_id, event_id, content):
+    async def send_leave_v1(self, destination, room_id, event_id, content):
         path = _create_v1_path("/send_leave/%s/%s", room_id, event_id)
 
-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination,
             path=path,
             data=content,
@@ -282,12 +276,11 @@ class TransportLayerClient(object):
 
         return response
 
-    @defer.inlineCallbacks
     @log_function
-    def send_leave_v2(self, destination, room_id, event_id, content):
+    async def send_leave_v2(self, destination, room_id, event_id, content):
         path = _create_v2_path("/send_leave/%s/%s", room_id, event_id)
 
-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination,
             path=path,
             data=content,
@@ -300,31 +293,28 @@ class TransportLayerClient(object):
 
         return response
 
-    @defer.inlineCallbacks
     @log_function
-    def send_invite_v1(self, destination, room_id, event_id, content):
+    async def send_invite_v1(self, destination, room_id, event_id, content):
         path = _create_v1_path("/invite/%s/%s", room_id, event_id)
 
-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination, path=path, data=content, ignore_backoff=True
         )
 
         return response
 
-    @defer.inlineCallbacks
     @log_function
-    def send_invite_v2(self, destination, room_id, event_id, content):
+    async def send_invite_v2(self, destination, room_id, event_id, content):
         path = _create_v2_path("/invite/%s/%s", room_id, event_id)
 
-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination, path=path, data=content, ignore_backoff=True
         )
 
         return response
 
-    @defer.inlineCallbacks
     @log_function
-    def get_public_rooms(
+    async def get_public_rooms(
         self,
         remote_server: str,
         limit: Optional[int] = None,
@@ -355,7 +345,7 @@ class TransportLayerClient(object):
             data["filter"] = search_filter
 
             try:
-                response = yield self.client.post_json(
+                response = await self.client.post_json(
                     destination=remote_server, path=path, data=data, ignore_backoff=True
                 )
             except HttpResponseException as e:
@@ -381,7 +371,7 @@ class TransportLayerClient(object):
                 args["since"] = [since_token]
 
             try:
-                response = yield self.client.get_json(
+                response = await self.client.get_json(
                     destination=remote_server, path=path, args=args, ignore_backoff=True
                 )
             except HttpResponseException as e:
@@ -396,29 +386,26 @@ class TransportLayerClient(object):
 
         return response
 
-    @defer.inlineCallbacks
     @log_function
-    def exchange_third_party_invite(self, destination, room_id, event_dict):
+    async def exchange_third_party_invite(self, destination, room_id, event_dict):
         path = _create_v1_path("/exchange_third_party_invite/%s", room_id)
 
-        response = yield self.client.put_json(
+        response = await self.client.put_json(
             destination=destination, path=path, data=event_dict
         )
 
         return response
 
-    @defer.inlineCallbacks
     @log_function
-    def get_event_auth(self, destination, room_id, event_id):
+    async def get_event_auth(self, destination, room_id, event_id):
         path = _create_v1_path("/event_auth/%s/%s", room_id, event_id)
 
-        content = yield self.client.get_json(destination=destination, path=path)
+        content = await self.client.get_json(destination=destination, path=path)
 
         return content
 
-    @defer.inlineCallbacks
     @log_function
-    def query_client_keys(self, destination, query_content, timeout):
+    async def query_client_keys(self, destination, query_content, timeout):
         """Query the device keys for a list of user ids hosted on a remote
         server.
 
@@ -453,14 +440,13 @@ class TransportLayerClient(object):
         """
         path = _create_v1_path("/user/keys/query")
 
-        content = yield self.client.post_json(
+        content = await self.client.post_json(
             destination=destination, path=path, data=query_content, timeout=timeout
         )
         return content
 
-    @defer.inlineCallbacks
     @log_function
-    def query_user_devices(self, destination, user_id, timeout):
+    async def query_user_devices(self, destination, user_id, timeout):
         """Query the devices for a user id hosted on a remote server.
 
         Response:
@@ -493,14 +479,13 @@ class TransportLayerClient(object):
         """
         path = _create_v1_path("/user/devices/%s", user_id)
 
-        content = yield self.client.get_json(
+        content = await self.client.get_json(
             destination=destination, path=path, timeout=timeout
         )
         return content
 
-    @defer.inlineCallbacks
     @log_function
-    def claim_client_keys(self, destination, query_content, timeout):
+    async def claim_client_keys(self, destination, query_content, timeout):
         """Claim one-time keys for a list of devices hosted on a remote server.
 
         Request:
@@ -532,14 +517,13 @@ class TransportLayerClient(object):
 
         path = _create_v1_path("/user/keys/claim")
 
-        content = yield self.client.post_json(
+        content = await self.client.post_json(
             destination=destination, path=path, data=query_content, timeout=timeout
         )
         return content
 
-    @defer.inlineCallbacks
     @log_function
-    def get_missing_events(
+    async def get_missing_events(
         self,
         destination,
         room_id,
@@ -551,7 +535,7 @@ class TransportLayerClient(object):
     ):
         path = _create_v1_path("/get_missing_events/%s", room_id)
 
-        content = yield self.client.post_json(
+        content = await self.client.post_json(
             destination=destination,
             path=path,
             data={
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 24f7d4b3bc..5e111aa902 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -338,6 +338,12 @@ class BaseFederationServlet(object):
                 if origin:
                     with ratelimiter.ratelimit(origin) as d:
                         await d
+                        if request._disconnected:
+                            logger.warning(
+                                "client disconnected before we started processing "
+                                "request"
+                            )
+                            return -1, None
                         response = await func(
                             origin, content, request.args, *args, **kwargs
                         )
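
The `_disconnected` check guards against wasted work: a federation request can sit in the ratelimiter queue long enough for the remote side to give up, and there is no point computing a response nobody will read. The same shape in generic asyncio terms, with a hypothetical `is_disconnected` predicate standing in for Twisted's private flag:

    import asyncio

    async def handle(request, limiter: asyncio.Semaphore, is_disconnected, func):
        async with limiter:                 # may wait a long time under load
            if is_disconnected(request):    # re-check *after* the wait
                return None                 # caller treats None as "no response"
            return await func(request)
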
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index dab13c243f..e674bf44a2 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -41,8 +41,6 @@ from typing import Tuple
 
 from signedjson.sign import sign_json
 
-from twisted.internet import defer
-
 from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import get_domain_from_id
@@ -72,8 +70,9 @@ class GroupAttestationSigning(object):
         self.server_name = hs.hostname
         self.signing_key = hs.signing_key
 
-    @defer.inlineCallbacks
-    def verify_attestation(self, attestation, group_id, user_id, server_name=None):
+    async def verify_attestation(
+        self, attestation, group_id, user_id, server_name=None
+    ):
         """Verifies that the given attestation matches the given parameters.
 
         An optional server_name can be supplied to explicitly set which server's
@@ -102,7 +101,7 @@ class GroupAttestationSigning(object):
         if valid_until_ms < now:
             raise SynapseError(400, "Attestation expired")
 
-        yield self.keyring.verify_json_for_server(
+        await self.keyring.verify_json_for_server(
             server_name, attestation, now, "Group attestation"
         )
 
@@ -142,8 +141,7 @@ class GroupAttestionRenewer(object):
                 self._start_renew_attestations, 30 * 60 * 1000
             )
 
-    @defer.inlineCallbacks
-    def on_renew_attestation(self, group_id, user_id, content):
+    async def on_renew_attestation(self, group_id, user_id, content):
         """When a remote updates an attestation
         """
         attestation = content["attestation"]
@@ -151,11 +149,11 @@ class GroupAttestionRenewer(object):
         if not self.is_mine_id(group_id) and not self.is_mine_id(user_id):
             raise SynapseError(400, "Neither user nor group is on this server")
 
-        yield self.attestations.verify_attestation(
+        await self.attestations.verify_attestation(
             attestation, user_id=user_id, group_id=group_id
         )
 
-        yield self.store.update_remote_attestion(group_id, user_id, attestation)
+        await self.store.update_remote_attestion(group_id, user_id, attestation)
 
         return {}
 
@@ -172,8 +170,7 @@ class GroupAttestionRenewer(object):
             now + UPDATE_ATTESTATION_TIME_MS
         )
 
-        @defer.inlineCallbacks
-        def _renew_attestation(group_user: Tuple[str, str]):
+        async def _renew_attestation(group_user: Tuple[str, str]):
             group_id, user_id = group_user
             try:
                 if not self.is_mine_id(group_id):
@@ -186,16 +183,16 @@ class GroupAttestionRenewer(object):
                         user_id,
                         group_id,
                     )
-                    yield self.store.remove_attestation_renewal(group_id, user_id)
+                    await self.store.remove_attestation_renewal(group_id, user_id)
                     return
 
                 attestation = self.attestations.create_attestation(group_id, user_id)
 
-                yield self.transport_client.renew_group_attestation(
+                await self.transport_client.renew_group_attestation(
                     destination, group_id, user_id, content={"attestation": attestation}
                 )
 
-                yield self.store.update_attestation_renewal(
+                await self.store.update_attestation_renewal(
                     group_id, user_id, attestation
                 )
             except (RequestSendFailed, HttpResponseException) as e:
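
`_renew_attestation` stays a nested helper but becomes `async def`, so each renewal step is awaited directly and federation failures fall through to the ordinary `except` clause above. A cut-down sketch of that shape (`rows`, `attestations`, `transport`, and `store` are stand-ins):

    from typing import Tuple

    async def renew_all(rows, attestations, transport, store):
        async def _renew_attestation(group_user: Tuple[str, str]):
            group_id, user_id = group_user
            # The real code picks the remote side with is_mine_id() and
            # get_domain_from_id(); splitting on ":" stands in for that here.
            destination = group_id.split(":", 1)[1]
            attestation = attestations.create_attestation(group_id, user_id)
            await transport.renew_group_attestation(
                destination, group_id, user_id, content={"attestation": attestation}
            )
            await store.update_attestation_renewal(group_id, user_id, attestation)

        for row in rows:
            await _renew_attestation((row["group_id"], row["user_id"]))
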
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index f3c0aeceb6..506bb2b275 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -72,7 +72,7 @@ class AdminHandler(BaseHandler):
             writer (ExfiltrationWriter)
 
         Returns:
-            defer.Deferred: Resolves when all data for a user has been written.
+            Resolves when all data for a user has been written.
             The returned value is that returned by `writer.finished()`.
         """
         # Get all rooms the user is in or has been in
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 92d4c6e16c..fbc56c351b 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -27,7 +27,6 @@ from synapse.metrics import (
     event_processing_loop_room_count,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.util import log_failure
 from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
@@ -100,10 +99,11 @@ class ApplicationServicesHandler(object):
 
                         if not self.started_scheduler:
 
-                            def start_scheduler():
-                                return self.scheduler.start().addErrback(
-                                    log_failure, "Application Services Failure"
-                                )
+                            async def start_scheduler():
+                                try:
+                                    return await self.scheduler.start()
+                                except Exception:
+                                    logger.error("Application Services Failure")
 
                             run_as_background_process("as_scheduler", start_scheduler)
                             self.started_scheduler = True
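
With the errback gone, failure handling moves into an ordinary try/except inside the coroutine handed to `run_as_background_process` (note the `await`: returning the bare coroutine would let exceptions escape the try block). The pattern in isolation, with stand-in arguments:

    import logging

    logger = logging.getLogger(__name__)

    def start_in_background(run_as_background_process, scheduler):
        async def start_scheduler():
            try:
                # Await here so that failures surface inside this try block.
                return await scheduler.start()
            except Exception:
                logger.error("Application Services Failure")

        run_as_background_process("as_scheduler", start_scheduler)
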
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index a162392e4c..c7d921c21a 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -13,6 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import inspect
 import logging
 import time
 import unicodedata
@@ -863,11 +864,15 @@ class AuthHandler(BaseHandler):
         # see if any of our auth providers want to know about this
         for provider in self.password_providers:
             if hasattr(provider, "on_logged_out"):
-                await provider.on_logged_out(
+                # This might return an awaitable; if it does, block the logout
+                # until it completes.
+                result = provider.on_logged_out(
                     user_id=str(user_info["user"]),
                     device_id=user_info["device_id"],
                     access_token=access_token,
                 )
+                if inspect.isawaitable(result):
+                    await result
 
         # delete pushers associated with this access token
         if user_info["token_id"] is not None:
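
Password-provider modules may implement `on_logged_out` either synchronously or as a coroutine, so the call site can no longer `await` unconditionally; `inspect.isawaitable` lets it support both. A runnable illustration of the same duck-typing with toy providers:

    import asyncio
    import inspect

    class SyncProvider:
        def on_logged_out(self, user_id, device_id, access_token):
            print("sync logout for", user_id)

    class AsyncProvider:
        async def on_logged_out(self, user_id, device_id, access_token):
            await asyncio.sleep(0)
            print("async logout for", user_id)

    async def notify_logout(providers, user_id, device_id, access_token):
        for provider in providers:
            if hasattr(provider, "on_logged_out"):
                result = provider.on_logged_out(user_id, device_id, access_token)
                # Block the logout until an async provider finishes.
                if inspect.isawaitable(result):
                    await result

    asyncio.run(notify_logout([SyncProvider(), AsyncProvider()], "@u:hs", "DEV", "tok"))
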
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 696d85b5f9..25169157c1 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -30,6 +30,7 @@ class DeactivateAccountHandler(BaseHandler):
 
     def __init__(self, hs):
         super(DeactivateAccountHandler, self).__init__(hs)
+        self.hs = hs
         self._auth_handler = hs.get_auth_handler()
         self._device_handler = hs.get_device_handler()
         self._room_member_handler = hs.get_room_member_handler()
@@ -222,13 +223,26 @@ class DeactivateAccountHandler(BaseHandler):
         """
         Activate an account that was previously deactivated.
 
-        This simply marks the user as activate in the database and does not
-        attempt to rejoin rooms, re-add threepids, etc.
+        This marks the user as active and not erased in the database, but does
+        not attempt to rejoin rooms, re-add threepids, etc.
+
+        If enabled, the user will be re-added to the user directory.
 
         The user will also need a password hash set to actually login.
 
         Args:
-            user_id: ID of user to be deactivated
+            user_id: ID of user to be re-activated
         """
-        # Mark the user as activate.
+        # Add the user to the directory, if necessary.
+        user = UserID.from_string(user_id)
+        if self.hs.config.user_directory_search_all_users:
+            profile = await self.store.get_profileinfo(user.localpart)
+            await self.user_directory_handler.handle_local_profile_change(
+                user_id, profile
+            )
+
+        # Ensure the user is not marked as erased.
+        await self.store.mark_user_not_erased(user_id)
+
+        # Mark the user as active.
         await self.store.set_user_deactivated_status(user_id, False)
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 361dd64cd2..84169c1022 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -16,10 +16,11 @@
 # limitations under the License.
 
 import logging
+from typing import Dict, List, Optional, Tuple
 
 import attr
 from canonicaljson import encode_canonical_json, json
-from signedjson.key import decode_verify_key_bytes
+from signedjson.key import VerifyKey, decode_verify_key_bytes
 from signedjson.sign import SignatureVerifyException, verify_signed_json
 from unpaddedbase64 import decode_base64
 
@@ -265,7 +266,9 @@ class E2eKeysHandler(object):
 
         return ret
 
-    async def get_cross_signing_keys_from_cache(self, query, from_user_id):
+    async def get_cross_signing_keys_from_cache(
+        self, query, from_user_id
+    ) -> Dict[str, Dict[str, dict]]:
         """Get cross-signing keys for users from the database
 
         Args:
@@ -277,8 +280,7 @@ class E2eKeysHandler(object):
                 can see.
 
         Returns:
-            defer.Deferred[dict[str, dict[str, dict]]]: map from
-                (master_keys|self_signing_keys|user_signing_keys) -> user_id -> key
+            A map from (master_keys|self_signing_keys|user_signing_keys) -> user_id -> key
         """
         master_keys = {}
         self_signing_keys = {}
@@ -312,16 +314,17 @@ class E2eKeysHandler(object):
         }
 
     @trace
-    async def query_local_devices(self, query):
+    async def query_local_devices(
+        self, query: Dict[str, Optional[List[str]]]
+    ) -> Dict[str, Dict[str, dict]]:
         """Get E2E device keys for local users
 
         Args:
-            query (dict[string, list[string]|None): map from user_id to a list
+            query: map from user_id to a list
                  of devices to query (None for all devices)
 
         Returns:
-            defer.Deferred: (resolves to dict[string, dict[string, dict]]):
-                 map from user_id -> device_id -> device details
+            A map from user_id -> device_id -> device details
         """
         set_tag("local_query", query)
         local_query = []
@@ -1004,7 +1007,7 @@ class E2eKeysHandler(object):
 
     async def _retrieve_cross_signing_keys_for_remote_user(
         self, user: UserID, desired_key_type: str,
-    ):
+    ) -> Tuple[Optional[dict], Optional[str], Optional[VerifyKey]]:
         """Queries cross-signing keys for a remote user and saves them to the database
 
         Only the key specified by `key_type` will be returned, while all retrieved keys
@@ -1015,8 +1018,7 @@ class E2eKeysHandler(object):
             desired_key_type: The type of key to receive. One of "master", "self_signing"
 
         Returns:
-            Deferred[Tuple[Optional[Dict], Optional[str], Optional[VerifyKey]]]: A tuple
-            of the retrieved key content, the key's ID and the matching VerifyKey.
+            A tuple of the retrieved key content, the key's ID and the matching VerifyKey.
             If the key cannot be retrieved, all values in the tuple will instead be None.
         """
         try:
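
The new return annotation makes the soft-failure contract explicit: rather than raising, the function hands back a tuple whose members are all None when the key could not be fetched, so callers can unpack unconditionally. The pattern in miniature (the fetcher and its response shape are hypothetical):

    from typing import Optional, Tuple

    async def fetch_key(client, user_id: str, key_type: str
                        ) -> Tuple[Optional[dict], Optional[str]]:
        """Return (key content, key ID), or (None, None) if unavailable."""
        try:
            remote_result = await client.query_user_keys(user_id)
        except Exception:
            return None, None
        key_content = remote_result.get(key_type + "_key")
        if not key_content:
            return None, None
        # Cross-signing keys carry their ID inside a one-entry "keys" dict.
        key_id = next(iter(key_content.get("keys", {})), None)
        return key_content, key_id
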
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 71ac5dca99..0d7d1adcea 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1394,7 +1394,7 @@ class FederationHandler(BaseHandler):
             # it's just a best-effort thing at this point. We do want to do
             # them roughly in order, though, otherwise we'll end up making
             # lots of requests for missing prev_events which we do actually
-            # have. Hence we fire off the deferred, but don't wait for it.
+            # have. Hence we fire off the background task, but don't wait for it.
 
             run_in_background(self._handle_queued_pdus, room_queue)
 
@@ -1887,9 +1887,6 @@ class FederationHandler(BaseHandler):
             origin, event, state=state, auth_events=auth_events, backfilled=backfilled
         )
 
-        # reraise does not allow inlineCallbacks to preserve the stacktrace, so we
-        # hack around with a try/finally instead.
-        success = False
         try:
             if (
                 not event.internal_metadata.is_outlier()
@@ -1903,12 +1900,11 @@ class FederationHandler(BaseHandler):
             await self.persist_events_and_notify(
                 [(event, context)], backfilled=backfilled
             )
-            success = True
-        finally:
-            if not success:
-                run_in_background(
-                    self.store.remove_push_actions_from_staging, event.event_id
-                )
+        except Exception:
+            run_in_background(
+                self.store.remove_push_actions_from_staging, event.event_id
+            )
+            raise
 
         return context
 
@@ -2474,7 +2470,7 @@ class FederationHandler(BaseHandler):
         }
 
         current_state_ids = await context.get_current_state_ids()
-        current_state_ids = dict(current_state_ids)
+        current_state_ids = dict(current_state_ids)  # type: ignore
 
         current_state_ids.update(state_updates)
 
@@ -2994,7 +2990,9 @@ class FederationHandler(BaseHandler):
         else:
             user_joined_room(self.distributor, user, room_id)
 
-    async def get_room_complexity(self, remote_room_hosts, room_id):
+    async def get_room_complexity(
+        self, remote_room_hosts: List[str], room_id: str
+    ) -> Optional[dict]:
         """
         Fetch the complexity of a remote room over federation.
 
@@ -3003,7 +3001,7 @@ class FederationHandler(BaseHandler):
             room_id (str): The room ID to ask about.
 
         Returns:
-            Deferred[dict] or Deferred[None]: Dict contains the complexity
+            Dict contains the complexity
             metric versions, while None means we could not fetch the complexity.
         """
 
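
The `success`-flag try/finally removed above existed only because re-raising inside `inlineCallbacks` lost the stack trace; native coroutines re-raise cleanly, so a plain except/raise can do the cleanup. The shape of that rewrite, generically:

    # Before: cleanup smuggled into a finally block via a success flag,
    # because "raise" inside inlineCallbacks mangled the traceback:
    #
    #     success = False
    #     try:
    #         yield persist(event)
    #         success = True
    #     finally:
    #         if not success:
    #             cleanup(event)

    async def persist_with_cleanup(persist, cleanup, event):
        try:
            await persist(event)
        except Exception:
            # Best-effort cleanup; the real code fires this off in the
            # background rather than awaiting it.
            cleanup(event)
            raise
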
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index ecdb12a7bf..0e2656ccb3 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -23,39 +23,32 @@ logger = logging.getLogger(__name__)
 
 
 def _create_rerouter(func_name):
-    """Returns a function that looks at the group id and calls the function
+    """Returns an async function that looks at the group id and calls the function
     on federation or the local group server if the group is local
     """
 
-    def f(self, group_id, *args, **kwargs):
+    async def f(self, group_id, *args, **kwargs):
         if self.is_mine_id(group_id):
-            return getattr(self.groups_server_handler, func_name)(
+            return await getattr(self.groups_server_handler, func_name)(
                 group_id, *args, **kwargs
             )
         else:
             destination = get_domain_from_id(group_id)
-            d = getattr(self.transport_client, func_name)(
-                destination, group_id, *args, **kwargs
-            )
 
-            # Capture errors returned by the remote homeserver and
-            # re-throw specific errors as SynapseErrors. This is so
-            # when the remote end responds with things like 403 Not
-            # In Group, we can communicate that to the client instead
-            # of a 500.
-            def http_response_errback(failure):
-                failure.trap(HttpResponseException)
-                e = failure.value
+            try:
+                return await getattr(self.transport_client, func_name)(
+                    destination, group_id, *args, **kwargs
+                )
+            except HttpResponseException as e:
+                # Capture errors returned by the remote homeserver and
+                # re-throw specific errors as SynapseErrors. This is so
+                # when the remote end responds with things like 403 Not
+                # In Group, we can communicate that to the client instead
+                # of a 500.
                 raise e.to_synapse_error()
-
-            def request_failed_errback(failure):
-                failure.trap(RequestSendFailed)
+            except RequestSendFailed:
                 raise SynapseError(502, "Failed to contact group server")
 
-            d.addErrback(http_response_errback)
-            d.addErrback(request_failed_errback)
-            return d
-
     return f
 
 
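
Rewriting the rerouter as an `async def` replaces the `failure.trap(...)` errback chain with ordinary except clauses while keeping the translation of remote HTTP errors into SynapseErrors. A self-contained sketch of the same translation (the exception classes here are simplified stand-ins for Synapse's):

    class SynapseError(Exception):
        def __init__(self, code, msg):
            super().__init__(msg)
            self.code = code

    class HttpResponseException(Exception):
        def to_synapse_error(self):
            return SynapseError(403, "remote rejected the request")

    class RequestSendFailed(Exception):
        pass

    async def reroute(transport_client, func_name, destination, group_id, *args, **kwargs):
        try:
            return await getattr(transport_client, func_name)(
                destination, group_id, *args, **kwargs
            )
        except HttpResponseException as e:
            # Surface the remote's error code instead of a generic 500.
            raise e.to_synapse_error()
        except RequestSendFailed:
            raise SynapseError(502, "Failed to contact group server")
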
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 701233ebb4..0bd2c3e37a 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -19,6 +19,7 @@
 
 import logging
 import urllib.parse
+from typing import Awaitable, Callable, Dict, List, Optional, Tuple
 
 from canonicaljson import json
 from signedjson.key import decode_verify_key_bytes
@@ -36,6 +37,7 @@ from synapse.api.errors import (
 )
 from synapse.config.emailconfig import ThreepidBehaviour
 from synapse.http.client import SimpleHttpClient
+from synapse.types import JsonDict, Requester
 from synapse.util.hash import sha256_and_url_safe_base64
 from synapse.util.stringutils import assert_valid_client_secret, random_string
 
@@ -59,23 +61,23 @@ class IdentityHandler(BaseHandler):
         self.federation_http_client = hs.get_http_client()
         self.hs = hs
 
-    async def threepid_from_creds(self, id_server, creds):
+    async def threepid_from_creds(
+        self, id_server: str, creds: Dict[str, str]
+    ) -> Optional[JsonDict]:
         """
         Retrieve and validate a threepid identifier from a "credentials" dictionary against a
         given identity server
 
         Args:
-            id_server (str): The identity server to validate 3PIDs against. Must be a
+            id_server: The identity server to validate 3PIDs against. Must be a
                 complete URL including the protocol (http(s)://)
-
-            creds (dict[str, str]): Dictionary containing the following keys:
+            creds: Dictionary containing the following keys:
                 * client_secret|clientSecret: A unique secret str provided by the client
                 * sid: The ID of the validation session
 
         Returns:
-            Deferred[dict[str,str|int]|None]: A dictionary consisting of response params to
-                the /getValidated3pid endpoint of the Identity Service API, or None if the
-                threepid was not found
+            A dictionary consisting of response params to the /getValidated3pid
+            endpoint of the Identity Service API, or None if the threepid was not found
         """
         client_secret = creds.get("client_secret") or creds.get("clientSecret")
         if not client_secret:
@@ -119,26 +121,27 @@ class IdentityHandler(BaseHandler):
         return None
 
     async def bind_threepid(
-        self, client_secret, sid, mxid, id_server, id_access_token=None, use_v2=True
-    ):
+        self,
+        client_secret: str,
+        sid: str,
+        mxid: str,
+        id_server: str,
+        id_access_token: Optional[str] = None,
+        use_v2: bool = True,
+    ) -> JsonDict:
         """Bind a 3PID to an identity server
 
         Args:
-            client_secret (str): A unique secret provided by the client
-
-            sid (str): The ID of the validation session
-
-            mxid (str): The MXID to bind the 3PID to
-
-            id_server (str): The domain of the identity server to query
-
-            id_access_token (str): The access token to authenticate to the identity
+            client_secret: A unique secret provided by the client
+            sid: The ID of the validation session
+            mxid: The MXID to bind the 3PID to
+            id_server: The domain of the identity server to query
+            id_access_token: The access token to authenticate to the identity
                 server with, if necessary. Required if use_v2 is true
-
-            use_v2 (bool): Whether to use v2 Identity Service API endpoints. Defaults to True
+            use_v2: Whether to use v2 Identity Service API endpoints. Defaults to True
 
         Returns:
-            Deferred[dict]: The response from the identity server
+            The response from the identity server
         """
         logger.debug("Proxying threepid bind request for %s to %s", mxid, id_server)
 
@@ -151,7 +154,7 @@ class IdentityHandler(BaseHandler):
         bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid}
         if use_v2:
             bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,)
-            headers["Authorization"] = create_id_access_token_header(id_access_token)
+            headers["Authorization"] = create_id_access_token_header(id_access_token)  # type: ignore
         else:
             bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,)
 
@@ -187,20 +190,20 @@ class IdentityHandler(BaseHandler):
         )
         return res
 
-    async def try_unbind_threepid(self, mxid, threepid):
+    async def try_unbind_threepid(self, mxid: str, threepid: dict) -> bool:
         """Attempt to remove a 3PID from an identity server, or if one is not provided, all
         identity servers we're aware the binding is present on
 
         Args:
-            mxid (str): Matrix user ID of binding to be removed
-            threepid (dict): Dict with medium & address of binding to be
+            mxid: Matrix user ID of binding to be removed
+            threepid: Dict with medium & address of binding to be
                 removed, and an optional id_server.
 
         Raises:
             SynapseError: If we failed to contact the identity server
 
         Returns:
-            Deferred[bool]: True on success, otherwise False if the identity
+            True on success, otherwise False if the identity
             server doesn't support unbinding (or no identity server found to
             contact).
         """
@@ -223,19 +226,21 @@ class IdentityHandler(BaseHandler):
 
         return changed
 
-    async def try_unbind_threepid_with_id_server(self, mxid, threepid, id_server):
+    async def try_unbind_threepid_with_id_server(
+        self, mxid: str, threepid: dict, id_server: str
+    ) -> bool:
         """Removes a binding from an identity server
 
         Args:
-            mxid (str): Matrix user ID of binding to be removed
-            threepid (dict): Dict with medium & address of binding to be removed
-            id_server (str): Identity server to unbind from
+            mxid: Matrix user ID of binding to be removed
+            threepid: Dict with medium & address of binding to be removed
+            id_server: Identity server to unbind from
 
         Raises:
             SynapseError: If we failed to contact the identity server
 
         Returns:
-            Deferred[bool]: True on success, otherwise False if the identity
+            True on success, otherwise False if the identity
             server doesn't support unbinding
         """
         url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
@@ -287,23 +292,23 @@ class IdentityHandler(BaseHandler):
 
     async def send_threepid_validation(
         self,
-        email_address,
-        client_secret,
-        send_attempt,
-        send_email_func,
-        next_link=None,
-    ):
+        email_address: str,
+        client_secret: str,
+        send_attempt: int,
+        send_email_func: Callable[[str, str, str, str], Awaitable],
+        next_link: Optional[str] = None,
+    ) -> str:
         """Send a threepid validation email for password reset or
         registration purposes
 
         Args:
-            email_address (str): The user's email address
-            client_secret (str): The provided client secret
-            send_attempt (int): Which send attempt this is
-            send_email_func (func): A function that takes an email address, token,
-                                    client_secret and session_id, sends an email
-                                    and returns a Deferred.
-            next_link (str|None): The URL to redirect the user to after validation
+            email_address: The user's email address
+            client_secret: The provided client secret
+            send_attempt: Which send attempt this is
+            send_email_func: A function that takes an email address, token,
+                             client_secret and session_id, sends an email
+                             and returns an Awaitable.
+            next_link: The URL to redirect the user to after validation
 
         Returns:
             The new session_id upon success
@@ -372,17 +377,22 @@ class IdentityHandler(BaseHandler):
         return session_id
 
     async def requestEmailToken(
-        self, id_server, email, client_secret, send_attempt, next_link=None
-    ):
+        self,
+        id_server: str,
+        email: str,
+        client_secret: str,
+        send_attempt: int,
+        next_link: Optional[str] = None,
+    ) -> JsonDict:
         """
         Request an external server send an email on our behalf for the purposes of threepid
         validation.
 
         Args:
-            id_server (str): The identity server to proxy to
-            email (str): The email to send the message to
-            client_secret (str): The unique client_secret sends by the user
-            send_attempt (int): Which attempt this is
+            id_server: The identity server to proxy to
+            email: The email to send the message to
+            client_secret: The unique client_secret sent by the user
+            send_attempt: Which attempt this is
             next_link: A link to redirect the user to once they submit the token
 
         Returns:
@@ -419,22 +429,22 @@ class IdentityHandler(BaseHandler):
 
     async def requestMsisdnToken(
         self,
-        id_server,
-        country,
-        phone_number,
-        client_secret,
-        send_attempt,
-        next_link=None,
-    ):
+        id_server: str,
+        country: str,
+        phone_number: str,
+        client_secret: str,
+        send_attempt: int,
+        next_link: Optional[str] = None,
+    ) -> JsonDict:
         """
         Request an external server send an SMS message on our behalf for the purposes of
         threepid validation.
         Args:
-            id_server (str): The identity server to proxy to
-            country (str): The country code of the phone number
-            phone_number (str): The number to send the message to
-            client_secret (str): The unique client_secret sends by the user
-            send_attempt (int): Which attempt this is
+            id_server: The identity server to proxy to
+            country: The country code of the phone number
+            phone_number: The number to send the message to
+            client_secret: The unique client_secret sent by the user
+            send_attempt: Which attempt this is
             next_link: A link to redirect the user to once they submit the token
 
         Returns:
@@ -480,17 +490,18 @@ class IdentityHandler(BaseHandler):
         )
         return data
 
-    async def validate_threepid_session(self, client_secret, sid):
+    async def validate_threepid_session(
+        self, client_secret: str, sid: str
+    ) -> Optional[JsonDict]:
         """Validates a threepid session with only the client secret and session ID
         Tries validating against any configured account_threepid_delegates as well as locally.
 
         Args:
-            client_secret (str): A secret provided by the client
-
-            sid (str): The ID of the session
+            client_secret: A secret provided by the client
+            sid: The ID of the session
 
         Returns:
-            Dict[str, str|int] if validation was successful, otherwise None
+            The json response if validation was successful, otherwise None
         """
         # XXX: We shouldn't need to keep wrapping and unwrapping this value
         threepid_creds = {"client_secret": client_secret, "sid": sid}
@@ -523,23 +534,22 @@ class IdentityHandler(BaseHandler):
 
         return validation_session
 
-    async def proxy_msisdn_submit_token(self, id_server, client_secret, sid, token):
+    async def proxy_msisdn_submit_token(
+        self, id_server: str, client_secret: str, sid: str, token: str
+    ) -> JsonDict:
         """Proxy a POST submitToken request to an identity server for verification purposes
 
         Args:
-            id_server (str): The identity server URL to contact
-
-            client_secret (str): Secret provided by the client
-
-            sid (str): The ID of the session
-
-            token (str): The verification token
+            id_server: The identity server URL to contact
+            client_secret: Secret provided by the client
+            sid: The ID of the session
+            token: The verification token
 
         Raises:
             SynapseError: If we failed to contact the identity server
 
         Returns:
-            Deferred[dict]: The response dict from the identity server
+            The response dict from the identity server
         """
         body = {"client_secret": client_secret, "sid": sid, "token": token}
 
@@ -554,19 +564,25 @@ class IdentityHandler(BaseHandler):
             logger.warning("Error contacting msisdn account_threepid_delegate: %s", e)
             raise SynapseError(400, "Error contacting the identity server")
 
-    async def lookup_3pid(self, id_server, medium, address, id_access_token=None):
+    async def lookup_3pid(
+        self,
+        id_server: str,
+        medium: str,
+        address: str,
+        id_access_token: Optional[str] = None,
+    ) -> Optional[str]:
         """Looks up a 3pid in the passed identity server.
 
         Args:
-            id_server (str): The server name (including port, if required)
+            id_server: The server name (including port, if required)
                 of the identity server to use.
-            medium (str): The type of the third party identifier (e.g. "email").
-            address (str): The third party identifier (e.g. "foo@example.com").
-            id_access_token (str|None): The access token to authenticate to the identity
+            medium: The type of the third party identifier (e.g. "email").
+            address: The third party identifier (e.g. "foo@example.com").
+            id_access_token: The access token to authenticate to the identity
                 server with
 
         Returns:
-            str|None: the matrix ID of the 3pid, or None if it is not recognized.
+            the matrix ID of the 3pid, or None if it is not recognized.
         """
         if id_access_token is not None:
             try:
@@ -591,17 +607,19 @@ class IdentityHandler(BaseHandler):
 
         return await self._lookup_3pid_v1(id_server, medium, address)
 
-    async def _lookup_3pid_v1(self, id_server, medium, address):
+    async def _lookup_3pid_v1(
+        self, id_server: str, medium: str, address: str
+    ) -> Optional[str]:
         """Looks up a 3pid in the passed identity server using v1 lookup.
 
         Args:
-            id_server (str): The server name (including port, if required)
+            id_server: The server name (including port, if required)
                 of the identity server to use.
-            medium (str): The type of the third party identifier (e.g. "email").
-            address (str): The third party identifier (e.g. "foo@example.com").
+            medium: The type of the third party identifier (e.g. "email").
+            address: The third party identifier (e.g. "foo@example.com").
 
         Returns:
-            str: the matrix ID of the 3pid, or None if it is not recognized.
+            the matrix ID of the 3pid, or None if it is not recognized.
         """
         try:
             data = await self.blacklisting_http_client.get_json(
@@ -621,18 +639,20 @@ class IdentityHandler(BaseHandler):
 
         return None
 
-    async def _lookup_3pid_v2(self, id_server, id_access_token, medium, address):
+    async def _lookup_3pid_v2(
+        self, id_server: str, id_access_token: str, medium: str, address: str
+    ) -> Optional[str]:
         """Looks up a 3pid in the passed identity server using v2 lookup.
 
         Args:
-            id_server (str): The server name (including port, if required)
+            id_server: The server name (including port, if required)
                 of the identity server to use.
-            id_access_token (str): The access token to authenticate to the identity server with
-            medium (str): The type of the third party identifier (e.g. "email").
-            address (str): The third party identifier (e.g. "foo@example.com").
+            id_access_token: The access token to authenticate to the identity server with
+            medium: The type of the third party identifier (e.g. "email").
+            address: The third party identifier (e.g. "foo@example.com").
 
         Returns:
-            Deferred[str|None]: the matrix ID of the 3pid, or None if it is not recognised.
+            the matrix ID of the 3pid, or None if it is not recognised.
         """
         # Check what hashing details are supported by this identity server
         try:
@@ -757,49 +777,48 @@ class IdentityHandler(BaseHandler):
 
     async def ask_id_server_for_third_party_invite(
         self,
-        requester,
-        id_server,
-        medium,
-        address,
-        room_id,
-        inviter_user_id,
-        room_alias,
-        room_avatar_url,
-        room_join_rules,
-        room_name,
-        inviter_display_name,
-        inviter_avatar_url,
-        id_access_token=None,
-    ):
+        requester: Requester,
+        id_server: str,
+        medium: str,
+        address: str,
+        room_id: str,
+        inviter_user_id: str,
+        room_alias: str,
+        room_avatar_url: str,
+        room_join_rules: str,
+        room_name: str,
+        inviter_display_name: str,
+        inviter_avatar_url: str,
+        id_access_token: Optional[str] = None,
+    ) -> Tuple[str, List[Dict[str, str]], Dict[str, str], str]:
         """
         Asks an identity server for a third party invite.
 
         Args:
-            requester (Requester)
-            id_server (str): hostname + optional port for the identity server.
-            medium (str): The literal string "email".
-            address (str): The third party address being invited.
-            room_id (str): The ID of the room to which the user is invited.
-            inviter_user_id (str): The user ID of the inviter.
-            room_alias (str): An alias for the room, for cosmetic notifications.
-            room_avatar_url (str): The URL of the room's avatar, for cosmetic
+            requester
+            id_server: hostname + optional port for the identity server.
+            medium: The literal string "email".
+            address: The third party address being invited.
+            room_id: The ID of the room to which the user is invited.
+            inviter_user_id: The user ID of the inviter.
+            room_alias: An alias for the room, for cosmetic notifications.
+            room_avatar_url: The URL of the room's avatar, for cosmetic
                 notifications.
-            room_join_rules (str): The join rules of the email (e.g. "public").
-            room_name (str): The m.room.name of the room.
-            inviter_display_name (str): The current display name of the
+            room_join_rules: The join rules of the email (e.g. "public").
+            room_name: The m.room.name of the room.
+            inviter_display_name: The current display name of the
                 inviter.
-            inviter_avatar_url (str): The URL of the inviter's avatar.
+            inviter_avatar_url: The URL of the inviter's avatar.
             id_access_token (str|None): The access token to authenticate to the identity
                 server with
 
         Returns:
-            A deferred tuple containing:
-                token (str): The token which must be signed to prove authenticity.
+            A tuple containing:
+                token: The token which must be signed to prove authenticity.
                 public_keys ([{"public_key": str, "key_validity_url": str}]):
                     public_key is a base64-encoded ed25519 public key.
                 fallback_public_key: One element from public_keys.
-                display_name (str): A user-friendly name to represent the invited
-                    user.
+                display_name: A user-friendly name to represent the invited user.
         """
         invite_config = {
             "medium": medium,
@@ -896,15 +915,15 @@ class IdentityHandler(BaseHandler):
         return token, public_keys, fallback_public_key, display_name
 
 
-def create_id_access_token_header(id_access_token):
+def create_id_access_token_header(id_access_token: str) -> List[str]:
     """Create an Authorization header for passing to SimpleHttpClient as the header value
     of an HTTP request.
 
     Args:
-        id_access_token (str): An identity server access token.
+        id_access_token: An identity server access token.
 
     Returns:
-        list[str]: The ascii-encoded bearer token encased in a list.
+        The ascii-encoded bearer token encased in a list.
     """
     # Prefix with Bearer
     bearer_token = "Bearer %s" % id_access_token
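
Alongside the async conversion, this file moves parameter and return types out of the docstrings and into PEP 484 annotations, where mypy can check them. The convention in miniature:

    from typing import Optional

    # Before: types lived only in prose.
    #
    #     def lookup(id_server, medium, address):
    #         """...
    #         Args:
    #             id_server (str): identity server name
    #         Returns:
    #             str|None: the matrix ID, or None if not recognised
    #         """

    # After: types live in the signature; the docstring keeps only prose.
    async def lookup(id_server: str, medium: str, address: str) -> Optional[str]:
        """Look up a 3pid on the given identity server.

        Returns:
            the matrix ID of the 3pid, or None if it is not recognised.
        """
        ...
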
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index c47764a4ce..e451d6dc86 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -15,12 +15,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Optional, Tuple
+from typing import TYPE_CHECKING, List, Optional, Tuple
 
 from canonicaljson import encode_canonical_json, json
 
-from twisted.internet import defer
-from twisted.internet.defer import succeed
 from twisted.internet.interfaces import IDelayedCall
 
 from synapse import event_auth
@@ -41,13 +39,22 @@ from synapse.api.errors import (
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
 from synapse.api.urls import ConsentURIBuilder
 from synapse.events import EventBase
+from synapse.events.builder import EventBuilder
+from synapse.events.snapshot import EventContext
 from synapse.events.validator import EventValidator
 from synapse.logging.context import run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.send_event import ReplicationSendEventRestServlet
 from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
 from synapse.storage.state import StateFilter
-from synapse.types import Collection, RoomAlias, UserID, create_requester
+from synapse.types import (
+    Collection,
+    Requester,
+    RoomAlias,
+    StreamToken,
+    UserID,
+    create_requester,
+)
 from synapse.util.async_helpers import Linearizer
 from synapse.util.frozenutils import frozendict_json_encoder
 from synapse.util.metrics import measure_func
@@ -84,14 +91,22 @@ class MessageHandler(object):
                 "_schedule_next_expiry", self._schedule_next_expiry
             )
 
-    @defer.inlineCallbacks
-    def get_room_data(
-        self, user_id=None, room_id=None, event_type=None, state_key="", is_guest=False
-    ):
+    async def get_room_data(
+        self,
+        user_id: Optional[str] = None,
+        room_id: Optional[str] = None,
+        event_type: Optional[str] = None,
+        state_key: str = "",
+        is_guest: bool = False,
+    ) -> dict:
         """ Get data from a room.
 
         Args:
-            event : The room path event
+            user_id
+            room_id
+            event_type
+            state_key
+            is_guest
         Returns:
             The path data content.
         Raises:
@@ -100,30 +115,29 @@ class MessageHandler(object):
         (
             membership,
             membership_event_id,
-        ) = yield self.auth.check_user_in_room_or_world_readable(
+        ) = await self.auth.check_user_in_room_or_world_readable(
             room_id, user_id, allow_departed_users=True
         )
 
         if membership == Membership.JOIN:
-            data = yield self.state.get_current_state(room_id, event_type, state_key)
+            data = await self.state.get_current_state(room_id, event_type, state_key)
         elif membership == Membership.LEAVE:
             key = (event_type, state_key)
-            room_state = yield self.state_store.get_state_for_events(
+            room_state = await self.state_store.get_state_for_events(
                 [membership_event_id], StateFilter.from_types([key])
             )
             data = room_state[membership_event_id].get(key)
 
         return data
 
-    @defer.inlineCallbacks
-    def get_state_events(
+    async def get_state_events(
         self,
-        user_id,
-        room_id,
-        state_filter=StateFilter.all(),
-        at_token=None,
-        is_guest=False,
-    ):
+        user_id: str,
+        room_id: str,
+        state_filter: StateFilter = StateFilter.all(),
+        at_token: Optional[StreamToken] = None,
+        is_guest: bool = False,
+    ) -> List[dict]:
         """Retrieve all state events for a given room. If the user is
         joined to the room then return the current state. If the user has
         left the room return the state events from when they left. If an explicit
@@ -131,15 +145,14 @@ class MessageHandler(object):
         visible.
 
         Args:
-            user_id(str): The user requesting state events.
-            room_id(str): The room ID to get all state events from.
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-            at_token(StreamToken|None): the stream token of the at which we are requesting
+            user_id: The user requesting state events.
+            room_id: The room ID to get all state events from.
+            state_filter: The state filter used to fetch state from the database.
+            at_token: the stream token at which we are requesting the state.
                If the user is not allowed to view the state as of that
                 stream token, we raise a 403 SynapseError. If None, returns the current
                 state based on the current_state_events table.
-            is_guest(bool): whether this user is a guest
+            is_guest: whether this user is a guest
         Returns:
             A list of dicts representing state events. [{}, {}, {}]
         Raises:
@@ -153,20 +166,20 @@ class MessageHandler(object):
             # get_recent_events_for_room operates by topo ordering. This therefore
             # does not reliably give you the state at the given stream position.
             # (https://github.com/matrix-org/synapse/issues/3305)
-            last_events, _ = yield self.store.get_recent_events_for_room(
+            last_events, _ = await self.store.get_recent_events_for_room(
                 room_id, end_token=at_token.room_key, limit=1
             )
 
             if not last_events:
                 raise NotFoundError("Can't find event for token %s" % (at_token,))
 
-            visible_events = yield filter_events_for_client(
+            visible_events = await filter_events_for_client(
                 self.storage, user_id, last_events, filter_send_to_client=False
             )
 
             event = last_events[0]
             if visible_events:
-                room_state = yield self.state_store.get_state_for_events(
+                room_state = await self.state_store.get_state_for_events(
                     [event.event_id], state_filter=state_filter
                 )
                 room_state = room_state[event.event_id]
@@ -180,23 +193,23 @@ class MessageHandler(object):
             (
                 membership,
                 membership_event_id,
-            ) = yield self.auth.check_user_in_room_or_world_readable(
+            ) = await self.auth.check_user_in_room_or_world_readable(
                 room_id, user_id, allow_departed_users=True
             )
 
             if membership == Membership.JOIN:
-                state_ids = yield self.store.get_filtered_current_state_ids(
+                state_ids = await self.store.get_filtered_current_state_ids(
                     room_id, state_filter=state_filter
                 )
-                room_state = yield self.store.get_events(state_ids.values())
+                room_state = await self.store.get_events(state_ids.values())
             elif membership == Membership.LEAVE:
-                room_state = yield self.state_store.get_state_for_events(
+                room_state = await self.state_store.get_state_for_events(
                     [membership_event_id], state_filter=state_filter
                 )
                 room_state = room_state[membership_event_id]
 
         now = self.clock.time_msec()
-        events = yield self._event_serializer.serialize_events(
+        events = await self._event_serializer.serialize_events(
             room_state.values(),
             now,
             # We don't bother bundling aggregations in when asked for state
@@ -205,15 +218,14 @@ class MessageHandler(object):
         )
         return events
 
-    @defer.inlineCallbacks
-    def get_joined_members(self, requester, room_id):
+    async def get_joined_members(self, requester: Requester, room_id: str) -> dict:
         """Get all the joined members in the room and their profile information.
 
         If the user has left the room return the state events from when they left.
 
         Args:
-            requester(Requester): The user requesting state events.
-            room_id(str): The room ID to get all state events from.
+            requester: The user requesting state events.
+            room_id: The room ID to get all state events from.
         Returns:
             A dict of user_id to profile info
         """
@@ -221,7 +233,7 @@ class MessageHandler(object):
         if not requester.app_service:
             # We check AS auth after fetching the room membership, as it
             # requires us to pull out all joined members anyway.
-            membership, _ = yield self.auth.check_user_in_room_or_world_readable(
+            membership, _ = await self.auth.check_user_in_room_or_world_readable(
                 room_id, user_id, allow_departed_users=True
             )
             if membership != Membership.JOIN:
@@ -229,7 +241,7 @@ class MessageHandler(object):
                     "Getting joined members after leaving is not implemented"
                 )
 
-        users_with_profile = yield self.state.get_current_users_in_room(room_id)
+        users_with_profile = await self.state.get_current_users_in_room(room_id)
 
         # If this is an AS, double check that they are allowed to see the members.
         # This can either be because the AS user is in the room or because there
@@ -250,7 +262,7 @@ class MessageHandler(object):
             for user_id, profile in users_with_profile.items()
         }
 
-    def maybe_schedule_expiry(self, event):
+    def maybe_schedule_expiry(self, event: EventBase):
         """Schedule the expiry of an event if there's not already one scheduled,
         or if the one running is for an event that will expire after the provided
         timestamp.
@@ -259,7 +271,7 @@ class MessageHandler(object):
         the master process, and therefore needs to be run on there.
 
         Args:
-            event (EventBase): The event to schedule the expiry of.
+            event: The event to schedule the expiry of.
         """
 
         expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
@@ -270,8 +282,7 @@ class MessageHandler(object):
         # a task scheduled for a timestamp that's sooner than the provided one.
         self._schedule_expiry_for_event(event.event_id, expiry_ts)
 
-    @defer.inlineCallbacks
-    def _schedule_next_expiry(self):
+    async def _schedule_next_expiry(self):
         """Retrieve the ID and the expiry timestamp of the next event to be expired,
         and schedule an expiry task for it.
 
@@ -279,18 +290,18 @@ class MessageHandler(object):
         future call to save_expiry_ts can schedule a new expiry task.
         """
         # Try to get the expiry timestamp of the next event to expire.
-        res = yield self.store.get_next_event_to_expire()
+        res = await self.store.get_next_event_to_expire()
         if res:
             event_id, expiry_ts = res
             self._schedule_expiry_for_event(event_id, expiry_ts)
 
-    def _schedule_expiry_for_event(self, event_id, expiry_ts):
+    def _schedule_expiry_for_event(self, event_id: str, expiry_ts: int):
         """Schedule an expiry task for the provided event if there's not already one
         scheduled at a timestamp that's sooner than the provided one.
 
         Args:
-            event_id (str): The ID of the event to expire.
-            expiry_ts (int): The timestamp at which to expire the event.
+            event_id: The ID of the event to expire.
+            expiry_ts: The timestamp at which to expire the event.
         """
         if self._scheduled_expiry:
             # If the provided timestamp refers to a time before the scheduled time of the
@@ -320,8 +331,7 @@ class MessageHandler(object):
             event_id,
         )
 
-    @defer.inlineCallbacks
-    def _expire_event(self, event_id):
+    async def _expire_event(self, event_id: str):
         """Retrieve and expire an event that needs to be expired from the database.
 
         If the event doesn't exist in the database, log it and delete the expiry date
@@ -336,12 +346,12 @@ class MessageHandler(object):
         try:
             # Expire the event if we know about it. This function also deletes the expiry
             # date from the database in the same database transaction.
-            yield self.store.expire_event(event_id)
+            await self.store.expire_event(event_id)
         except Exception as e:
             logger.error("Could not expire event %s: %r", event_id, e)
 
         # Schedule the expiry of the next event to expire.
-        yield self._schedule_next_expiry()
+        await self._schedule_next_expiry()
 
 
 # The duration (in ms) after which rooms should be removed
@@ -423,16 +433,15 @@ class EventCreationHandler(object):
 
         self._dummy_events_threshold = hs.config.dummy_events_threshold
 
-    @defer.inlineCallbacks
-    def create_event(
+    async def create_event(
         self,
-        requester,
-        event_dict,
-        token_id=None,
-        txn_id=None,
+        requester: Requester,
+        event_dict: dict,
+        token_id: Optional[str] = None,
+        txn_id: Optional[str] = None,
         prev_event_ids: Optional[Collection[str]] = None,
-        require_consent=True,
-    ):
+        require_consent: bool = True,
+    ) -> Tuple[EventBase, EventContext]:
         """
         Given a dict from a client, create a new event.
 
@@ -443,31 +452,29 @@ class EventCreationHandler(object):
 
         Args:
             requester
-            event_dict (dict): An entire event
-            token_id (str)
-            txn_id (str)
-
+            event_dict: An entire event
+            token_id
+            txn_id
             prev_event_ids:
                 the forward extremities to use as the prev_events for the
                 new event.
 
                 If None, they will be requested from the database.
-
-            require_consent (bool): Whether to check if the requester has
-                consented to privacy policy.
+            require_consent: Whether to check if the requester has
+                consented to the privacy policy.
         Raises:
             ResourceLimitError if server is blocked to some resource being
             exceeded
         Returns:
-            Tuple of created event (FrozenEvent), Context
+            Tuple of the created event and its context
         """
-        yield self.auth.check_auth_blocking(requester.user.to_string())
+        await self.auth.check_auth_blocking(requester.user.to_string())
 
         if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
             room_version = event_dict["content"]["room_version"]
         else:
             try:
-                room_version = yield self.store.get_room_version_id(
+                room_version = await self.store.get_room_version_id(
                     event_dict["room_id"]
                 )
             except NotFoundError:
@@ -488,15 +495,11 @@ class EventCreationHandler(object):
 
                 try:
                     if "displayname" not in content:
-                        displayname = yield defer.ensureDeferred(
-                            profile.get_displayname(target)
-                        )
+                        displayname = await profile.get_displayname(target)
                         if displayname is not None:
                             content["displayname"] = displayname
                     if "avatar_url" not in content:
-                        avatar_url = yield defer.ensureDeferred(
-                            profile.get_avatar_url(target)
-                        )
+                        avatar_url = await profile.get_avatar_url(target)
                         if avatar_url is not None:
                             content["avatar_url"] = avatar_url
                 except Exception as e:
@@ -504,9 +507,9 @@ class EventCreationHandler(object):
                         "Failed to get profile information for %r: %s", target, e
                     )
 
-        is_exempt = yield self._is_exempt_from_privacy_policy(builder, requester)
+        is_exempt = await self._is_exempt_from_privacy_policy(builder, requester)
         if require_consent and not is_exempt:
-            yield self.assert_accepted_privacy_policy(requester)
+            await self.assert_accepted_privacy_policy(requester)
 
         if token_id is not None:
             builder.internal_metadata.token_id = token_id
@@ -514,7 +517,7 @@ class EventCreationHandler(object):
         if txn_id is not None:
             builder.internal_metadata.txn_id = txn_id
 
-        event, context = yield self.create_new_client_event(
+        event, context = await self.create_new_client_event(
             builder=builder, requester=requester, prev_event_ids=prev_event_ids,
         )
 
@@ -530,10 +533,10 @@ class EventCreationHandler(object):
             # federation as well as those created locally. As of room v3, aliases events
             # can be created by users that are not in the room, therefore we have to
             # tolerate them in event_auth.check().
-            prev_state_ids = yield context.get_prev_state_ids()
+            prev_state_ids = await context.get_prev_state_ids()
             prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
             prev_event = (
-                yield self.store.get_event(prev_event_id, allow_none=True)
+                await self.store.get_event(prev_event_id, allow_none=True)
                 if prev_event_id
                 else None
             )
@@ -556,37 +559,36 @@ class EventCreationHandler(object):
 
         return (event, context)
 
-    def _is_exempt_from_privacy_policy(self, builder, requester):
+    async def _is_exempt_from_privacy_policy(
+        self, builder: EventBuilder, requester: Requester
+    ) -> bool:
         """Determine if an event to be sent is exempt from having to consent
         to the privacy policy
 
         Args:
-            builder (synapse.events.builder.EventBuilder): event being created
-            requester (Requster): user requesting this event
+            builder: event being created
+            requester: user requesting this event
 
         Returns:
-            Deferred[bool]: true if the event can be sent without the user
-                consenting
+            true if the event can be sent without the user consenting
         """
         # the only thing the user can do is join the server notices room.
         if builder.type == EventTypes.Member:
             membership = builder.content.get("membership", None)
             if membership == Membership.JOIN:
-                return self._is_server_notices_room(builder.room_id)
+                return await self._is_server_notices_room(builder.room_id)
             elif membership == Membership.LEAVE:
                 # the user is always allowed to leave (but not kick people)
                 return builder.state_key == requester.user.to_string()
-        return succeed(False)
+        return False
 
-    @defer.inlineCallbacks
-    def _is_server_notices_room(self, room_id):
+    async def _is_server_notices_room(self, room_id: str) -> bool:
         if self.config.server_notices_mxid is None:
             return False
-        user_ids = yield self.store.get_users_in_room(room_id)
+        user_ids = await self.store.get_users_in_room(room_id)
         return self.config.server_notices_mxid in user_ids
 
-    @defer.inlineCallbacks
-    def assert_accepted_privacy_policy(self, requester):
+    async def assert_accepted_privacy_policy(self, requester: Requester) -> None:
         """Check if a user has accepted the privacy policy
 
         Called when the given user is about to do something that requires
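
One less-mechanical change in the hunk above: `_is_exempt_from_privacy_policy` used to return `defer.succeed(False)` because a plain function mixing Deferreds with immediate results had to wrap bare values; an `async def` simply returns the value. In miniature (`query` is a stand-in for a Deferred-returning call):

    from twisted.internet import defer

    # Before: every path had to hand back a Deferred to keep callers uniform.
    def is_exempt_old(check_membership, query):
        if check_membership:
            return query()          # already returns a Deferred
        return defer.succeed(False)

    # After: plain values are fine; awaiting unifies the rest.
    async def is_exempt(check_membership, query):
        if check_membership:
            return await query()
        return False
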
@@ -595,12 +597,10 @@ class EventCreationHandler(object):
         raised.
 
         Args:
-            requester (synapse.types.Requester):
-                The user making the request
+            requester: The user making the request
 
         Returns:
-            Deferred[None]: returns normally if the user has consented or is
-                exempt
+            Returns normally if the user has consented or is exempt
 
         Raises:
             ConsentNotGivenError: if the user has not given consent yet
@@ -621,7 +621,7 @@ class EventCreationHandler(object):
         ):
             return
 
-        u = yield self.store.get_user_by_id(user_id)
+        u = await self.store.get_user_by_id(user_id)
         assert u is not None
         if u["user_type"] in (UserTypes.SUPPORT, UserTypes.BOT):
             # support and bot users are not required to consent
@@ -639,16 +639,20 @@ class EventCreationHandler(object):
         raise ConsentNotGivenError(msg=msg, consent_uri=consent_uri)
 
     async def send_nonmember_event(
-        self, requester, event, context, ratelimit=True
+        self,
+        requester: Requester,
+        event: EventBase,
+        context: EventContext,
+        ratelimit: bool = True,
     ) -> int:
         """
         Persists and notifies local clients and federation of an event.
 
         Args:
-            event (FrozenEvent) the event to send.
-            context (Context) the context of the event.
-            ratelimit (bool): Whether to rate limit this send.
-            is_guest (bool): Whether the sender is a guest.
+            requester
+            event: the event to send.
+            context: the context of the event.
+            ratelimit: Whether to rate limit this send.
 
         Return:
             The stream_id of the persisted event.
@@ -676,19 +680,20 @@ class EventCreationHandler(object):
             requester=requester, event=event, context=context, ratelimit=ratelimit
         )
 
-    @defer.inlineCallbacks
-    def deduplicate_state_event(self, event, context):
+    async def deduplicate_state_event(
+        self, event: EventBase, context: EventContext
+    ) -> Optional[EventBase]:
         """
         Checks whether event is in the latest resolved state in context.
 
         If so, returns the version of the event in context.
         Otherwise, returns None.
         """
-        prev_state_ids = yield context.get_prev_state_ids()
+        prev_state_ids = await context.get_prev_state_ids()
         prev_event_id = prev_state_ids.get((event.type, event.state_key))
         if not prev_event_id:
             return
-        prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+        prev_event = await self.store.get_event(prev_event_id, allow_none=True)
         if not prev_event:
             return
 
@@ -700,7 +705,11 @@ class EventCreationHandler(object):
         return
 
     async def create_and_send_nonmember_event(
-        self, requester, event_dict, ratelimit=True, txn_id=None
+        self,
+        requester: Requester,
+        event_dict: dict,
+        ratelimit: bool = True,
+        txn_id: Optional[str] = None,
     ) -> Tuple[EventBase, int]:
         """
         Creates an event, then sends it.
@@ -730,17 +739,17 @@ class EventCreationHandler(object):
         return event, stream_id
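
A minimal usage sketch of the converted API, assuming a handler and an
authenticated requester obtained elsewhere (both hypothetical names here);
the event dict follows the usual Matrix client-server event shape:

    event_dict = {
        "type": "m.room.message",
        "room_id": "!room:example.com",
        "sender": "@alice:example.com",
        "content": {"msgtype": "m.text", "body": "hello"},
    }

    async def send_message(event_creation_handler, requester):
        # Now a coroutine: awaited directly rather than yielded from an
        # inlineCallbacks generator.
        event, stream_id = await event_creation_handler.create_and_send_nonmember_event(
            requester, event_dict, ratelimit=True
        )
        return event.event_id, stream_id
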
 
     @measure_func("create_new_client_event")
-    @defer.inlineCallbacks
-    def create_new_client_event(
-        self, builder, requester=None, prev_event_ids: Optional[Collection[str]] = None
-    ):
+    async def create_new_client_event(
+        self,
+        builder: EventBuilder,
+        requester: Optional[Requester] = None,
+        prev_event_ids: Optional[Collection[str]] = None,
+    ) -> Tuple[EventBase, EventContext]:
         """Create a new event for a local client
 
         Args:
-            builder (EventBuilder):
-
-            requester (synapse.types.Requester|None):
-
+            builder:
+            requester:
             prev_event_ids:
                 the forward extremities to use as the prev_events for the
                 new event.
@@ -748,7 +757,7 @@ class EventCreationHandler(object):
                 If None, they will be requested from the database.
 
         Returns:
-            Deferred[(synapse.events.EventBase, synapse.events.snapshot.EventContext)]
+            A tuple of (event, context)
         """
 
         if prev_event_ids is not None:
@@ -757,10 +766,10 @@ class EventCreationHandler(object):
                 % (len(prev_event_ids),)
             )
         else:
-            prev_event_ids = yield self.store.get_prev_events_for_room(builder.room_id)
+            prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)
 
-        event = yield builder.build(prev_event_ids=prev_event_ids)
-        context = yield self.state.compute_event_context(event)
+        event = await builder.build(prev_event_ids=prev_event_ids)
+        context = await self.state.compute_event_context(event)
         if requester:
             context.app_service = requester.app_service
 
@@ -774,7 +783,7 @@ class EventCreationHandler(object):
             relates_to = relation["event_id"]
             aggregation_key = relation["key"]
 
-            already_exists = yield self.store.has_user_annotated_event(
+            already_exists = await self.store.has_user_annotated_event(
                 relates_to, event.type, aggregation_key, event.sender
             )
             if already_exists:
@@ -786,7 +795,12 @@ class EventCreationHandler(object):
 
     @measure_func("handle_new_client_event")
     async def handle_new_client_event(
-        self, requester, event, context, ratelimit=True, extra_users=[]
+        self,
+        requester: Requester,
+        event: EventBase,
+        context: EventContext,
+        ratelimit: bool = True,
+        extra_users: List[UserID] = [],
     ) -> int:
         """Processes a new event. This includes checking auth, persisting it,
         notifying users, sending to remote servers, etc.
@@ -795,11 +809,11 @@ class EventCreationHandler(object):
         processing.
 
         Args:
-            requester (Requester)
-            event (FrozenEvent)
-            context (EventContext)
-            ratelimit (bool)
-            extra_users (list(UserID)): Any extra users to notify about event
+            requester
+            event
+            context
+            ratelimit
+            extra_users: Any extra users to notify about event
 
         Return:
             The stream_id of the persisted event.
@@ -843,9 +857,6 @@ class EventCreationHandler(object):
 
         await self.action_generator.handle_push_actions_for_event(event, context)
 
-        # reraise does not allow inlineCallbacks to preserve the stacktrace, so we
-        # hack around with a try/finally instead.
-        success = False
         try:
             # If we're a worker we need to hit out to the master.
             if not self._is_event_writer:
@@ -861,27 +872,24 @@ class EventCreationHandler(object):
                 )
                 stream_id = result["stream_id"]
                 event.internal_metadata.stream_ordering = stream_id
-                success = True
                 return stream_id
 
             stream_id = await self.persist_and_notify_client_event(
                 requester, event, context, ratelimit=ratelimit, extra_users=extra_users
             )
 
-            success = True
             return stream_id
-        finally:
-            if not success:
-                # Ensure that we actually remove the entries in the push actions
-                # staging area, if we calculated them.
-                run_in_background(
-                    self.store.remove_push_actions_from_staging, event.event_id
-                )
+        except Exception:
+            # Ensure that we actually remove the entries in the push actions
+            # staging area, if we calculated them.
+            run_in_background(
+                self.store.remove_push_actions_from_staging, event.event_id
+            )
+            raise
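
Why the success-flag/finally pattern can go: native coroutines preserve the
original traceback on a bare re-raise, which inlineCallbacks generators did
not. A side-by-side sketch of the two styles (do_work and cleanup are
hypothetical callables):

    async def old_style(do_work, cleanup):
        success = False
        try:
            result = await do_work()
            success = True
            return result
        finally:
            if not success:
                cleanup()

    async def new_style(do_work, cleanup):
        try:
            return await do_work()
        except Exception:
            cleanup()
            raise  # re-raises with the original traceback intact
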
 
-    @defer.inlineCallbacks
-    def _validate_canonical_alias(
-        self, directory_handler, room_alias_str, expected_room_id
-    ):
+    async def _validate_canonical_alias(
+        self, directory_handler, room_alias_str: str, expected_room_id: str
+    ) -> None:
         """
         Ensure that the given room alias points to the expected room ID.
 
@@ -892,9 +900,7 @@ class EventCreationHandler(object):
         """
         room_alias = RoomAlias.from_string(room_alias_str)
         try:
-            mapping = yield defer.ensureDeferred(
-                directory_handler.get_association(room_alias)
-            )
+            mapping = await directory_handler.get_association(room_alias)
         except SynapseError as e:
             # Turn M_NOT_FOUND errors into M_BAD_ALIAS errors.
             if e.errcode == Codes.NOT_FOUND:
@@ -913,7 +919,12 @@ class EventCreationHandler(object):
             )
 
     async def persist_and_notify_client_event(
-        self, requester, event, context, ratelimit=True, extra_users=[]
+        self,
+        requester: Requester,
+        event: EventBase,
+        context: EventContext,
+        ratelimit: bool = True,
+        extra_users: List[UserID] = [],
     ) -> int:
         """Called when we have fully built the event, have already
         calculated the push actions for the event, and checked auth.
@@ -1106,7 +1117,7 @@ class EventCreationHandler(object):
 
         return event_stream_id
 
-    async def _bump_active_time(self, user):
+    async def _bump_active_time(self, user: UserID) -> None:
         try:
             presence = self.hs.get_presence_handler()
             await presence.bump_presence_active_time(user)
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index d2f25ae12a..b3a3bb8c3f 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -30,8 +30,6 @@ from typing import Dict, Iterable, List, Set, Tuple
 from prometheus_client import Counter
 from typing_extensions import ContextManager
 
-from twisted.internet import defer
-
 import synapse.metrics
 from synapse.api.constants import EventTypes, Membership, PresenceState
 from synapse.api.errors import SynapseError
@@ -39,6 +37,8 @@ from synapse.logging.context import run_in_background
 from synapse.logging.utils import log_function
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.state import StateHandler
+from synapse.storage.data_stores.main import DataStore
 from synapse.storage.presence import UserPresenceState
 from synapse.types import JsonDict, UserID, get_domain_from_id
 from synapse.util.async_helpers import Linearizer
@@ -895,16 +895,9 @@ class PresenceHandler(BasePresenceHandler):
 
             await self._on_user_joined_room(room_id, state_key)
 
-    async def _on_user_joined_room(self, room_id, user_id):
+    async def _on_user_joined_room(self, room_id: str, user_id: str) -> None:
         """Called when we detect a user joining the room via the current state
         delta stream.
-
-        Args:
-            room_id (str)
-            user_id (str)
-
-        Returns:
-            Deferred
         """
 
         if self.is_mine_id(user_id):
@@ -935,8 +928,8 @@ class PresenceHandler(BasePresenceHandler):
             # TODO: Check that this is actually a new server joining the
             # room.
 
-            user_ids = await self.state.get_current_users_in_room(room_id)
-            user_ids = list(filter(self.is_mine_id, user_ids))
+            users = await self.state.get_current_users_in_room(room_id)
+            user_ids = list(filter(self.is_mine_id, users))
 
             states_d = await self.current_state_for_users(user_ids)
 
@@ -1296,22 +1289,24 @@ def handle_update(prev_state, new_state, is_mine, wheel_timer, now):
     return new_state, persist_and_notify, federation_ping
 
 
-@defer.inlineCallbacks
-def get_interested_parties(store, states):
+async def get_interested_parties(
+    store: DataStore, states: List[UserPresenceState]
+) -> Tuple[Dict[str, List[UserPresenceState]], Dict[str, List[UserPresenceState]]]:
     """Given a list of states return which entities (rooms, users)
     are interested in the given states.
 
     Args:
-        states (list(UserPresenceState))
+        store
+        states
 
     Returns:
-        2-tuple: `(room_ids_to_states, users_to_states)`,
+        A 2-tuple of `(room_ids_to_states, users_to_states)`,
         with each item being a dict of `entity_name` -> `[UserPresenceState]`
     """
     room_ids_to_states = {}  # type: Dict[str, List[UserPresenceState]]
     users_to_states = {}  # type: Dict[str, List[UserPresenceState]]
     for state in states:
-        room_ids = yield store.get_rooms_for_user(state.user_id)
+        room_ids = await store.get_rooms_for_user(state.user_id)
         for room_id in room_ids:
             room_ids_to_states.setdefault(room_id, []).append(state)
 
@@ -1321,20 +1316,22 @@ def get_interested_parties(store, states):
     return room_ids_to_states, users_to_states
 
 
-@defer.inlineCallbacks
-def get_interested_remotes(store, states, state_handler):
+async def get_interested_remotes(
+    store: DataStore, states: List[UserPresenceState], state_handler: StateHandler
+) -> List[Tuple[List[str], List[UserPresenceState]]]:
     """Given a list of presence states figure out which remote servers
     should be sent which.
 
     All the presence states should be for local users only.
 
     Args:
-        store (DataStore)
-        states (list(UserPresenceState))
+        store
+        states
+        state_handler
 
     Returns:
-        Deferred list of ([destinations], [UserPresenceState]), where for
-        each row the list of UserPresenceState should be sent to each
+        A list of 2-tuples of destinations and states, where for
+        each tuple the list of UserPresenceState should be sent to each
         destination
     """
     hosts_and_states = []
@@ -1342,10 +1339,10 @@ def get_interested_remotes(store, states, state_handler):
     # First we look up the rooms each user is in (as well as any explicit
     # subscriptions), then for each distinct room we look up the remote
     # hosts in those rooms.
-    room_ids_to_states, users_to_states = yield get_interested_parties(store, states)
+    room_ids_to_states, users_to_states = await get_interested_parties(store, states)
 
     for room_id, states in room_ids_to_states.items():
-        hosts = yield state_handler.get_current_hosts_in_room(room_id)
+        hosts = await state_handler.get_current_hosts_in_room(room_id)
         hosts_and_states.append((hosts, states))
 
     for user_id, states in users_to_states.items():
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index fb37d371ad..0c5b99234d 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -119,7 +119,7 @@ class RoomCreationHandler(BaseHandler):
 
     async def upgrade_room(
         self, requester: Requester, old_room_id: str, new_version: RoomVersion
-    ):
+    ) -> str:
         """Replace a room with a new room with a different version
 
         Args:
@@ -128,7 +128,7 @@ class RoomCreationHandler(BaseHandler):
             new_version: the new room version to use
 
         Returns:
-            Deferred[unicode]: the new room id
+            the new room id
         """
         await self.ratelimit(requester)
 
@@ -239,7 +239,7 @@ class RoomCreationHandler(BaseHandler):
         old_room_id: str,
         new_room_id: str,
         old_room_state: StateMap[str],
-    ):
+    ) -> None:
         """Send updated power levels in both rooms after an upgrade
 
         Args:
@@ -247,9 +247,6 @@ class RoomCreationHandler(BaseHandler):
             old_room_id: the id of the room to be replaced
             new_room_id: the id of the replacement room
             old_room_state: the state map for the old room
-
-        Returns:
-            Deferred
         """
         old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, ""))
 
@@ -322,7 +319,7 @@ class RoomCreationHandler(BaseHandler):
         new_room_id: str,
         new_room_version: RoomVersion,
         tombstone_event_id: str,
-    ):
+    ) -> None:
         """Populate a new room based on an old room
 
         Args:
@@ -332,8 +329,6 @@ class RoomCreationHandler(BaseHandler):
                created with _generate_room_id())
             new_room_version: the new room version to use
             tombstone_event_id: the ID of the tombstone event in the old room.
-        Returns:
-            Deferred
         """
         user_id = requester.user.to_string()
 
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index a1a8fa1d3b..78586a0a1e 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -469,26 +469,39 @@ class RoomMemberHandler(object):
                     user_id=target.to_string(), room_id=room_id
                 )  # type: Optional[RoomsForUser]
                 if not invite:
+                    logger.info(
+                        "%s sent a leave request to %s, but that is not an active room "
+                        "on this server, and there is no pending invite",
+                        target,
+                        room_id,
+                    )
+
                     raise SynapseError(404, "Not a known room")
 
                 logger.info(
                     "%s rejects invite to %s from %s", target, room_id, invite.sender
                 )
 
-                if self.hs.is_mine_id(invite.sender):
-                    # the inviter was on our server, but has now left. Carry on
-                    # with the normal rejection codepath.
-                    #
-                    # This is a bit of a hack, because the room might still be
-                    # active on other servers.
-                    pass
-                else:
+                if not self.hs.is_mine_id(invite.sender):
                     # send the rejection to the inviter's HS (with fallback to
                     # local event)
                     return await self.remote_reject_invite(
                         invite.event_id, txn_id, requester, content,
                     )
 
+                # the inviter was on our server, but has now left. Carry on
+                # with the normal rejection codepath, which will also send the
+                # rejection out to any other servers we believe are still in the room.
+
+                # thanks to overzealous cleaning up of event_forward_extremities in
+                # `delete_old_current_state_events`, it's possible to end up with no
+                # forward extremities here. If that happens, let's just hang the
+                # rejection off the invite event.
+                #
+                # see: https://github.com/matrix-org/synapse/issues/7139
+                if len(latest_event_ids) == 0:
+                    latest_event_ids = [invite.event_id]
+
         return await self._local_membership_update(
             requester=requester,
             target=target,
@@ -952,7 +965,11 @@ class RoomMemberMasterHandler(RoomMemberHandler):
         if len(remote_room_hosts) == 0:
             raise SynapseError(404, "No known servers")
 
-        if self.hs.config.limit_remote_rooms.enabled:
+        check_complexity = self.hs.config.limit_remote_rooms.enabled
+        if check_complexity and self.hs.config.limit_remote_rooms.admins_can_join:
+            check_complexity = not await self.hs.auth.is_server_admin(user)
+
+        if check_complexity:
             # Fetch the room complexity
             too_complex = await self._is_remote_room_too_complex(
                 room_id, remote_room_hosts
@@ -975,7 +992,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
 
         # Check the room we just joined wasn't too large, if we didn't fetch the
         # complexity of it before.
-        if self.hs.config.limit_remote_rooms.enabled:
+        if check_complexity:
             if too_complex is False:
                 # We checked, and we're under the limit.
                 return event_id, stream_id
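
The gating above, distilled into a pure function for clarity (the real code
reads hs.config and performs an async admin lookup):

    def should_check_complexity(
        limit_enabled: bool, admins_can_join: bool, is_admin: bool
    ) -> bool:
        check = limit_enabled
        if check and admins_can_join:
            # Server admins may join rooms of arbitrary complexity.
            check = not is_admin
        return check

    assert should_check_complexity(True, True, is_admin=True) is False
    assert should_check_complexity(True, False, is_admin=True) is True
    assert should_check_complexity(False, True, is_admin=False) is False
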
diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py
index abecaa8313..2d506dc1f2 100644
--- a/synapse/handlers/saml_handler.py
+++ b/synapse/handlers/saml_handler.py
@@ -96,6 +96,9 @@ class SamlHandler:
             relay_state=client_redirect_url
         )
 
+        # Since SAML sessions time out, it is useful to log when they were created.
+        logger.info("Initiating a new SAML session: %s", reqid)
+
         now = self._clock.time_msec()
         self._outstanding_requests_dict[reqid] = Saml2SessionData(
             creation_time=now, ui_auth_session_id=ui_auth_session_id,
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 4d40d3ac9c..9b312a1558 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -15,6 +15,7 @@
 
 import itertools
 import logging
+from typing import Iterable
 
 from unpaddedbase64 import decode_base64, encode_base64
 
@@ -37,7 +38,7 @@ class SearchHandler(BaseHandler):
         self.state_store = self.storage.state
         self.auth = hs.get_auth()
 
-    async def get_old_rooms_from_upgraded_room(self, room_id):
+    async def get_old_rooms_from_upgraded_room(self, room_id: str) -> Iterable[str]:
         """Retrieves room IDs of old rooms in the history of an upgraded room.
 
         We do so by checking the m.room.create event of the room for a
@@ -48,10 +49,10 @@ class SearchHandler(BaseHandler):
         The full list of all found rooms is then returned.
 
         Args:
-            room_id (str): id of the room to search through.
+            room_id: id of the room to search through.
 
         Returns:
-            Deferred[iterable[str]]: predecessor room ids
+            Predecessor room ids
         """
 
         historical_room_ids = []
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 365d7323e4..eaa4eeadf7 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -103,6 +103,7 @@ class JoinedSyncResult:
     account_data = attr.ib(type=List[JsonDict])
     unread_notifications = attr.ib(type=JsonDict)
     summary = attr.ib(type=Optional[JsonDict])
+    unread_count = attr.ib(type=int)
 
     def __nonzero__(self) -> bool:
         """Make the result appear empty if there are no updates. This is used
@@ -421,10 +422,6 @@ class SyncHandler(object):
         potential_recents: Optional[List[EventBase]] = None,
         newly_joined_room: bool = False,
     ) -> TimelineBatch:
-        """
-        Returns:
-            a Deferred TimelineBatch
-        """
         with Measure(self.clock, "load_filtered_recents"):
             timeline_limit = sync_config.filter_collection.timeline_limit()
             block_all_timeline = (
@@ -1890,6 +1887,10 @@ class SyncHandler(object):
 
         if room_builder.rtype == "joined":
             unread_notifications = {}  # type: Dict[str, str]
+
+            unread_count = await self.store.get_unread_message_count_for_user(
+                room_id, sync_config.user.to_string(),
+            )
             room_sync = JoinedSyncResult(
                 room_id=room_id,
                 timeline=batch,
@@ -1898,6 +1899,7 @@ class SyncHandler(object):
                 account_data=account_data_events,
                 unread_notifications=unread_notifications,
                 summary=summary,
+                unread_count=unread_count,
             )
 
             if room_sync or always_include:
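
For context, a self-contained sketch of the attrs pattern used by
JoinedSyncResult (the class name here is hypothetical): adding a typed
attr.ib means every construction site must now supply the field, which is
why the call above passes unread_count explicitly:

    import attr

    @attr.s(slots=True)
    class RoomSyncSketch:
        room_id = attr.ib(type=str)
        unread_count = attr.ib(type=int)

    sync = RoomSyncSketch(room_id="!room:example.com", unread_count=3)
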
diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py
index a140e9391e..a011e9fe29 100644
--- a/synapse/handlers/ui_auth/checkers.py
+++ b/synapse/handlers/ui_auth/checkers.py
@@ -14,10 +14,10 @@
 # limitations under the License.
 
 import logging
+from typing import Any
 
 from canonicaljson import json
 
-from twisted.internet import defer
 from twisted.web.client import PartialDownloadError
 
 from synapse.api.constants import LoginType
@@ -33,25 +33,25 @@ class UserInteractiveAuthChecker:
     def __init__(self, hs):
         pass
 
-    def is_enabled(self):
+    def is_enabled(self) -> bool:
         """Check if the configuration of the homeserver allows this checker to work
 
         Returns:
-            bool: True if this login type is enabled.
+            True if this login type is enabled.
         """
 
-    def check_auth(self, authdict, clientip):
+    async def check_auth(self, authdict: dict, clientip: str) -> Any:
         """Given the authentication dict from the client, attempt to check this step
 
         Args:
-            authdict (dict): authentication dictionary from the client
-            clientip (str): The IP address of the client.
+            authdict: authentication dictionary from the client
+            clientip: The IP address of the client.
 
         Raises:
             SynapseError if authentication failed
 
         Returns:
-            Deferred: the result of authentication (to pass back to the client?)
+            The result of authentication (to pass back to the client?)
         """
         raise NotImplementedError()
 
@@ -62,8 +62,8 @@ class DummyAuthChecker(UserInteractiveAuthChecker):
     def is_enabled(self):
         return True
 
-    def check_auth(self, authdict, clientip):
-        return defer.succeed(True)
+    async def check_auth(self, authdict, clientip):
+        return True
 
 
 class TermsAuthChecker(UserInteractiveAuthChecker):
@@ -72,8 +72,8 @@ class TermsAuthChecker(UserInteractiveAuthChecker):
     def is_enabled(self):
         return True
 
-    def check_auth(self, authdict, clientip):
-        return defer.succeed(True)
+    async def check_auth(self, authdict, clientip):
+        return True
 
 
 class RecaptchaAuthChecker(UserInteractiveAuthChecker):
@@ -89,8 +89,7 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
     def is_enabled(self):
         return self._enabled
 
-    @defer.inlineCallbacks
-    def check_auth(self, authdict, clientip):
+    async def check_auth(self, authdict, clientip):
         try:
             user_response = authdict["response"]
         except KeyError:
@@ -107,7 +106,7 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
         # TODO: get this from the homeserver rather than creating a new one for
         # each request
         try:
-            resp_body = yield self._http_client.post_urlencoded_get_json(
+            resp_body = await self._http_client.post_urlencoded_get_json(
                 self._url,
                 args={
                     "secret": self._secret,
@@ -219,8 +218,8 @@ class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChec
             ThreepidBehaviour.LOCAL,
         )
 
-    def check_auth(self, authdict, clientip):
-        return defer.ensureDeferred(self._check_threepid("email", authdict))
+    async def check_auth(self, authdict, clientip):
+        return await self._check_threepid("email", authdict)
 
 
 class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker):
@@ -233,8 +232,8 @@ class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker):
     def is_enabled(self):
         return bool(self.hs.config.account_threepid_delegate_msisdn)
 
-    def check_auth(self, authdict, clientip):
-        return defer.ensureDeferred(self._check_threepid("msisdn", authdict))
+    async def check_auth(self, authdict, clientip):
+        return await self._check_threepid("msisdn", authdict)
 
 
 INTERACTIVE_AUTH_CHECKERS = [
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 6bc51202cd..155b7460d4 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -395,7 +395,9 @@ class SimpleHttpClient(object):
         if 200 <= response.code < 300:
             return json.loads(body.decode("utf-8"))
         else:
-            raise HttpResponseException(response.code, response.phrase, body)
+            raise HttpResponseException(
+                response.code, response.phrase.decode("ascii", errors="replace"), body
+            )
 
     @defer.inlineCallbacks
     def post_json_get_json(self, uri, post_json, headers=None):
@@ -436,7 +438,9 @@ class SimpleHttpClient(object):
         if 200 <= response.code < 300:
             return json.loads(body.decode("utf-8"))
         else:
-            raise HttpResponseException(response.code, response.phrase, body)
+            raise HttpResponseException(
+                response.code, response.phrase.decode("ascii", errors="replace"), body
+            )
 
     @defer.inlineCallbacks
     def get_json(self, uri, args={}, headers=None):
@@ -509,7 +513,9 @@ class SimpleHttpClient(object):
         if 200 <= response.code < 300:
             return json.loads(body.decode("utf-8"))
         else:
-            raise HttpResponseException(response.code, response.phrase, body)
+            raise HttpResponseException(
+                response.code, response.phrase.decode("ascii", errors="replace"), body
+            )
 
     @defer.inlineCallbacks
     def get_raw(self, uri, args={}, headers=None):
@@ -544,7 +550,9 @@ class SimpleHttpClient(object):
         if 200 <= response.code < 300:
             return body
         else:
-            raise HttpResponseException(response.code, response.phrase, body)
+            raise HttpResponseException(
+                response.code, response.phrase.decode("ascii", errors="replace"), body
+            )
 
     # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient.
     # The two should be factored out.
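
Why the decode is needed: Twisted's Response.phrase is bytes, while the
exception message wants str; errors="replace" keeps a malformed reason
phrase from raising UnicodeDecodeError. A quick illustration:

    phrase = b"Internal Server \xff Error"
    print(phrase.decode("ascii", errors="replace"))
    # -> 'Internal Server \ufffd Error'
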
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index c5fc746f2f..0c02648015 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -15,6 +15,7 @@
 
 import logging
 import urllib
+from typing import List
 
 from netaddr import AddrFormatError, IPAddress
 from zope.interface import implementer
@@ -236,11 +237,10 @@ class MatrixHostnameEndpoint(object):
 
         return run_in_background(self._do_connect, protocol_factory)
 
-    @defer.inlineCallbacks
-    def _do_connect(self, protocol_factory):
+    async def _do_connect(self, protocol_factory):
         first_exception = None
 
-        server_list = yield self._resolve_server()
+        server_list = await self._resolve_server()
 
         for server in server_list:
             host = server.host
@@ -251,7 +251,7 @@ class MatrixHostnameEndpoint(object):
                 endpoint = HostnameEndpoint(self._reactor, host, port)
                 if self._tls_options:
                     endpoint = wrapClientTLS(self._tls_options, endpoint)
-                result = yield make_deferred_yieldable(
+                result = await make_deferred_yieldable(
                     endpoint.connect(protocol_factory)
                 )
 
@@ -271,13 +271,9 @@ class MatrixHostnameEndpoint(object):
         # to try and if that doesn't work then we'll have an exception.
         raise Exception("Failed to resolve server %r" % (self._parsed_uri.netloc,))
 
-    @defer.inlineCallbacks
-    def _resolve_server(self):
+    async def _resolve_server(self) -> List[Server]:
         """Resolves the server name to a list of hosts and ports to attempt to
         connect to.
-
-        Returns:
-            Deferred[list[Server]]
         """
 
         if self._parsed_uri.scheme != b"matrix":
@@ -298,7 +294,7 @@ class MatrixHostnameEndpoint(object):
         if port or _is_ip_literal(host):
             return [Server(host, port or 8448)]
 
-        server_list = yield self._srv_resolver.resolve_service(b"_matrix._tcp." + host)
+        server_list = await self._srv_resolver.resolve_service(b"_matrix._tcp." + host)
 
         if server_list:
             return server_list
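
The resolution order implemented above, as a hedged standalone sketch (the
srv_lookup callable stands in for SrvResolver.resolve_service): an explicit
port or IP literal short-circuits (defaulting to port 8448), otherwise SRV
records are consulted, falling back to the hostname itself on 8448:

    from typing import Awaitable, Callable, List, NamedTuple, Optional

    class Server(NamedTuple):
        host: bytes
        port: int

    async def resolve_matrix_host(
        host: bytes,
        port: Optional[int],
        is_ip_literal: bool,
        srv_lookup: Callable[[bytes], Awaitable[List[Server]]],
    ) -> List[Server]:
        if port or is_ip_literal:
            return [Server(host, port or 8448)]
        servers = await srv_lookup(b"_matrix._tcp." + host)
        if servers:
            return servers
        return [Server(host, 8448)]
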
diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py
index 021b233a7d..2ede90a9b1 100644
--- a/synapse/http/federation/srv_resolver.py
+++ b/synapse/http/federation/srv_resolver.py
@@ -17,10 +17,10 @@
 import logging
 import random
 import time
+from typing import List
 
 import attr
 
-from twisted.internet import defer
 from twisted.internet.error import ConnectError
 from twisted.names import client, dns
 from twisted.names.error import DNSNameError, DomainError
@@ -113,16 +113,14 @@ class SrvResolver(object):
         self._cache = cache
         self._get_time = get_time
 
-    @defer.inlineCallbacks
-    def resolve_service(self, service_name):
+    async def resolve_service(self, service_name: bytes) -> List[Server]:
         """Look up a SRV record
 
         Args:
             service_name (bytes): record to look up
 
         Returns:
-            Deferred[list[Server]]:
-                a list of the SRV records, or an empty list if none found
+            a list of the SRV records, or an empty list if none found
         """
         now = int(self._get_time())
 
@@ -136,7 +134,7 @@ class SrvResolver(object):
                 return _sort_server_list(servers)
 
         try:
-            answers, _, _ = yield make_deferred_yieldable(
+            answers, _, _ = await make_deferred_yieldable(
                 self._dns_client.lookupService(service_name)
             )
         except DNSNameError:
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 148eeb19dc..2a6373937a 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -121,8 +121,7 @@ class MatrixFederationRequest(object):
         return self.json
 
 
-@defer.inlineCallbacks
-def _handle_json_response(reactor, timeout_sec, request, response):
+async def _handle_json_response(reactor, timeout_sec, request, response):
     """
     Reads the JSON body of a response, with a timeout
 
@@ -141,7 +140,7 @@ def _handle_json_response(reactor, timeout_sec, request, response):
         d = treq.json_content(response)
         d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor)
 
-        body = yield make_deferred_yieldable(d)
+        body = await make_deferred_yieldable(d)
     except TimeoutError as e:
         logger.warning(
             "{%s} [%s] Timed out reading response", request.txn_id, request.destination,
@@ -224,8 +223,7 @@ class MatrixFederationHttpClient(object):
 
         self._cooperator = Cooperator(scheduler=schedule)
 
-    @defer.inlineCallbacks
-    def _send_request_with_optional_trailing_slash(
+    async def _send_request_with_optional_trailing_slash(
         self, request, try_trailing_slash_on_400=False, **send_request_args
     ):
         """Wrapper for _send_request which can optionally retry the request
@@ -246,10 +244,10 @@ class MatrixFederationHttpClient(object):
                 (except 429).
 
         Returns:
-            Deferred[Dict]: Parsed JSON response body.
+            Dict: Parsed JSON response body.
         """
         try:
-            response = yield self._send_request(request, **send_request_args)
+            response = await self._send_request(request, **send_request_args)
         except HttpResponseException as e:
             # Received an HTTP error > 300. Check if it meets the requirements
             # to retry with a trailing slash
@@ -265,12 +263,11 @@ class MatrixFederationHttpClient(object):
             logger.info("Retrying request with trailing slash")
             request.path += "/"
 
-            response = yield self._send_request(request, **send_request_args)
+            response = await self._send_request(request, **send_request_args)
 
         return response
 
-    @defer.inlineCallbacks
-    def _send_request(
+    async def _send_request(
         self,
         request,
         retry_on_dns_fail=True,
@@ -311,7 +308,7 @@ class MatrixFederationHttpClient(object):
             backoff_on_404 (bool): Back off if we get a 404
 
         Returns:
-            Deferred[twisted.web.client.Response]: resolves with the HTTP
+            twisted.web.client.Response: the HTTP
             response object on success.
 
         Raises:
@@ -335,7 +332,7 @@ class MatrixFederationHttpClient(object):
         ):
             raise FederationDeniedError(request.destination)
 
-        limiter = yield synapse.util.retryutils.get_retry_limiter(
+        limiter = await synapse.util.retryutils.get_retry_limiter(
             request.destination,
             self.clock,
             self._store,
@@ -433,7 +430,7 @@ class MatrixFederationHttpClient(object):
                                 reactor=self.reactor,
                             )
 
-                            response = yield request_deferred
+                            response = await request_deferred
                     except TimeoutError as e:
                         raise RequestSendFailed(e, can_retry=True) from e
                     except DNSLookupError as e:
@@ -447,6 +444,7 @@ class MatrixFederationHttpClient(object):
                     ).inc()
 
                     set_tag(tags.HTTP_STATUS_CODE, response.code)
+                    response_phrase = response.phrase.decode("ascii", errors="replace")
 
                     if 200 <= response.code < 300:
                         logger.debug(
@@ -454,7 +452,7 @@ class MatrixFederationHttpClient(object):
                             request.txn_id,
                             request.destination,
                             response.code,
-                            response.phrase.decode("ascii", errors="replace"),
+                            response_phrase,
                         )
                         pass
                     else:
@@ -463,7 +461,7 @@ class MatrixFederationHttpClient(object):
                             request.txn_id,
                             request.destination,
                             response.code,
-                            response.phrase.decode("ascii", errors="replace"),
+                            response_phrase,
                         )
                         # :'(
                         # Update transactions table?
@@ -473,7 +471,7 @@ class MatrixFederationHttpClient(object):
                         )
 
                         try:
-                            body = yield make_deferred_yieldable(d)
+                            body = await make_deferred_yieldable(d)
                         except Exception as e:
                             # Eh, we're already going to raise an exception so lets
                             # ignore if this fails.
@@ -487,7 +485,7 @@ class MatrixFederationHttpClient(object):
                             )
                             body = None
 
-                        e = HttpResponseException(response.code, response.phrase, body)
+                        e = HttpResponseException(response.code, response_phrase, body)
 
                         # Retry if the error is a 429 (Too Many Requests),
                         # otherwise just raise a standard HttpResponseException
@@ -527,7 +525,7 @@ class MatrixFederationHttpClient(object):
                             delay,
                         )
 
-                        yield self.clock.sleep(delay)
+                        await self.clock.sleep(delay)
                         retries_left -= 1
                     else:
                         raise
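
The retry policy above, reduced to a hedged sketch: only a 429 (Too Many
Requests) is retried, after sleeping for the advised delay; anything else
propagates immediately. HttpResponseError, send and sleep are hypothetical
stand-ins for the real client pieces:

    class HttpResponseError(Exception):
        def __init__(self, code: int):
            super().__init__(code)
            self.code = code

    async def send_with_retries(send, sleep, retries_left: int = 3, delay: float = 2.0):
        while True:
            try:
                return await send()
            except HttpResponseError as e:
                if e.code != 429 or retries_left <= 0:
                    raise
                await sleep(delay)
                retries_left -= 1
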
@@ -590,8 +588,7 @@ class MatrixFederationHttpClient(object):
             )
         return auth_headers
 
-    @defer.inlineCallbacks
-    def put_json(
+    async def put_json(
         self,
         destination,
         path,
@@ -635,7 +632,7 @@ class MatrixFederationHttpClient(object):
                 enabled.
 
         Returns:
-            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            dict|list: Succeeds when we get a 2xx HTTP response. The
             result will be the decoded JSON body.
 
         Raises:
@@ -657,7 +654,7 @@ class MatrixFederationHttpClient(object):
             json=data,
         )
 
-        response = yield self._send_request_with_optional_trailing_slash(
+        response = await self._send_request_with_optional_trailing_slash(
             request,
             try_trailing_slash_on_400,
             backoff_on_404=backoff_on_404,
@@ -666,14 +663,13 @@ class MatrixFederationHttpClient(object):
             timeout=timeout,
         )
 
-        body = yield _handle_json_response(
+        body = await _handle_json_response(
             self.reactor, self.default_timeout, request, response
         )
 
         return body
 
-    @defer.inlineCallbacks
-    def post_json(
+    async def post_json(
         self,
         destination,
         path,
@@ -706,7 +702,7 @@ class MatrixFederationHttpClient(object):
 
             args (dict): query params
         Returns:
-            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            dict|list: Succeeds when we get a 2xx HTTP response. The
             result will be the decoded JSON body.
 
         Raises:
@@ -724,7 +720,7 @@ class MatrixFederationHttpClient(object):
             method="POST", destination=destination, path=path, query=args, json=data
         )
 
-        response = yield self._send_request(
+        response = await self._send_request(
             request,
             long_retries=long_retries,
             timeout=timeout,
@@ -736,13 +732,12 @@ class MatrixFederationHttpClient(object):
         else:
             _sec_timeout = self.default_timeout
 
-        body = yield _handle_json_response(
+        body = await _handle_json_response(
             self.reactor, _sec_timeout, request, response
         )
         return body
 
-    @defer.inlineCallbacks
-    def get_json(
+    async def get_json(
         self,
         destination,
         path,
@@ -774,7 +769,7 @@ class MatrixFederationHttpClient(object):
                 response we should try appending a trailing slash to the end of
                 the request. Workaround for #3622 in Synapse <= v0.99.3.
         Returns:
-            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            dict|list: Succeeds when we get a 2xx HTTP response. The
             result will be the decoded JSON body.
 
         Raises:
@@ -791,7 +786,7 @@ class MatrixFederationHttpClient(object):
             method="GET", destination=destination, path=path, query=args
         )
 
-        response = yield self._send_request_with_optional_trailing_slash(
+        response = await self._send_request_with_optional_trailing_slash(
             request,
             try_trailing_slash_on_400,
             backoff_on_404=False,
@@ -800,14 +795,13 @@ class MatrixFederationHttpClient(object):
             timeout=timeout,
         )
 
-        body = yield _handle_json_response(
+        body = await _handle_json_response(
             self.reactor, self.default_timeout, request, response
         )
 
         return body
 
-    @defer.inlineCallbacks
-    def delete_json(
+    async def delete_json(
         self,
         destination,
         path,
@@ -835,7 +829,7 @@ class MatrixFederationHttpClient(object):
 
             args (dict): query params
         Returns:
-            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            dict|list: Succeeds when we get a 2xx HTTP response. The
             result will be the decoded JSON body.
 
         Raises:
@@ -852,20 +846,19 @@ class MatrixFederationHttpClient(object):
             method="DELETE", destination=destination, path=path, query=args
         )
 
-        response = yield self._send_request(
+        response = await self._send_request(
             request,
             long_retries=long_retries,
             timeout=timeout,
             ignore_backoff=ignore_backoff,
         )
 
-        body = yield _handle_json_response(
+        body = await _handle_json_response(
             self.reactor, self.default_timeout, request, response
         )
         return body
 
-    @defer.inlineCallbacks
-    def get_file(
+    async def get_file(
         self,
         destination,
         path,
@@ -885,7 +878,7 @@ class MatrixFederationHttpClient(object):
                 and try the request anyway.
 
         Returns:
-            Deferred[tuple[int, dict]]: Resolves with an (int,dict) tuple of
+            tuple[int, dict]: an (int,dict) tuple of
             the file length and a dict of the response headers.
 
         Raises:
@@ -902,7 +895,7 @@ class MatrixFederationHttpClient(object):
             method="GET", destination=destination, path=path, query=args
         )
 
-        response = yield self._send_request(
+        response = await self._send_request(
             request, retry_on_dns_fail=retry_on_dns_fail, ignore_backoff=ignore_backoff
         )
 
@@ -911,7 +904,7 @@ class MatrixFederationHttpClient(object):
         try:
             d = _readBodyToFile(response, output_stream, max_size)
             d.addTimeout(self.default_timeout, self.reactor)
-            length = yield make_deferred_yieldable(d)
+            length = await make_deferred_yieldable(d)
         except Exception as e:
             logger.warning(
                 "{%s} [%s] Error reading response: %s",
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 8e003689c4..d4f9ad6e67 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -442,21 +442,6 @@ class StaticResource(File):
         return super().render_GET(request)
 
 
-def _options_handler(request):
-    """Request handler for OPTIONS requests
-
-    This is a request handler suitable for return from
-    _get_handler_for_request. It returns a 200 and an empty body.
-
-    Args:
-        request (twisted.web.http.Request):
-
-    Returns:
-        Tuple[int, dict]: http code, response body.
-    """
-    return 200, {}
-
-
 def _unrecognised_request_handler(request):
     """Request handler for unrecognised requests
 
@@ -490,11 +475,12 @@ class OptionsResource(resource.Resource):
     """Responds to OPTION requests for itself and all children."""
 
     def render_OPTIONS(self, request):
-        code, response_json_object = _options_handler(request)
+        request.setResponseCode(204)
+        request.setHeader(b"Content-Length", b"0")
 
-        return respond_with_json(
-            request, code, response_json_object, send_cors=True, canonical_json=False,
-        )
+        set_cors_headers(request)
+
+        return b""
 
     def getChildWithDefault(self, path, request):
         if request.method == b"OPTIONS":
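
A standalone sketch of the new behaviour: OPTIONS is answered with
204 No Content and an empty body rather than a JSON 200. The inline CORS
headers only approximate what set_cors_headers does and are assumptions:

    from twisted.web import resource

    class OptionsSketch(resource.Resource):
        def render_OPTIONS(self, request):
            request.setResponseCode(204)
            request.setHeader(b"Content-Length", b"0")
            request.setHeader(b"Access-Control-Allow-Origin", b"*")
            request.setHeader(
                b"Access-Control-Allow-Methods", b"GET, POST, PUT, DELETE, OPTIONS"
            )
            return b""
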
diff --git a/synapse/http/site.py b/synapse/http/site.py
index cbc37eac6e..6f3b2258cc 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -215,9 +215,7 @@ class SynapseRequest(Request):
         # It's useful to log it here so that we can get an idea of when
         # the client disconnects.
         with PreserveLoggingContext(self.logcontext):
-            logger.warning(
-                "Error processing request %r: %s %s", self, reason.type, reason.value
-            )
+            logger.info("Connection from client lost before response was sent")
 
             if not self._is_processing:
                 self._finished_processing()
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 2101517575..21dbd9f415 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -737,24 +737,14 @@ def trace(func=None, opname=None):
 
             @wraps(func)
             async def _trace_inner(*args, **kwargs):
-                if opentracing is None:
+                with start_active_span(_opname):
                     return await func(*args, **kwargs)
 
-                with start_active_span(_opname) as scope:
-                    try:
-                        return await func(*args, **kwargs)
-                    except Exception:
-                        scope.span.set_tag(tags.ERROR, True)
-                        raise
-
         else:
             # The other case here handles both sync functions and those
             # decorated with inlineDeferred.
             @wraps(func)
             def _trace_inner(*args, **kwargs):
-                if opentracing is None:
-                    return func(*args, **kwargs)
-
                 scope = start_active_span(_opname)
                 scope.__enter__()
 
@@ -767,7 +757,6 @@ def trace(func=None, opname=None):
                             return result
 
                         def err_back(result):
-                            scope.span.set_tag(tags.ERROR, True)
                             scope.__exit__(None, None, None)
                             return result
 
diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py
index dc3ab00cbb..026854b4c7 100644
--- a/synapse/logging/scopecontextmanager.py
+++ b/synapse/logging/scopecontextmanager.py
@@ -116,6 +116,8 @@ class _LogContextScope(Scope):
         if self._enter_logcontext:
             self.logcontext.__enter__()
 
+        return self
+
     def __exit__(self, type, value, traceback):
         if type == twisted.internet.defer._DefGen_Return:
             super(_LogContextScope, self).__exit__(None, None, None)
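
Why the added `return self` matters: the `with` statement binds whatever
__enter__ returns to the `as` target, so without it the active scope would
bind as None. A minimal demonstration:

    class Scope:
        def __enter__(self):
            return self  # required for `with ... as scope` to bind the scope

        def __exit__(self, exc_type, exc_value, traceback):
            return False  # do not swallow exceptions

    with Scope() as scope:
        assert scope is not None
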
diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py
index 1ffd5e2df3..0d23142653 100644
--- a/synapse/push/action_generator.py
+++ b/synapse/push/action_generator.py
@@ -15,8 +15,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.util.metrics import Measure
 
 from .bulk_push_rule_evaluator import BulkPushRuleEvaluator
@@ -37,7 +35,6 @@ class ActionGenerator(object):
         # event stream, so we just run the rules for a client with no profile
         # tag (ie. we just need all the users).
 
-    @defer.inlineCallbacks
-    def handle_push_actions_for_event(self, event, context):
+    async def handle_push_actions_for_event(self, event, context):
         with Measure(self.clock, "action_for_event_by_user"):
-            yield self.bulk_evaluator.action_for_event_by_user(event, context)
+            await self.bulk_evaluator.action_for_event_by_user(event, context)
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 43ffe6faf0..04b9d8ac82 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -19,8 +19,6 @@ from collections import namedtuple
 
 from prometheus_client import Counter
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes, Membership
 from synapse.event_auth import get_user_power_level
 from synapse.state import POWER_KEY
@@ -70,8 +68,7 @@ class BulkPushRuleEvaluator(object):
             resizable=False,
         )
 
-    @defer.inlineCallbacks
-    def _get_rules_for_event(self, event, context):
+    async def _get_rules_for_event(self, event, context):
         """This gets the rules for all users in the room at the time of the event,
         as well as the push rules for the invitee if the event is an invite.
 
@@ -79,19 +76,19 @@ class BulkPushRuleEvaluator(object):
             dict of user_id -> push_rules
         """
         room_id = event.room_id
-        rules_for_room = yield self._get_rules_for_room(room_id)
+        rules_for_room = await self._get_rules_for_room(room_id)
 
-        rules_by_user = yield rules_for_room.get_rules(event, context)
+        rules_by_user = await rules_for_room.get_rules(event, context)
 
         # if this event is an invite event, we may need to run rules for the user
         # who's been invited, otherwise they won't get told they've been invited
         if event.type == "m.room.member" and event.content["membership"] == "invite":
             invited = event.state_key
             if invited and self.hs.is_mine_id(invited):
-                has_pusher = yield self.store.user_has_pusher(invited)
+                has_pusher = await self.store.user_has_pusher(invited)
                 if has_pusher:
                     rules_by_user = dict(rules_by_user)
-                    rules_by_user[invited] = yield self.store.get_push_rules_for_user(
+                    rules_by_user[invited] = await self.store.get_push_rules_for_user(
                         invited
                     )
 
@@ -114,20 +111,19 @@ class BulkPushRuleEvaluator(object):
             self.room_push_rule_cache_metrics,
         )
 
-    @defer.inlineCallbacks
-    def _get_power_levels_and_sender_level(self, event, context):
-        prev_state_ids = yield context.get_prev_state_ids()
+    async def _get_power_levels_and_sender_level(self, event, context):
+        prev_state_ids = await context.get_prev_state_ids()
         pl_event_id = prev_state_ids.get(POWER_KEY)
         if pl_event_id:
             # fastpath: if there's a power level event, that's all we need, and
             # not having a power level event is an extreme edge case
-            pl_event = yield self.store.get_event(pl_event_id)
+            pl_event = await self.store.get_event(pl_event_id)
             auth_events = {POWER_KEY: pl_event}
         else:
-            auth_events_ids = yield self.auth.compute_auth_events(
+            auth_events_ids = await self.auth.compute_auth_events(
                 event, prev_state_ids, for_verification=False
             )
-            auth_events = yield self.store.get_events(auth_events_ids)
+            auth_events = await self.store.get_events(auth_events_ids)
             auth_events = {(e.type, e.state_key): e for e in auth_events.values()}
 
         sender_level = get_user_power_level(event.sender, auth_events)
@@ -136,23 +132,19 @@ class BulkPushRuleEvaluator(object):
 
         return pl_event.content if pl_event else {}, sender_level
 
-    @defer.inlineCallbacks
-    def action_for_event_by_user(self, event, context):
+    async def action_for_event_by_user(self, event, context) -> None:
         """Given an event and context, evaluate the push rules and insert the
         results into the event_push_actions_staging table.
-
-        Returns:
-            Deferred
         """
-        rules_by_user = yield self._get_rules_for_event(event, context)
+        rules_by_user = await self._get_rules_for_event(event, context)
         actions_by_user = {}
 
-        room_members = yield self.store.get_joined_users_from_context(event, context)
+        room_members = await self.store.get_joined_users_from_context(event, context)
 
         (
             power_levels,
             sender_power_level,
-        ) = yield self._get_power_levels_and_sender_level(event, context)
+        ) = await self._get_power_levels_and_sender_level(event, context)
 
         evaluator = PushRuleEvaluatorForEvent(
             event, len(room_members), sender_power_level, power_levels
@@ -165,7 +157,7 @@ class BulkPushRuleEvaluator(object):
                 continue
 
             if not event.is_state():
-                is_ignored = yield self.store.is_ignored_by(event.sender, uid)
+                is_ignored = await self.store.is_ignored_by(event.sender, uid)
                 if is_ignored:
                     continue
 
@@ -197,7 +189,7 @@ class BulkPushRuleEvaluator(object):
         # Mark in the DB staging area the push actions for users who should be
         # notified for this event. (This will then get handled when we persist
         # the event)
-        yield self.store.add_push_actions_to_staging(event.event_id, actions_by_user)
+        await self.store.add_push_actions_to_staging(event.event_id, actions_by_user)
 
 
 def _condition_checker(evaluator, conditions, uid, display_name, cache):
@@ -274,8 +266,7 @@ class RulesForRoom(object):
         # to self around in the callback.
         self.invalidate_all_cb = _Invalidation(rules_for_room_cache, room_id)
 
-    @defer.inlineCallbacks
-    def get_rules(self, event, context):
+    async def get_rules(self, event, context):
         """Given an event context return the rules for all users who are
         currently in the room.
         """
@@ -286,7 +277,7 @@ class RulesForRoom(object):
             self.room_push_rule_cache_metrics.inc_hits()
             return self.rules_by_user
 
-        with (yield self.linearizer.queue(())):
+        with (await self.linearizer.queue(())):
             if state_group and self.state_group == state_group:
                 logger.debug("Using cached rules for %r", self.room_id)
                 self.room_push_rule_cache_metrics.inc_hits()
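
The `with (await self.linearizer.queue(...))` form above is the
pre-`async with` idiom used throughout this conversion: queue() is a
coroutine resolving to a context manager that holds the lock for the body.
A hedged sketch with a fake linearizer:

    class FakeLinearizer:  # hypothetical stand-in for Synapse's Linearizer
        async def queue(self, key):
            class _Guard:
                def __enter__(self):
                    return self

                def __exit__(self, *exc):
                    return False  # propagate exceptions

            return _Guard()

    async def guarded(linearizer, work):
        with (await linearizer.queue(())):
            return await work()
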
@@ -304,7 +295,7 @@ class RulesForRoom(object):
 
                 push_rules_delta_state_cache_metric.inc_hits()
             else:
-                current_state_ids = yield context.get_current_state_ids()
+                current_state_ids = await context.get_current_state_ids()
                 push_rules_delta_state_cache_metric.inc_misses()
 
             push_rules_state_size_counter.inc(len(current_state_ids))
@@ -351,7 +342,7 @@ class RulesForRoom(object):
                 # If we have some member events we haven't seen, look them up
                 # and fetch push rules for them if appropriate.
                 logger.debug("Found new member events %r", missing_member_event_ids)
-                yield self._update_rules_with_member_event_ids(
+                await self._update_rules_with_member_event_ids(
                     ret_rules_by_user, missing_member_event_ids, state_group, event
                 )
             else:
@@ -369,8 +360,7 @@ class RulesForRoom(object):
             )
         return ret_rules_by_user
 
-    @defer.inlineCallbacks
-    def _update_rules_with_member_event_ids(
+    async def _update_rules_with_member_event_ids(
         self, ret_rules_by_user, member_event_ids, state_group, event
     ):
         """Update the partially filled rules_by_user dict by fetching rules for
@@ -386,7 +376,7 @@ class RulesForRoom(object):
         """
         sequence = self.sequence
 
-        rows = yield self.store.get_membership_from_event_ids(member_event_ids.values())
+        rows = await self.store.get_membership_from_event_ids(member_event_ids.values())
 
         members = {row["event_id"]: (row["user_id"], row["membership"]) for row in rows}
 
@@ -408,7 +398,7 @@ class RulesForRoom(object):
 
         logger.debug("Joined: %r", interested_in_user_ids)
 
-        if_users_with_pushers = yield self.store.get_if_users_have_pushers(
+        if_users_with_pushers = await self.store.get_if_users_have_pushers(
             interested_in_user_ids, on_invalidate=self.invalidate_all_cb
         )
 
@@ -418,7 +408,7 @@ class RulesForRoom(object):
 
         logger.debug("With pushers: %r", user_ids)
 
-        users_with_receipts = yield self.store.get_users_with_read_receipts_in_room(
+        users_with_receipts = await self.store.get_users_with_read_receipts_in_room(
             self.room_id, on_invalidate=self.invalidate_all_cb
         )
 
@@ -429,7 +419,7 @@ class RulesForRoom(object):
             if uid in interested_in_user_ids:
                 user_ids.add(uid)
 
-        rules_by_user = yield self.store.bulk_get_push_rules(
+        rules_by_user = await self.store.bulk_get_push_rules(
             user_ids, on_invalidate=self.invalidate_all_cb
         )
 
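The hunks above are representative of the whole commit: each `@defer.inlineCallbacks` generator becomes a native coroutine, with every `yield` replaced by `await`. A minimal, self-contained sketch of the pattern, using illustrative names only (`fetch` stands in for a storage method returning a Deferred):

    from twisted.internet import defer

    def fetch():
        # stand-in for a storage call that returns a Deferred
        return defer.succeed({"@user:example.com": ["some rule"]})

    @defer.inlineCallbacks
    def get_rules_old():
        rules = yield fetch()  # old style: generator plus yield
        return rules

    async def get_rules_new():
        rules = await fetch()  # new style: await accepts Deferreds directly
        return rules

    # a coroutine must be wrapped to run under Twisted's reactor:
    d = defer.ensureDeferred(get_rules_new())
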
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 2fac07593b..4c469efb20 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -17,7 +17,6 @@ import logging
 
 from prometheus_client import Counter
 
-from twisted.internet import defer
 from twisted.internet.error import AlreadyCalled, AlreadyCancelled
 
 from synapse.api.constants import EventTypes
@@ -128,12 +127,11 @@ class HttpPusher(object):
         # but currently that's the only type of receipt anyway...
         run_as_background_process("http_pusher.on_new_receipts", self._update_badge)
 
-    @defer.inlineCallbacks
-    def _update_badge(self):
+    async def _update_badge(self):
         # XXX as per https://github.com/matrix-org/matrix-doc/issues/2627, this seems
         # to be largely redundant. perhaps we can remove it.
-        badge = yield push_tools.get_badge_count(self.hs.get_datastore(), self.user_id)
-        yield self._send_badge(badge)
+        badge = await push_tools.get_badge_count(self.hs.get_datastore(), self.user_id)
+        await self._send_badge(badge)
 
     def on_timer(self):
         self._start_processing()
@@ -152,8 +150,7 @@ class HttpPusher(object):
 
         run_as_background_process("httppush.process", self._process)
 
-    @defer.inlineCallbacks
-    def _process(self):
+    async def _process(self):
         # we should never get here if we are already processing
         assert not self._is_processing
 
@@ -164,7 +161,7 @@ class HttpPusher(object):
             while True:
                 starting_max_ordering = self.max_stream_ordering
                 try:
-                    yield self._unsafe_process()
+                    await self._unsafe_process()
                 except Exception:
                     logger.exception("Exception processing notifs")
                 if self.max_stream_ordering == starting_max_ordering:
@@ -172,8 +169,7 @@ class HttpPusher(object):
         finally:
             self._is_processing = False
 
-    @defer.inlineCallbacks
-    def _unsafe_process(self):
+    async def _unsafe_process(self):
         """
         Looks for unsent notifications and dispatches them, in order.
         Never call this directly: use _process which will only allow this to
@@ -181,7 +177,7 @@ class HttpPusher(object):
         """
 
         fn = self.store.get_unread_push_actions_for_user_in_range_for_http
-        unprocessed = yield fn(
+        unprocessed = await fn(
             self.user_id, self.last_stream_ordering, self.max_stream_ordering
         )
 
@@ -203,13 +199,13 @@ class HttpPusher(object):
                     "app_display_name": self.app_display_name,
                 },
             ):
-                processed = yield self._process_one(push_action)
+                processed = await self._process_one(push_action)
 
             if processed:
                 http_push_processed_counter.inc()
                 self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
                 self.last_stream_ordering = push_action["stream_ordering"]
-                pusher_still_exists = yield self.store.update_pusher_last_stream_ordering_and_success(
+                pusher_still_exists = await self.store.update_pusher_last_stream_ordering_and_success(
                     self.app_id,
                     self.pushkey,
                     self.user_id,
@@ -224,14 +220,14 @@ class HttpPusher(object):
 
                 if self.failing_since:
                     self.failing_since = None
-                    yield self.store.update_pusher_failing_since(
+                    await self.store.update_pusher_failing_since(
                         self.app_id, self.pushkey, self.user_id, self.failing_since
                     )
             else:
                 http_push_failed_counter.inc()
                 if not self.failing_since:
                     self.failing_since = self.clock.time_msec()
-                    yield self.store.update_pusher_failing_since(
+                    await self.store.update_pusher_failing_since(
                         self.app_id, self.pushkey, self.user_id, self.failing_since
                     )
 
@@ -250,7 +246,7 @@ class HttpPusher(object):
                     )
                     self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
                     self.last_stream_ordering = push_action["stream_ordering"]
-                    pusher_still_exists = yield self.store.update_pusher_last_stream_ordering(
+                    pusher_still_exists = await self.store.update_pusher_last_stream_ordering(
                         self.app_id,
                         self.pushkey,
                         self.user_id,
@@ -263,7 +259,7 @@ class HttpPusher(object):
                         return
 
                     self.failing_since = None
-                    yield self.store.update_pusher_failing_since(
+                    await self.store.update_pusher_failing_since(
                         self.app_id, self.pushkey, self.user_id, self.failing_since
                     )
                 else:
@@ -276,18 +272,17 @@ class HttpPusher(object):
                     )
                     break
 
-    @defer.inlineCallbacks
-    def _process_one(self, push_action):
+    async def _process_one(self, push_action):
         if "notify" not in push_action["actions"]:
             return True
 
         tweaks = push_rule_evaluator.tweaks_for_actions(push_action["actions"])
-        badge = yield push_tools.get_badge_count(self.hs.get_datastore(), self.user_id)
+        badge = await push_tools.get_badge_count(self.hs.get_datastore(), self.user_id)
 
-        event = yield self.store.get_event(push_action["event_id"], allow_none=True)
+        event = await self.store.get_event(push_action["event_id"], allow_none=True)
         if event is None:
             return True  # It's been redacted
-        rejected = yield self.dispatch_push(event, tweaks, badge)
+        rejected = await self.dispatch_push(event, tweaks, badge)
         if rejected is False:
             return False
 
@@ -301,11 +296,10 @@ class HttpPusher(object):
                     )
                 else:
                     logger.info("Pushkey %s was rejected: removing", pk)
-                    yield self.hs.remove_pusher(self.app_id, pk, self.user_id)
+                    await self.hs.remove_pusher(self.app_id, pk, self.user_id)
         return True
 
-    @defer.inlineCallbacks
-    def _build_notification_dict(self, event, tweaks, badge):
+    async def _build_notification_dict(self, event, tweaks, badge):
         priority = "low"
         if (
             event.type == EventTypes.Encrypted
@@ -335,7 +329,7 @@ class HttpPusher(object):
             }
             return d
 
-        ctx = yield push_tools.get_context_for_event(
+        ctx = await push_tools.get_context_for_event(
             self.storage, self.state_handler, event, self.user_id
         )
 
@@ -377,13 +371,12 @@ class HttpPusher(object):
 
         return d
 
-    @defer.inlineCallbacks
-    def dispatch_push(self, event, tweaks, badge):
-        notification_dict = yield self._build_notification_dict(event, tweaks, badge)
+    async def dispatch_push(self, event, tweaks, badge):
+        notification_dict = await self._build_notification_dict(event, tweaks, badge)
         if not notification_dict:
             return []
         try:
-            resp = yield self.http_client.post_json_get_json(
+            resp = await self.http_client.post_json_get_json(
                 self.url, notification_dict
             )
         except Exception as e:
@@ -400,8 +393,7 @@ class HttpPusher(object):
             rejected = resp["rejected"]
         return rejected
 
-    @defer.inlineCallbacks
-    def _send_badge(self, badge):
+    async def _send_badge(self, badge):
         """
         Args:
             badge (int): number of unread messages
@@ -424,7 +416,7 @@ class HttpPusher(object):
             }
         }
         try:
-            yield self.http_client.post_json_get_json(self.url, d)
+            await self.http_client.post_json_get_json(self.url, d)
             http_badges_processed_counter.inc()
         except Exception as e:
             logger.warning(
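For context, the `_process`/`_unsafe_process` split converted above follows a simple reentrancy pattern: only one processing loop may run per pusher, and it repeats until no new push actions arrived while it was working. A condensed sketch, with asyncio standing in for Synapse's background-process machinery:

    import asyncio

    class PusherLoop:
        def __init__(self):
            self._is_processing = False
            self.max_stream_ordering = 0

        async def _process(self):
            # we should never get here if we are already processing
            assert not self._is_processing
            self._is_processing = True
            try:
                while True:
                    starting = self.max_stream_ordering
                    await self._unsafe_process()
                    if self.max_stream_ordering == starting:
                        break  # nothing new arrived while we were working
            finally:
                self._is_processing = False

        async def _unsafe_process(self):
            await asyncio.sleep(0)  # stand-in for dispatching push actions
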
diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py
index 0644a13cfc..d8f4a453cd 100644
--- a/synapse/push/presentable_names.py
+++ b/synapse/push/presentable_names.py
@@ -16,8 +16,6 @@
 import logging
 import re
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes
 
 logger = logging.getLogger(__name__)
@@ -29,8 +27,7 @@ ALIAS_RE = re.compile(r"^#.*:.+$")
 ALL_ALONE = "Empty Room"
 
 
-@defer.inlineCallbacks
-def calculate_room_name(
+async def calculate_room_name(
     store,
     room_state_ids,
     user_id,
@@ -53,7 +50,7 @@ def calculate_room_name(
     """
     # does it have a name?
     if (EventTypes.Name, "") in room_state_ids:
-        m_room_name = yield store.get_event(
+        m_room_name = await store.get_event(
             room_state_ids[(EventTypes.Name, "")], allow_none=True
         )
         if m_room_name and m_room_name.content and m_room_name.content["name"]:
@@ -61,7 +58,7 @@ def calculate_room_name(
 
     # does it have a canonical alias?
     if (EventTypes.CanonicalAlias, "") in room_state_ids:
-        canon_alias = yield store.get_event(
+        canon_alias = await store.get_event(
             room_state_ids[(EventTypes.CanonicalAlias, "")], allow_none=True
         )
         if (
@@ -81,7 +78,7 @@ def calculate_room_name(
 
     my_member_event = None
     if (EventTypes.Member, user_id) in room_state_ids:
-        my_member_event = yield store.get_event(
+        my_member_event = await store.get_event(
             room_state_ids[(EventTypes.Member, user_id)], allow_none=True
         )
 
@@ -90,7 +87,7 @@ def calculate_room_name(
         and my_member_event.content["membership"] == "invite"
     ):
         if (EventTypes.Member, my_member_event.sender) in room_state_ids:
-            inviter_member_event = yield store.get_event(
+            inviter_member_event = await store.get_event(
                 room_state_ids[(EventTypes.Member, my_member_event.sender)],
                 allow_none=True,
             )
@@ -107,7 +104,7 @@ def calculate_room_name(
     # we're going to have to generate a name based on who's in the room,
     # so find out who is in the room that isn't the user.
     if EventTypes.Member in room_state_bytype_ids:
-        member_events = yield store.get_events(
+        member_events = await store.get_events(
             list(room_state_bytype_ids[EventTypes.Member].values())
         )
         all_members = [
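The `calculate_room_name` conversion above preserves its lookup order: an explicit `m.room.name` wins, then the canonical alias, then a name derived from the room's membership. A hedged sketch of that precedence, where `get_event` stands in for `store.get_event`:

    async def room_name(get_event, room_state_ids):
        # 1. explicit room name
        if ("m.room.name", "") in room_state_ids:
            ev = await get_event(room_state_ids[("m.room.name", "")])
            if ev and ev.content.get("name"):
                return ev.content["name"]
        # 2. canonical alias
        if ("m.room.canonical_alias", "") in room_state_ids:
            ev = await get_event(room_state_ids[("m.room.canonical_alias", "")])
            if ev and ev.content.get("alias"):
                return ev.content["alias"]
        # 3. fall back to naming the room after its members (elided here)
        return None
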
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index 5dae4648c0..bc8f71916b 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -13,53 +13,40 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
-
 from synapse.push.presentable_names import calculate_room_name, name_from_member_event
 from synapse.storage import Storage
 
 
-@defer.inlineCallbacks
-def get_badge_count(store, user_id):
-    invites = yield store.get_invited_rooms_for_local_user(user_id)
-    joins = yield store.get_rooms_for_user(user_id)
-
-    my_receipts_by_room = yield store.get_receipts_for_user(user_id, "m.read")
+async def get_badge_count(store, user_id):
+    invites = await store.get_invited_rooms_for_local_user(user_id)
+    joins = await store.get_rooms_for_user(user_id)
 
     badge = len(invites)
 
     for room_id in joins:
-        if room_id in my_receipts_by_room:
-            last_unread_event_id = my_receipts_by_room[room_id]
-
-            notifs = yield (
-                store.get_unread_event_push_actions_by_room_for_user(
-                    room_id, user_id, last_unread_event_id
-                )
-            )
-            # return one badge count per conversation, as count per
-            # message is so noisy as to be almost useless
-            badge += 1 if notifs["notify_count"] else 0
+        unread_count = await store.get_unread_message_count_for_user(room_id, user_id)
+        # return one badge count per conversation, as count per
+        # message is so noisy as to be almost useless
+        badge += 1 if unread_count else 0
     return badge
 
 
-@defer.inlineCallbacks
-def get_context_for_event(storage: Storage, state_handler, ev, user_id):
+async def get_context_for_event(storage: Storage, state_handler, ev, user_id):
     ctx = {}
 
-    room_state_ids = yield storage.state.get_state_ids_for_event(ev.event_id)
+    room_state_ids = await storage.state.get_state_ids_for_event(ev.event_id)
 
     # we no longer bother setting room_alias, and make room_name the
     # human-readable name instead, be that m.room.name, an alias or
     # a list of people in the room
-    name = yield calculate_room_name(
+    name = await calculate_room_name(
         storage.main, room_state_ids, user_id, fallback_to_single_member=False
     )
     if name:
         ctx["name"] = name
 
     sender_state_event_id = room_state_ids[("m.room.member", ev.sender)]
-    sender_state_event = yield storage.main.get_event(sender_state_event_id)
+    sender_state_event = await storage.main.get_event(sender_state_event_id)
     ctx["sender_display_name"] = name_from_member_event(sender_state_event)
 
     return ctx
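The rewritten `get_badge_count` also changes behaviour slightly: it asks the store for a per-room unread count instead of walking read receipts. The counting rule itself is unchanged and easy to state in isolation:

    def badge_from_counts(num_invites, unread_counts_by_room):
        # one badge per conversation (plus one per pending invite): a
        # per-message count would be too noisy to be useful
        badge = num_invites
        badge += sum(1 for count in unread_counts_by_room.values() if count)
        return badge

    assert badge_from_counts(1, {"!a:x": 3, "!b:x": 0}) == 2
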
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 2456f12f46..3c3262a88c 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -19,8 +19,6 @@ from typing import TYPE_CHECKING, Dict, Union
 
 from prometheus_client import Gauge
 
-from twisted.internet import defer
-
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.push import PusherConfigException
 from synapse.push.emailpusher import EmailPusher
@@ -52,7 +50,7 @@ class PusherPool:
     Note that it is expected that each pusher will have its own 'processing' loop which
     will send out the notifications in the background, rather than blocking until the
     notifications are sent; accordingly Pusher.on_started, Pusher.on_new_notifications and
-    Pusher.on_new_receipts are not expected to return deferreds.
+    Pusher.on_new_receipts are not expected to return awaitables.
     """
 
     def __init__(self, hs: "HomeServer"):
@@ -77,8 +75,7 @@ class PusherPool:
             return
         run_as_background_process("start_pushers", self._start_pushers)
 
-    @defer.inlineCallbacks
-    def add_pusher(
+    async def add_pusher(
         self,
         user_id,
         access_token,
@@ -94,7 +91,7 @@ class PusherPool:
         """Creates a new pusher and adds it to the pool
 
         Returns:
-            Deferred[EmailPusher|HttpPusher]
+            EmailPusher|HttpPusher
         """
 
         time_now_msec = self.clock.time_msec()
@@ -124,9 +121,9 @@ class PusherPool:
         # create the pusher setting last_stream_ordering to the current maximum
         # stream ordering in event_push_actions, so it will process
         # pushes from this point onwards.
-        last_stream_ordering = yield self.store.get_latest_push_action_stream_ordering()
+        last_stream_ordering = await self.store.get_latest_push_action_stream_ordering()
 
-        yield self.store.add_pusher(
+        await self.store.add_pusher(
             user_id=user_id,
             access_token=access_token,
             kind=kind,
@@ -140,15 +137,14 @@ class PusherPool:
             last_stream_ordering=last_stream_ordering,
             profile_tag=profile_tag,
         )
-        pusher = yield self.start_pusher_by_id(app_id, pushkey, user_id)
+        pusher = await self.start_pusher_by_id(app_id, pushkey, user_id)
 
         return pusher
 
-    @defer.inlineCallbacks
-    def remove_pushers_by_app_id_and_pushkey_not_user(
+    async def remove_pushers_by_app_id_and_pushkey_not_user(
         self, app_id, pushkey, not_user_id
     ):
-        to_remove = yield self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
+        to_remove = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
         for p in to_remove:
             if p["user_name"] != not_user_id:
                 logger.info(
@@ -157,10 +153,9 @@ class PusherPool:
                     pushkey,
                     p["user_name"],
                 )
-                yield self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"])
+                await self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"])
 
-    @defer.inlineCallbacks
-    def remove_pushers_by_access_token(self, user_id, access_tokens):
+    async def remove_pushers_by_access_token(self, user_id, access_tokens):
         """Remove the pushers for a given user corresponding to a set of
         access_tokens.
 
@@ -173,7 +168,7 @@ class PusherPool:
             return
 
         tokens = set(access_tokens)
-        for p in (yield self.store.get_pushers_by_user_id(user_id)):
+        for p in await self.store.get_pushers_by_user_id(user_id):
             if p["access_token"] in tokens:
                 logger.info(
                     "Removing pusher for app id %s, pushkey %s, user %s",
@@ -181,16 +176,15 @@ class PusherPool:
                     p["pushkey"],
                     p["user_name"],
                 )
-                yield self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"])
+                await self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"])
 
-    @defer.inlineCallbacks
-    def on_new_notifications(self, min_stream_id, max_stream_id):
+    async def on_new_notifications(self, min_stream_id, max_stream_id):
         if not self.pushers:
             # nothing to do here.
             return
 
         try:
-            users_affected = yield self.store.get_push_action_users_in_range(
+            users_affected = await self.store.get_push_action_users_in_range(
                 min_stream_id, max_stream_id
             )
 
@@ -202,8 +196,7 @@ class PusherPool:
         except Exception:
             logger.exception("Exception in pusher on_new_notifications")
 
-    @defer.inlineCallbacks
-    def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
+    async def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
         if not self.pushers:
             # nothing to do here.
             return
@@ -211,7 +204,7 @@ class PusherPool:
         try:
             # Need to subtract 1 from the minimum because the lower bound here
             # is not inclusive
-            users_affected = yield self.store.get_users_sent_receipts_between(
+            users_affected = await self.store.get_users_sent_receipts_between(
                 min_stream_id - 1, max_stream_id
             )
 
@@ -223,12 +216,11 @@ class PusherPool:
         except Exception:
             logger.exception("Exception in pusher on_new_receipts")
 
-    @defer.inlineCallbacks
-    def start_pusher_by_id(self, app_id, pushkey, user_id):
+    async def start_pusher_by_id(self, app_id, pushkey, user_id):
         """Look up the details for the given pusher, and start it
 
         Returns:
-            Deferred[EmailPusher|HttpPusher|None]: The pusher started, if any
+            EmailPusher|HttpPusher|None: The pusher started, if any
         """
         if not self._should_start_pushers:
             return
@@ -236,7 +228,7 @@ class PusherPool:
         if not self._pusher_shard_config.should_handle(self._instance_name, user_id):
             return
 
-        resultlist = yield self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
+        resultlist = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
 
         pusher_dict = None
         for r in resultlist:
@@ -245,34 +237,29 @@ class PusherPool:
 
         pusher = None
         if pusher_dict:
-            pusher = yield self._start_pusher(pusher_dict)
+            pusher = await self._start_pusher(pusher_dict)
 
         return pusher
 
-    @defer.inlineCallbacks
-    def _start_pushers(self):
+    async def _start_pushers(self) -> None:
         """Start all the pushers
-
-        Returns:
-            Deferred
         """
-        pushers = yield self.store.get_all_pushers()
+        pushers = await self.store.get_all_pushers()
 
         # Stagger starting up the pushers so we don't completely drown the
         # process on start up.
-        yield concurrently_execute(self._start_pusher, pushers, 10)
+        await concurrently_execute(self._start_pusher, pushers, 10)
 
         logger.info("Started pushers")
 
-    @defer.inlineCallbacks
-    def _start_pusher(self, pusherdict):
+    async def _start_pusher(self, pusherdict):
         """Start the given pusher
 
         Args:
             pusherdict (dict): dict with the values pulled from the db table
 
         Returns:
-            Deferred[EmailPusher|HttpPusher]
+            EmailPusher|HttpPusher
         """
         if not self._pusher_shard_config.should_handle(
             self._instance_name, pusherdict["user_name"]
@@ -315,7 +302,7 @@ class PusherPool:
         user_id = pusherdict["user_name"]
         last_stream_ordering = pusherdict["last_stream_ordering"]
         if last_stream_ordering:
-            have_notifs = yield self.store.get_if_maybe_push_in_range_for_user(
+            have_notifs = await self.store.get_if_maybe_push_in_range_for_user(
                 user_id, last_stream_ordering
             )
         else:
@@ -327,8 +314,7 @@ class PusherPool:
 
         return p
 
-    @defer.inlineCallbacks
-    def remove_pusher(self, app_id, pushkey, user_id):
+    async def remove_pusher(self, app_id, pushkey, user_id):
         appid_pushkey = "%s:%s" % (app_id, pushkey)
 
         byuser = self.pushers.get(user_id, {})
@@ -340,6 +326,6 @@ class PusherPool:
 
             synapse_pushers.labels(type(pusher).__name__, pusher.app_id).dec()
 
-        yield self.store.delete_pusher_by_app_id_pushkey_user_id(
+        await self.store.delete_pusher_by_app_id_pushkey_user_id(
             app_id, pushkey, user_id
         )
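`_start_pushers` staggers startup via `concurrently_execute(self._start_pusher, pushers, 10)`. As a rough asyncio analogue (not Synapse's implementation), the idea is a fixed pool of workers draining one shared iterator, so at most ten starts are in flight at a time:

    import asyncio

    async def concurrently_execute(func, args, limit):
        it = iter(args)

        async def worker():
            # each worker pulls the next argument until the iterator is
            # exhausted; at most `limit` calls to func run concurrently
            for arg in it:
                await func(arg)

        await asyncio.gather(*(worker() for _ in range(limit)))

    async def main():
        await concurrently_execute(asyncio.sleep, [0.01] * 25, 10)

    asyncio.run(main())
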
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 8cfcdb0573..abea2be4ef 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -43,7 +43,7 @@ REQUIREMENTS = [
     "jsonschema>=2.5.1",
     "frozendict>=1",
     "unpaddedbase64>=1.1.0",
-    "canonicaljson>=1.1.3",
+    "canonicaljson>=1.2.0",
     # we use the type definitions added in signedjson 1.1.
     "signedjson>=1.1.0",
     "pynacl>=1.2.1",
diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py
index 5ef1c6c1dc..a84a064c8d 100644
--- a/synapse/replication/http/__init__.py
+++ b/synapse/replication/http/__init__.py
@@ -39,10 +39,10 @@ class ReplicationRestResource(JsonResource):
         federation.register_servlets(hs, self)
         presence.register_servlets(hs, self)
         membership.register_servlets(hs, self)
+        streams.register_servlets(hs, self)
 
         # The following can't currently be instantiated on workers.
         if hs.config.worker.worker_app is None:
             login.register_servlets(hs, self)
             register.register_servlets(hs, self)
             devices.register_servlets(hs, self)
-            streams.register_servlets(hs, self)
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index c287c4e269..ca065e819e 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -78,7 +78,9 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
         """
         event_payloads = []
         for event, context in event_and_contexts:
-            serialized_context = yield context.serialize(event, store)
+            serialized_context = yield defer.ensureDeferred(
+                context.serialize(event, store)
+            )
 
             event_payloads.append(
                 {
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index c981723c1a..b30e4d5039 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -77,7 +77,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
             extra_users (list(UserID)): Any extra users to notify about event
         """
 
-        serialized_context = yield context.serialize(event, store)
+        serialized_context = yield defer.ensureDeferred(context.serialize(event, store))
 
         payload = {
             "event": event.get_pdu_json(),
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 4985e40b1f..fcf8ebf1e7 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -24,6 +24,7 @@ from twisted.internet.protocol import ReconnectingClientFactory
 from synapse.api.constants import EventTypes
 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
 from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol
+from synapse.replication.tcp.streams import TypingStream
 from synapse.replication.tcp.streams.events import (
     EventsStream,
     EventsStreamEventRow,
@@ -104,6 +105,7 @@ class ReplicationDataHandler:
         self._clock = hs.get_clock()
         self._streams = hs.get_replication_streams()
         self._instance_name = hs.get_instance_name()
+        self._typing_handler = hs.get_typing_handler()
 
         # Map from stream to list of deferreds waiting for the stream to
         # arrive at a particular position. The lists are sorted by stream position.
@@ -127,6 +129,12 @@ class ReplicationDataHandler:
         """
         self.store.process_replication_rows(stream_name, instance_name, token, rows)
 
+        if stream_name == TypingStream.NAME:
+            self._typing_handler.process_replication_rows(token, rows)
+            self.notifier.on_new_event(
+                "typing_key", token, rooms=[row.room_id for row in rows]
+            )
+
         if stream_name == EventsStream.NAME:
             # We shouldn't get multiple rows per token for events stream, so
             # we don't need to optimise this for multiple rows.
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index 1de590bba2..1c303f3a46 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -16,6 +16,7 @@
 import logging
 from typing import (
     Any,
+    Awaitable,
     Dict,
     Iterable,
     Iterator,
@@ -33,6 +34,7 @@ from typing_extensions import Deque
 from twisted.internet.protocol import ReconnectingClientFactory
 
 from synapse.metrics import LaterGauge
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.tcp.client import DirectTcpReplicationClientFactory
 from synapse.replication.tcp.commands import (
     ClearUserSyncsCommand,
@@ -152,7 +154,7 @@ class ReplicationCommandHandler:
         # When POSITION or RDATA commands arrive, we stick them in a queue and process
         # them in order in a separate background process.
 
-        # the streams which are currently being processed by _unsafe_process_stream
+        # the streams which are currently being processed by _unsafe_process_queue
         self._processing_streams = set()  # type: Set[str]
 
         # for each stream, a queue of commands that are awaiting processing, and the
@@ -185,7 +187,7 @@ class ReplicationCommandHandler:
         if self._is_master:
             self._server_notices_sender = hs.get_server_notices_sender()
 
-    async def _add_command_to_stream_queue(
+    def _add_command_to_stream_queue(
         self, conn: AbstractConnection, cmd: Union[RdataCommand, PositionCommand]
     ) -> None:
         """Queue the given received command for processing
@@ -199,33 +201,34 @@ class ReplicationCommandHandler:
             logger.error("Got %s for unknown stream: %s", cmd.NAME, stream_name)
             return
 
-        # if we're already processing this stream, stick the new command in the
-        # queue, and we're done.
+        queue.append((cmd, conn))
+
+        # if we're already processing this stream, there's nothing more to do:
+        # the new entry on the queue will get picked up in due course
         if stream_name in self._processing_streams:
-            queue.append((cmd, conn))
             return
 
-        # otherwise, process the new command.
+        # fire off a background process to start processing the queue.
+        run_as_background_process(
+            "process-replication-data", self._unsafe_process_queue, stream_name
+        )
 
-        # arguably we should start off a new background process here, but nothing
-        # will be too upset if we don't return for ages, so let's save the overhead
-        # and use the existing logcontext.
+    async def _unsafe_process_queue(self, stream_name: str):
+        """Processes the command queue for the given stream, until it is empty
+
+        Does not check if there is already a thread processing the queue, hence "unsafe"
+        """
+        assert stream_name not in self._processing_streams
 
         self._processing_streams.add(stream_name)
         try:
-            # might as well skip the queue for this one, since it must be empty
-            assert not queue
-            await self._process_command(cmd, conn, stream_name)
-
-            # now process any other commands that have built up while we were
-            # dealing with that one.
+            queue = self._command_queues_by_stream.get(stream_name)
             while queue:
                 cmd, conn = queue.popleft()
                 try:
                     await self._process_command(cmd, conn, stream_name)
                 except Exception:
                     logger.exception("Failed to handle command %s", cmd)
-
         finally:
             self._processing_streams.discard(stream_name)
 
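The reworked queueing above reduces to a small, generic pattern: always enqueue first, and start a drainer only if one is not already running for that stream. An asyncio sketch, with `asyncio.ensure_future` standing in for `run_as_background_process`:

    import asyncio
    from collections import deque

    queues = {}         # stream_name -> deque of pending commands
    processing = set()  # streams that already have a drainer running

    def add_command(stream_name, cmd):
        queues.setdefault(stream_name, deque()).append(cmd)
        if stream_name in processing:
            return  # the running drainer will pick the new entry up
        asyncio.ensure_future(_unsafe_process_queue(stream_name))

    async def _unsafe_process_queue(stream_name):
        assert stream_name not in processing
        processing.add(stream_name)
        try:
            queue = queues[stream_name]
            while queue:
                cmd = queue.popleft()
                await asyncio.sleep(0)  # stand-in for _process_command(cmd)
        finally:
            processing.discard(stream_name)
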
@@ -299,7 +302,7 @@ class ReplicationCommandHandler:
         """
         return self._streams_to_replicate
 
-    async def on_REPLICATE(self, conn: AbstractConnection, cmd: ReplicateCommand):
+    def on_REPLICATE(self, conn: AbstractConnection, cmd: ReplicateCommand):
         self.send_positions_to_connection(conn)
 
     def send_positions_to_connection(self, conn: AbstractConnection):
@@ -318,57 +321,73 @@ class ReplicationCommandHandler:
                 )
             )
 
-    async def on_USER_SYNC(self, conn: AbstractConnection, cmd: UserSyncCommand):
+    def on_USER_SYNC(
+        self, conn: AbstractConnection, cmd: UserSyncCommand
+    ) -> Optional[Awaitable[None]]:
         user_sync_counter.inc()
 
         if self._is_master:
-            await self._presence_handler.update_external_syncs_row(
+            return self._presence_handler.update_external_syncs_row(
                 cmd.instance_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms
             )
+        else:
+            return None
 
-    async def on_CLEAR_USER_SYNC(
+    def on_CLEAR_USER_SYNC(
         self, conn: AbstractConnection, cmd: ClearUserSyncsCommand
-    ):
+    ) -> Optional[Awaitable[None]]:
         if self._is_master:
-            await self._presence_handler.update_external_syncs_clear(cmd.instance_id)
+            return self._presence_handler.update_external_syncs_clear(cmd.instance_id)
+        else:
+            return None
 
-    async def on_FEDERATION_ACK(
-        self, conn: AbstractConnection, cmd: FederationAckCommand
-    ):
+    def on_FEDERATION_ACK(self, conn: AbstractConnection, cmd: FederationAckCommand):
         federation_ack_counter.inc()
 
         if self._federation_sender:
             self._federation_sender.federation_ack(cmd.instance_name, cmd.token)
 
-    async def on_REMOVE_PUSHER(
+    def on_REMOVE_PUSHER(
         self, conn: AbstractConnection, cmd: RemovePusherCommand
-    ):
+    ) -> Optional[Awaitable[None]]:
         remove_pusher_counter.inc()
 
         if self._is_master:
-            await self._store.delete_pusher_by_app_id_pushkey_user_id(
-                app_id=cmd.app_id, pushkey=cmd.push_key, user_id=cmd.user_id
-            )
+            return self._handle_remove_pusher(cmd)
+        else:
+            return None
+
+    async def _handle_remove_pusher(self, cmd: RemovePusherCommand):
+        await self._store.delete_pusher_by_app_id_pushkey_user_id(
+            app_id=cmd.app_id, pushkey=cmd.push_key, user_id=cmd.user_id
+        )
 
-            self._notifier.on_new_replication_data()
+        self._notifier.on_new_replication_data()
 
-    async def on_USER_IP(self, conn: AbstractConnection, cmd: UserIpCommand):
+    def on_USER_IP(
+        self, conn: AbstractConnection, cmd: UserIpCommand
+    ) -> Optional[Awaitable[None]]:
         user_ip_cache_counter.inc()
 
         if self._is_master:
-            await self._store.insert_client_ip(
-                cmd.user_id,
-                cmd.access_token,
-                cmd.ip,
-                cmd.user_agent,
-                cmd.device_id,
-                cmd.last_seen,
-            )
+            return self._handle_user_ip(cmd)
+        else:
+            return None
+
+    async def _handle_user_ip(self, cmd: UserIpCommand):
+        await self._store.insert_client_ip(
+            cmd.user_id,
+            cmd.access_token,
+            cmd.ip,
+            cmd.user_agent,
+            cmd.device_id,
+            cmd.last_seen,
+        )
 
-        if self._server_notices_sender:
-            await self._server_notices_sender.on_user_ip(cmd.user_id)
+        assert self._server_notices_sender is not None
+        await self._server_notices_sender.on_user_ip(cmd.user_id)
 
-    async def on_RDATA(self, conn: AbstractConnection, cmd: RdataCommand):
+    def on_RDATA(self, conn: AbstractConnection, cmd: RdataCommand):
         if cmd.instance_name == self._instance_name:
             # Ignore RDATA that are just our own echoes
             return
@@ -382,7 +401,7 @@ class ReplicationCommandHandler:
         #   2. so we don't race with getting a POSITION command and fetching
         #      missing RDATA.
 
-        await self._add_command_to_stream_queue(conn, cmd)
+        self._add_command_to_stream_queue(conn, cmd)
 
     async def _process_rdata(
         self, stream_name: str, conn: AbstractConnection, cmd: RdataCommand
@@ -459,14 +478,14 @@ class ReplicationCommandHandler:
             stream_name, instance_name, token, rows
         )
 
-    async def on_POSITION(self, conn: AbstractConnection, cmd: PositionCommand):
+    def on_POSITION(self, conn: AbstractConnection, cmd: PositionCommand):
         if cmd.instance_name == self._instance_name:
             # Ignore POSITION that are just our own echoes
             return
 
         logger.info("Handling '%s %s'", cmd.NAME, cmd.to_line())
 
-        await self._add_command_to_stream_queue(conn, cmd)
+        self._add_command_to_stream_queue(conn, cmd)
 
     async def _process_position(
         self, stream_name: str, conn: AbstractConnection, cmd: PositionCommand
@@ -526,9 +545,7 @@ class ReplicationCommandHandler:
 
         self._streams_by_connection.setdefault(conn, set()).add(stream_name)
 
-    async def on_REMOTE_SERVER_UP(
-        self, conn: AbstractConnection, cmd: RemoteServerUpCommand
-    ):
+    def on_REMOTE_SERVER_UP(self, conn: AbstractConnection, cmd: RemoteServerUpCommand):
         """"Called when get a new REMOTE_SERVER_UP command."""
         self._replication_data_handler.on_remote_server_up(cmd.data)
 
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 23191e3218..0350923898 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -50,6 +50,7 @@ import abc
 import fcntl
 import logging
 import struct
+from inspect import isawaitable
 from typing import TYPE_CHECKING, List
 
 from prometheus_client import Counter
@@ -128,6 +129,8 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
 
     On receiving a new command it calls `on_<COMMAND_NAME>` with the parsed
     command before delegating to `ReplicationCommandHandler.on_<COMMAND_NAME>`.
+    `ReplicationCommandHandler.on_<COMMAND_NAME>` can optionally return a coroutine;
+    if so, that will get run as a background process.
 
     It also sends `PING` periodically, and correctly times out remote connections
     (if they send a `PING` command)
@@ -166,9 +169,9 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
 
         # a logcontext which we use for processing incoming commands. We declare it as a
         # background process so that the CPU stats get reported to prometheus.
-        self._logging_context = BackgroundProcessLoggingContext(
-            "replication_command_handler-%s" % self.conn_id
-        )
+        ctx_name = "replication-conn-%s" % self.conn_id
+        self._logging_context = BackgroundProcessLoggingContext(ctx_name)
+        self._logging_context.request = ctx_name
 
     def connectionMade(self):
         logger.info("[%s] Connection established", self.id())
@@ -246,18 +249,17 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
 
         tcp_inbound_commands_counter.labels(cmd.NAME, self.name).inc()
 
-        # Now lets try and call on_<CMD_NAME> function
-        run_as_background_process(
-            "replication-" + cmd.get_logcontext_id(), self.handle_command, cmd
-        )
+        self.handle_command(cmd)
 
-    async def handle_command(self, cmd: Command):
+    def handle_command(self, cmd: Command) -> None:
         """Handle a command we have received over the replication stream.
 
         First calls `self.on_<COMMAND>` if it exists, then calls
-        `self.command_handler.on_<COMMAND>` if it exists. This allows for
-        protocol level handling of commands (e.g. PINGs), before delegating to
-        the handler.
+        `self.command_handler.on_<COMMAND>` if it exists (which can optionally
+        return an Awaitable).
+
+        This allows for protocol level handling of commands (e.g. PINGs), before
+        delegating to the handler.
 
         Args:
             cmd: received command
@@ -268,13 +270,22 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         # specific handling.
         cmd_func = getattr(self, "on_%s" % (cmd.NAME,), None)
         if cmd_func:
-            await cmd_func(cmd)
+            cmd_func(cmd)
             handled = True
 
         # Then call out to the handler.
         cmd_func = getattr(self.command_handler, "on_%s" % (cmd.NAME,), None)
         if cmd_func:
-            await cmd_func(self, cmd)
+            res = cmd_func(self, cmd)
+
+            # the handler might be a coroutine: fire it off as a background process
+            # if so.
+
+            if isawaitable(res):
+                run_as_background_process(
+                    "replication-" + cmd.get_logcontext_id(), lambda: res
+                )
+
             handled = True
 
         if not handled:
@@ -350,10 +361,10 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         for cmd in pending:
             self.send_command(cmd)
 
-    async def on_PING(self, line):
+    def on_PING(self, line):
         self.received_ping = True
 
-    async def on_ERROR(self, cmd):
+    def on_ERROR(self, cmd):
         logger.error("[%s] Remote reported error: %r", self.id(), cmd.data)
 
     def pauseProducing(self):
@@ -448,7 +459,7 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
         self.send_command(ServerCommand(self.server_name))
         super().connectionMade()
 
-    async def on_NAME(self, cmd):
+    def on_NAME(self, cmd):
         logger.info("[%s] Renamed to %r", self.id(), cmd.data)
         self.name = cmd.data
 
@@ -477,7 +488,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
         # Once we've connected subscribe to the necessary streams
         self.replicate()
 
-    async def on_SERVER(self, cmd):
+    def on_SERVER(self, cmd):
         if cmd.data != self.server_name:
             logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data)
             self.send_error("Wrong remote")
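`handle_command` is now synchronous on both the TCP path above and the Redis path below; a handler with real work to do returns a coroutine, which is handed off to a background process so the connection's read loop never blocks. The shared shape, condensed (with `run_in_background` as a stand-in for `run_as_background_process`):

    from inspect import isawaitable

    def handle_command(handler, conn, cmd, run_in_background):
        cmd_func = getattr(handler, "on_%s" % cmd.NAME, None)
        if cmd_func is None:
            return False
        res = cmd_func(conn, cmd)
        if isawaitable(res):
            # fire the coroutine off in the background; the read loop moves on
            run_in_background("replication-" + cmd.get_logcontext_id(), lambda: res)
        return True
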
diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py
index b5c533a607..f225e533de 100644
--- a/synapse/replication/tcp/redis.py
+++ b/synapse/replication/tcp/redis.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from inspect import isawaitable
 from typing import TYPE_CHECKING
 
 import txredisapi
@@ -124,36 +125,32 @@ class RedisSubscriber(txredisapi.SubscriberProtocol, AbstractConnection):
         # remote instances.
         tcp_inbound_commands_counter.labels(cmd.NAME, "redis").inc()
 
-        # Now lets try and call on_<CMD_NAME> function
-        run_as_background_process(
-            "replication-" + cmd.get_logcontext_id(), self.handle_command, cmd
-        )
+        self.handle_command(cmd)
 
-    async def handle_command(self, cmd: Command):
+    def handle_command(self, cmd: Command) -> None:
         """Handle a command we have received over the replication stream.
 
-        By default delegates to on_<COMMAND>, which should return an awaitable.
+        Delegates to `self.handler.on_<COMMAND>` (which can optionally return an
+        Awaitable).
 
         Args:
             cmd: received command
         """
-        handled = False
-
-        # First call any command handlers on this instance. These are for redis
-        # specific handling.
-        cmd_func = getattr(self, "on_%s" % (cmd.NAME,), None)
-        if cmd_func:
-            await cmd_func(cmd)
-            handled = True
 
-        # Then call out to the handler.
         cmd_func = getattr(self.handler, "on_%s" % (cmd.NAME,), None)
-        if cmd_func:
-            await cmd_func(self, cmd)
-            handled = True
-
-        if not handled:
+        if not cmd_func:
             logger.warning("Unhandled command: %r", cmd)
+            return
+
+        res = cmd_func(self, cmd)
+
+        # the handler might be a coroutine: fire it off as a background process
+        # if so.
+
+        if isawaitable(res):
+            run_as_background_process(
+                "replication-" + cmd.get_logcontext_id(), lambda: res
+            )
 
     def connectionLost(self, reason):
         logger.info("Lost connection to redis")
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index b8c95d045a..a8364d9793 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -103,6 +103,14 @@ class DeleteRoomRestServlet(RestServlet):
                 Codes.BAD_JSON,
             )
 
+        purge = content.get("purge", True)
+        if not isinstance(purge, bool):
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "Param 'purge' must be a boolean, if given",
+                Codes.BAD_JSON,
+            )
+
         ret = await self.room_shutdown_handler.shutdown_room(
             room_id=room_id,
             new_room_user_id=content.get("new_room_user_id"),
@@ -113,7 +121,8 @@ class DeleteRoomRestServlet(RestServlet):
         )
 
         # Purge room
-        await self.pagination_handler.purge_room(room_id)
+        if purge:
+            await self.pagination_handler.purge_room(room_id)
 
         return (200, ret)
 
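The new optional `purge` parameter (defaulting to True to preserve the old behaviour) lets an admin shut a room down without deleting its history. A hedged usage sketch, assuming the endpoint path documented for this admin API and a valid admin access token:

    import requests  # any HTTP client would do

    resp = requests.post(
        "https://synapse.example.com/_synapse/admin/v1/rooms/%21abc%3Aexample.com/delete",
        headers={"Authorization": "Bearer <admin_access_token>"},
        json={
            "new_room_user_id": "@admin:example.com",
            "purge": False,  # shut the room down but keep its events in the DB
        },
    )
    resp.raise_for_status()
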
diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py
index b21538766d..f016b4f1bd 100644
--- a/synapse/rest/client/v2_alpha/_base.py
+++ b/synapse/rest/client/v2_alpha/_base.py
@@ -17,8 +17,7 @@
 """
 import logging
 import re
-
-from twisted.internet import defer
+from typing import Iterable, Pattern
 
 from synapse.api.errors import InteractiveAuthIncompleteError
 from synapse.api.urls import CLIENT_API_PREFIX
@@ -27,15 +26,23 @@ from synapse.types import JsonDict
 logger = logging.getLogger(__name__)
 
 
-def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
+def client_patterns(
+    path_regex: str,
+    releases: Iterable[int] = (0,),
+    unstable: bool = True,
+    v1: bool = False,
+) -> Iterable[Pattern]:
     """Creates a regex compiled client path with the correct client path
     prefix.
 
     Args:
-        path_regex (str): The regex string to match. This should NOT have a ^
+        path_regex: The regex string to match. This should NOT have a ^
             as this will be prefixed.
+        releases: An iterable of releases to include this endpoint under.
+        unstable: If true, include this endpoint under the "unstable" prefix.
+        v1: If true, include this endpoint under the "api/v1" prefix.
     Returns:
-        SRE_Pattern
+        An iterable of patterns.
     """
     patterns = []
 
@@ -73,34 +80,22 @@ def set_timeline_upper_limit(filter_json: JsonDict, filter_timeline_limit: int)
 def interactive_auth_handler(orig):
     """Wraps an on_POST method to handle InteractiveAuthIncompleteErrors
 
-    Takes a on_POST method which returns a deferred (errcode, body) response
+    Takes an on_POST method which returns an Awaitable (errcode, body) response
     and adds exception handling to turn an InteractiveAuthIncompleteError into
     a 401 response.
 
     Normal usage is:
 
     @interactive_auth_handler
-    @defer.inlineCallbacks
-    def on_POST(self, request):
+    async def on_POST(self, request):
         # ...
-        yield self.auth_handler.check_auth
-            """
+        await self.auth_handler.check_auth
+    """
 
-    def wrapped(*args, **kwargs):
-        res = defer.ensureDeferred(orig(*args, **kwargs))
-        res.addErrback(_catch_incomplete_interactive_auth)
-        return res
+    async def wrapped(*args, **kwargs):
+        try:
+            return await orig(*args, **kwargs)
+        except InteractiveAuthIncompleteError as e:
+            return 401, e.result
 
     return wrapped
-
-
-def _catch_incomplete_interactive_auth(f):
-    """helper for interactive_auth_handler
-
-    Catches InteractiveAuthIncompleteErrors and turns them into 401 responses
-
-    Args:
-        f (failure.Failure):
-    """
-    f.trap(InteractiveAuthIncompleteError)
-    return 401, f.value.result
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index a5c24fbd63..3f5bf75e59 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -426,6 +426,7 @@ class SyncRestServlet(RestServlet):
             result["ephemeral"] = {"events": ephemeral_events}
             result["unread_notifications"] = room.unread_notifications
             result["summary"] = room.summary
+            result["org.matrix.msc2654.unread_count"] = room.unread_count
 
         return result
 
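Clients opting into the experimental MSC2654 behaviour will see the new field alongside the existing counts in each joined room's sync section; roughly (a hypothetical trimmed payload fragment):

    joined_room_section = {
        "unread_notifications": {"notification_count": 1, "highlight_count": 0},
        "org.matrix.msc2654.unread_count": 5,  # experimental, unstable-prefixed
    }
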
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index 595849f9d5..20ddb9550b 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -17,8 +17,9 @@
 import logging
 import os
 import urllib
+from typing import Awaitable
 
-from twisted.internet import defer
+from twisted.internet.interfaces import IConsumer
 from twisted.protocols.basic import FileSender
 
 from synapse.api.errors import Codes, SynapseError, cs_error
@@ -77,8 +78,9 @@ def respond_404(request):
     )
 
 
-@defer.inlineCallbacks
-def respond_with_file(request, media_type, file_path, file_size=None, upload_name=None):
+async def respond_with_file(
+    request, media_type, file_path, file_size=None, upload_name=None
+):
     logger.debug("Responding with %r", file_path)
 
     if os.path.isfile(file_path):
@@ -89,7 +91,7 @@ def respond_with_file(request, media_type, file_path, file_size=None, upload_nam
         add_file_headers(request, media_type, file_size, upload_name)
 
         with open(file_path, "rb") as f:
-            yield make_deferred_yieldable(FileSender().beginFileTransfer(f, request))
+            await make_deferred_yieldable(FileSender().beginFileTransfer(f, request))
 
         finish_request(request)
     else:
@@ -198,8 +200,9 @@ def _can_encode_filename_as_token(x):
     return True
 
 
-@defer.inlineCallbacks
-def respond_with_responder(request, responder, media_type, file_size, upload_name=None):
+async def respond_with_responder(
+    request, responder, media_type, file_size, upload_name=None
+):
     """Responds to the request with given responder. If responder is None then
     returns 404.
 
@@ -218,7 +221,7 @@ def respond_with_responder(request, responder, media_type, file_size, upload_nam
     add_file_headers(request, media_type, file_size, upload_name)
     try:
         with responder:
-            yield responder.write_to_consumer(request)
+            await responder.write_to_consumer(request)
     except Exception as e:
         # The majority of the time this will be due to the client having gone
         # away. Unfortunately, Twisted simply throws a generic exception at us
@@ -239,14 +242,14 @@ class Responder(object):
     held can be cleaned up.
     """
 
-    def write_to_consumer(self, consumer):
+    def write_to_consumer(self, consumer: IConsumer) -> Awaitable:
         """Stream response into consumer
 
         Args:
-            consumer (IConsumer)
+            consumer: The consumer to stream into.
 
         Returns:
-            Deferred: Resolves once the response has finished being written
+            Resolves once the response has finished being written
         """
         pass
 
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 45628c07b4..6fb4039e98 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -18,10 +18,11 @@ import errno
 import logging
 import os
 import shutil
-from typing import Dict, Tuple
+from typing import IO, Dict, Optional, Tuple
 
 import twisted.internet.error
 import twisted.web.http
+from twisted.web.http import Request
 from twisted.web.resource import Resource
 
 from synapse.api.errors import (
@@ -40,6 +41,7 @@ from synapse.util.stringutils import random_string
 
 from ._base import (
     FileInfo,
+    Responder,
     get_filename_from_headers,
     respond_404,
     respond_with_responder,
@@ -135,19 +137,24 @@ class MediaRepository(object):
             self.recently_accessed_locals.add(media_id)
 
     async def create_content(
-        self, media_type, upload_name, content, content_length, auth_user
-    ):
+        self,
+        media_type: str,
+        upload_name: str,
+        content: IO,
+        content_length: int,
+        auth_user: str,
+    ) -> str:
         """Store uploaded content for a local user and return the mxc URL
 
         Args:
-            media_type(str): The content type of the file
-            upload_name(str): The name of the file
+            media_type: The content type of the file
+            upload_name: The name of the file
             content: A file like object that is the content to store
-            content_length(int): The length of the content
-            auth_user(str): The user_id of the uploader
+            content_length: The length of the content
+            auth_user: The user_id of the uploader
 
         Returns:
-            Deferred[str]: The mxc url of the stored content
+            The mxc url of the stored content
         """
         media_id = random_string(24)
 
@@ -170,19 +177,20 @@ class MediaRepository(object):
 
         return "mxc://%s/%s" % (self.server_name, media_id)
 
-    async def get_local_media(self, request, media_id, name):
+    async def get_local_media(
+        self, request: Request, media_id: str, name: Optional[str]
+    ) -> None:
         """Responds to reqests for local media, if exists, or returns 404.
 
         Args:
-            request(twisted.web.http.Request)
-            media_id (str): The media ID of the content. (This is the same as
+            request: The incoming request.
+            media_id: The media ID of the content. (This is the same as
                 the file_id for local content.)
-            name (str|None): Optional name that, if specified, will be used as
+            name: Optional name that, if specified, will be used as
                 the filename in the Content-Disposition header of the response.
 
         Returns:
-            Deferred: Resolves once a response has successfully been written
-                to request
+            Resolves once a response has successfully been written to request
         """
         media_info = await self.store.get_local_media(media_id)
         if not media_info or media_info["quarantined_by"]:
@@ -203,20 +211,20 @@ class MediaRepository(object):
             request, responder, media_type, media_length, upload_name
         )
 
-    async def get_remote_media(self, request, server_name, media_id, name):
+    async def get_remote_media(
+        self, request: Request, server_name: str, media_id: str, name: Optional[str]
+    ) -> None:
         """Respond to requests for remote media.
 
         Args:
-            request(twisted.web.http.Request)
-            server_name (str): Remote server_name where the media originated.
-            media_id (str): The media ID of the content (as defined by the
-                remote server).
-            name (str|None): Optional name that, if specified, will be used as
+            request: The incoming request.
+            server_name: Remote server_name where the media originated.
+            media_id: The media ID of the content (as defined by the remote server).
+            name: Optional name that, if specified, will be used as
                 the filename in the Content-Disposition header of the response.
 
         Returns:
-            Deferred: Resolves once a response has successfully been written
-                to request
+            Resolves once a response has successfully been written to request
         """
         if (
             self.federation_domain_whitelist is not None
@@ -245,17 +253,16 @@ class MediaRepository(object):
         else:
             respond_404(request)
 
-    async def get_remote_media_info(self, server_name, media_id):
+    async def get_remote_media_info(self, server_name: str, media_id: str) -> dict:
         """Gets the media info associated with the remote file, downloading
         if necessary.
 
         Args:
-            server_name (str): Remote server_name where the media originated.
-            media_id (str): The media ID of the content (as defined by the
-                remote server).
+            server_name: Remote server_name where the media originated.
+            media_id: The media ID of the content (as defined by the remote server).
 
         Returns:
-            Deferred[dict]: The media_info of the file
+            The media info of the file
         """
         if (
             self.federation_domain_whitelist is not None
@@ -278,7 +285,9 @@ class MediaRepository(object):
 
         return media_info
 
-    async def _get_remote_media_impl(self, server_name, media_id):
+    async def _get_remote_media_impl(
+        self, server_name: str, media_id: str
+    ) -> Tuple[Optional[Responder], dict]:
         """Looks for media in local cache, if not there then attempt to
         download from remote server.
 
@@ -288,7 +297,7 @@ class MediaRepository(object):
                 remote server).
 
         Returns:
-            Deferred[(Responder, media_info)]
+            A tuple of responder and the media info of the file.
         """
         media_info = await self.store.get_cached_remote_media(server_name, media_id)
 
@@ -319,19 +328,21 @@ class MediaRepository(object):
         responder = await self.media_storage.fetch_media(file_info)
         return responder, media_info
 
-    async def _download_remote_file(self, server_name, media_id, file_id):
+    async def _download_remote_file(
+        self, server_name: str, media_id: str, file_id: str
+    ) -> dict:
         """Attempt to download the remote file from the given server name,
         using the given file_id as the local id.
 
         Args:
-            server_name (str): Originating server
-            media_id (str): The media ID of the content (as defined by the
+            server_name: Originating server
+            media_id: The media ID of the content (as defined by the
                 remote server). This is different than the file_id, which is
                 locally generated.
-            file_id (str): Local file ID
+            file_id: Local file ID
 
         Returns:
-            Deferred[MediaInfo]
+            The media info of the file.
         """
 
         file_info = FileInfo(server_name=server_name, file_id=file_id)
@@ -549,25 +560,31 @@ class MediaRepository(object):
             return output_path
 
     async def _generate_thumbnails(
-        self, server_name, media_id, file_id, media_type, url_cache=False
-    ):
+        self,
+        server_name: Optional[str],
+        media_id: str,
+        file_id: str,
+        media_type: str,
+        url_cache: bool = False,
+    ) -> Optional[dict]:
         """Generate and store thumbnails for an image.
 
         Args:
-            server_name (str|None): The server name if remote media, else None if local
-            media_id (str): The media ID of the content. (This is the same as
+            server_name: The server name if remote media, else None if local
+            media_id: The media ID of the content. (This is the same as
                 the file_id for local content)
-            file_id (str): Local file ID
-            media_type (str): The content type of the file
-            url_cache (bool): If we are thumbnailing images downloaded for the URL cache,
+            file_id: Local file ID
+            media_type: The content type of the file
+            url_cache: If we are thumbnailing images downloaded for the URL cache,
                 used exclusively by the url previewer
 
         Returns:
-            Deferred[dict]: Dict with "width" and "height" keys of original image
+            Dict with "width" and "height" keys of original image or None if the
+            media cannot be thumbnailed.
         """
         requirements = self._get_thumbnail_requirements(media_type)
         if not requirements:
-            return
+            return None
 
         input_path = await self.media_storage.ensure_media_is_in_local_cache(
             FileInfo(server_name, file_id, url_cache=url_cache)
@@ -584,7 +601,7 @@ class MediaRepository(object):
                 m_height,
                 self.max_image_pixels,
             )
-            return
+            return None
 
         if thumbnailer.transpose_method is not None:
             m_width, m_height = await defer_to_thread(
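The hunks in this file all apply the same mechanical conversion; as a standalone sketch (illustrative names, not Synapse code), a `@defer.inlineCallbacks` generator whose docstring advertised `Deferred[str]` becomes a native coroutine with the type stated in its signature:

```python
from twisted.internet import defer

# Before: generator-based; the Deferred result type lives in the docstring.
@defer.inlineCallbacks
def old_style():
    info = yield defer.succeed({"path": "/tmp/x"})  # each step yields a Deferred
    return info["path"]

# After: a native coroutine. Twisted Deferreds are awaitable, so `await`
# replaces `yield` and the annotation replaces "Deferred[str]" in the docs.
async def new_style() -> str:
    info = await defer.succeed({"path": "/tmp/x"})
    return info["path"]
```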
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index 79cb0dddbe..858b6d3005 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -12,19 +12,25 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import contextlib
+import inspect
 import logging
 import os
 import shutil
+from typing import IO, TYPE_CHECKING, Any, Optional, Sequence
 
-from twisted.internet import defer
 from twisted.protocols.basic import FileSender
 
 from synapse.logging.context import defer_to_thread, make_deferred_yieldable
 from synapse.util.file_consumer import BackgroundFileConsumer
 
-from ._base import Responder
+from ._base import FileInfo, Responder
+from .filepath import MediaFilePaths
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+    from .storage_provider import StorageProvider
 
 logger = logging.getLogger(__name__)
 
@@ -33,49 +39,53 @@ class MediaStorage(object):
     """Responsible for storing/fetching files from local sources.
 
     Args:
-        hs (synapse.server.Homeserver)
-        local_media_directory (str): Base path where we store media on disk
-        filepaths (MediaFilePaths)
-        storage_providers ([StorageProvider]): List of StorageProvider that are
-            used to fetch and store files.
+        hs
+        local_media_directory: Base path where we store media on disk
+        filepaths
+        storage_providers: List of StorageProviders used to fetch and store files.
     """
 
-    def __init__(self, hs, local_media_directory, filepaths, storage_providers):
+    def __init__(
+        self,
+        hs: "HomeServer",
+        local_media_directory: str,
+        filepaths: MediaFilePaths,
+        storage_providers: Sequence["StorageProvider"],
+    ):
         self.hs = hs
         self.local_media_directory = local_media_directory
         self.filepaths = filepaths
         self.storage_providers = storage_providers
 
-    @defer.inlineCallbacks
-    def store_file(self, source, file_info):
+    async def store_file(self, source: IO, file_info: FileInfo) -> str:
         """Write `source` to the on disk media store, and also any other
         configured storage providers
 
         Args:
             source: A file like object that should be written
-            file_info (FileInfo): Info about the file to store
+            file_info: Info about the file to store
 
         Returns:
-            Deferred[str]: the file path written to in the primary media store
+            the file path written to in the primary media store
         """
 
         with self.store_into_file(file_info) as (f, fname, finish_cb):
             # Write to the main repository
-            yield defer_to_thread(
+            await defer_to_thread(
                 self.hs.get_reactor(), _write_file_synchronously, source, f
             )
-            yield finish_cb()
+            await finish_cb()
 
         return fname
 
     @contextlib.contextmanager
-    def store_into_file(self, file_info):
+    def store_into_file(self, file_info: FileInfo):
         """Context manager used to get a file like object to write into, as
         described by file_info.
 
         Actually yields a 3-tuple (file, fname, finish_cb), where file is a file
         like object that can be written to, fname is the absolute path of file
-        on disk, and finish_cb is a function that returns a Deferred.
+        on disk, and finish_cb is a function that returns an awaitable.
 
         fname can be used to read the contents from after upload, e.g. to
         generate thumbnails.
@@ -85,13 +95,13 @@ class MediaStorage(object):
         error.
 
         Args:
-            file_info (FileInfo): Info about the file to store
+            file_info: Info about the file to store
 
         Example:
 
             with media_storage.store_into_file(info) as (f, fname, finish_cb):
                 # .. write into f ...
-                yield finish_cb()
+                await finish_cb()
         """
 
         path = self._file_info_to_path(file_info)
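Since `store_into_file` stays a synchronous context manager while `finish_cb` is now a coroutine, callers mix `with` and `await`. A toy standalone analogue of that shape (all names here are illustrative, not Synapse's):

```python
import contextlib

@contextlib.contextmanager
def store_into(path: str):
    """Yields (file, path, finish_cb); the caller must await finish_cb()."""
    finished = [False]

    async def finish():
        # The real class forwards the finished file to storage providers here.
        finished[0] = True

    with open(path, "wb") as f:
        yield f, path, finish

    if not finished[0]:
        raise Exception("Finished callback not called")

# Usage, from inside a coroutine:
#     with store_into("/tmp/example.bin") as (f, fname, finish_cb):
#         f.write(b"data")
#         await finish_cb()
```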
@@ -103,10 +113,13 @@ class MediaStorage(object):
 
         finished_called = [False]
 
-        @defer.inlineCallbacks
-        def finish():
+        async def finish():
             for provider in self.storage_providers:
-                yield provider.store_file(path, file_info)
+                # store_file is supposed to return an Awaitable, but guard
+                # against improper implementations.
+                result = provider.store_file(path, file_info)
+                if inspect.isawaitable(result):
+                    await result
 
             finished_called[0] = True
 
@@ -123,17 +136,15 @@ class MediaStorage(object):
         if not finished_called[0]:
             raise Exception("Finished callback not called")
 
-    @defer.inlineCallbacks
-    def fetch_media(self, file_info):
+    async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
         """Attempts to fetch media described by file_info from the local cache
         and configured storage providers.
 
         Args:
-            file_info (FileInfo)
+            file_info
 
         Returns:
-            Deferred[Responder|None]: Returns a Responder if the file was found,
-                otherwise None.
+            Returns a Responder if the file was found, otherwise None.
         """
 
         path = self._file_info_to_path(file_info)
@@ -142,23 +153,26 @@ class MediaStorage(object):
             return FileResponder(open(local_path, "rb"))
 
         for provider in self.storage_providers:
-            res = yield provider.fetch(path, file_info)
+            res = provider.fetch(path, file_info)  # type: Any
+            # Fetch is supposed to return an Awaitable[Responder], but guard
+            # against improper implementations.
+            if inspect.isawaitable(res):
+                res = await res
             if res:
                 logger.debug("Streaming %s from %s", path, provider)
                 return res
 
         return None
 
-    @defer.inlineCallbacks
-    def ensure_media_is_in_local_cache(self, file_info):
+    async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str:
         """Ensures that the given file is in the local cache. Attempts to
         download it from storage providers if it isn't.
 
         Args:
-            file_info (FileInfo)
+            file_info
 
         Returns:
-            Deferred[str]: Full path to local file
+            Full path to local file
         """
         path = self._file_info_to_path(file_info)
         local_path = os.path.join(self.local_media_directory, path)
@@ -170,29 +184,27 @@ class MediaStorage(object):
             os.makedirs(dirname)
 
         for provider in self.storage_providers:
-            res = yield provider.fetch(path, file_info)
+            res = provider.fetch(path, file_info)  # type: Any
+            # Fetch is supposed to return an Awaitable[Responder], but guard
+            # against improper implementations.
+            if inspect.isawaitable(res):
+                res = await res
             if res:
                 with res:
                     consumer = BackgroundFileConsumer(
                         open(local_path, "wb"), self.hs.get_reactor()
                     )
-                    yield res.write_to_consumer(consumer)
-                    yield consumer.wait()
+                    await res.write_to_consumer(consumer)
+                    await consumer.wait()
                 return local_path
 
         raise Exception("file could not be found")
 
-    def _file_info_to_path(self, file_info):
+    def _file_info_to_path(self, file_info: FileInfo) -> str:
         """Converts file_info into a relative path.
 
         The path is suitable for storing files under a directory, e.g. used to
         store files on local FS under the base media repository directory.
-
-        Args:
-            file_info (FileInfo)
-
-        Returns:
-            str
         """
         if file_info.url_cache:
             if file_info.thumbnail:
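The `inspect.isawaitable` guard above appears three times in this file; the defensive call could be factored into a helper like the following sketch (not part of the diff), which tolerates storage providers that still return plain values or Deferreds rather than coroutines:

```python
import inspect

async def maybe_await(value):
    """Await value if it is awaitable, otherwise return it unchanged."""
    if inspect.isawaitable(value):
        return await value
    return value

# e.g.  res = await maybe_await(provider.fetch(path, file_info))
```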
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index e52c86c798..e12f65a206 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -26,6 +26,7 @@ import traceback
 from typing import Dict, Optional
 from urllib import parse as urlparse
 
+import attr
 from canonicaljson import json
 
 from twisted.internet import defer
@@ -56,6 +57,65 @@ _content_type_match = re.compile(r'.*; *charset="?(.*?)"?(;|$)', flags=re.I)
 OG_TAG_NAME_MAXLEN = 50
 OG_TAG_VALUE_MAXLEN = 1000
 
+ONE_HOUR = 60 * 60 * 1000
+
+# A map of oEmbed API endpoints to the URL globs they handle.
+_oembed_globs = {
+    # Twitter.
+    "https://publish.twitter.com/oembed": [
+        "https://twitter.com/*/status/*",
+        "https://*.twitter.com/*/status/*",
+        "https://twitter.com/*/moments/*",
+        "https://*.twitter.com/*/moments/*",
+        # Include the HTTP versions too.
+        "http://twitter.com/*/status/*",
+        "http://*.twitter.com/*/status/*",
+        "http://twitter.com/*/moments/*",
+        "http://*.twitter.com/*/moments/*",
+    ],
+}
+# Convert the globs to regular expressions.
+_oembed_patterns = {}
+for endpoint, globs in _oembed_globs.items():
+    for glob in globs:
+        # Convert the glob into a sane regular expression to match against. The
+        # rules followed will be slightly different for the domain portion vs.
+        # the rest.
+        #
+        # 1. The scheme must be one of HTTP / HTTPS (and have no globs).
+        # 2. The domain can have globs, but we limit it to characters that can
+        #    reasonably be a domain part.
+        #    TODO: This does not attempt to handle Unicode domain names.
+        # 3. Other parts allow a glob to be any one, or more, characters.
+        results = urlparse.urlparse(glob)
+
+        # Ensure the scheme does not have wildcards (and is a sane scheme).
+        if results.scheme not in {"http", "https"}:
+            raise ValueError("Insecure oEmbed glob scheme: %s" % (results.scheme,))
+
+        pattern = urlparse.urlunparse(
+            [
+                results.scheme,
+                re.escape(results.netloc).replace("\\*", "[a-zA-Z0-9_-]+"),
+            ]
+            + [re.escape(part).replace("\\*", ".+") for part in results[2:]]
+        )
+        _oembed_patterns[re.compile(pattern)] = endpoint
+
+
+@attr.s
+class OEmbedResult:
+    # Either HTML content or URL must be provided.
+    html = attr.ib(type=Optional[str])
+    url = attr.ib(type=Optional[str])
+    title = attr.ib(type=Optional[str])
+    # Number of seconds to cache the content.
+    cache_age = attr.ib(type=int)
+
+
+class OEmbedError(Exception):
+    """An error occurred processing the oEmbed object."""
+
 
 class PreviewUrlResource(DirectServeJsonResource):
     isLeaf = True
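The glob-to-regex conversion above can be exercised in isolation. A sketch of the same escaping rules as a standalone function (the example URL is illustrative):

```python
import re
from urllib import parse as urlparse

def glob_to_pattern(glob: str):
    results = urlparse.urlparse(glob)
    if results.scheme not in {"http", "https"}:
        raise ValueError("Insecure oEmbed glob scheme: %s" % (results.scheme,))
    return re.compile(
        urlparse.urlunparse(
            [
                results.scheme,
                # Domain globs are limited to plausible hostname characters.
                re.escape(results.netloc).replace("\\*", "[a-zA-Z0-9_-]+"),
            ]
            # Globs elsewhere may match any non-empty run of characters.
            + [re.escape(part).replace("\\*", ".+") for part in results[2:]]
        )
    )

pattern = glob_to_pattern("https://*.twitter.com/*/status/*")
assert pattern.fullmatch("https://mobile.twitter.com/someuser/status/12345")
```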
@@ -99,7 +159,7 @@ class PreviewUrlResource(DirectServeJsonResource):
             cache_name="url_previews",
             clock=self.clock,
             # don't spider URLs more often than once an hour
-            expiry_ms=60 * 60 * 1000,
+            expiry_ms=ONE_HOUR,
         )
 
         if self._worker_run_media_background_jobs:
@@ -171,16 +231,16 @@ class PreviewUrlResource(DirectServeJsonResource):
         og = await make_deferred_yieldable(defer.maybeDeferred(observable.observe))
         respond_with_json_bytes(request, 200, og, send_cors=True)
 
-    async def _do_preview(self, url, user, ts):
+    async def _do_preview(self, url: str, user: str, ts: int) -> bytes:
         """Check the db, and download the URL and build a preview
 
         Args:
-            url (str):
-            user (str):
-            ts (int):
+            url: The URL to preview.
+            user: The user requesting the preview.
+            ts: The timestamp requested for the preview.
 
         Returns:
-            Deferred[bytes]: json-encoded og data
+            json-encoded og data
         """
         # check the URL cache in the DB (which will also provide us with
         # historical previews, if we have any)
@@ -310,6 +370,87 @@ class PreviewUrlResource(DirectServeJsonResource):
 
         return jsonog.encode("utf8")
 
+    def _get_oembed_url(self, url: str) -> Optional[str]:
+        """
+        Check whether the URL should be downloaded as oEmbed content instead.
+
+        Args:
+            url: The URL to check.
+
+        Returns:
+            A URL to use instead or None if the original URL should be used.
+        """
+        for url_pattern, endpoint in _oembed_patterns.items():
+            if url_pattern.fullmatch(url):
+                return endpoint
+
+        # No match.
+        return None
+
+    async def _get_oembed_content(self, endpoint: str, url: str) -> OEmbedResult:
+        """
+        Request content from an oEmbed endpoint.
+
+        Args:
+            endpoint: The oEmbed API endpoint.
+            url: The URL to pass to the API.
+
+        Returns:
+            An object representing the metadata returned.
+
+        Raises:
+            OEmbedError if fetching or parsing of the oEmbed information fails.
+        """
+        try:
+            logger.debug("Trying to get oEmbed content for url '%s'", url)
+            result = await self.client.get_json(
+                endpoint,
+                # TODO Specify max height / width.
+                # Note that only the JSON format is supported.
+                args={"url": url},
+            )
+
+            # Ensure there's a version of 1.0.
+            if result.get("version") != "1.0":
+                raise OEmbedError("Invalid version: %s" % (result.get("version"),))
+
+            oembed_type = result.get("type")
+
+            # Ensure the cache age is None or an int.
+            cache_age = result.get("cache_age")
+            if cache_age:
+                cache_age = int(cache_age)
+
+            oembed_result = OEmbedResult(None, None, result.get("title"), cache_age)
+
+            # HTML content.
+            if oembed_type == "rich":
+                oembed_result.html = result.get("html")
+                return oembed_result
+
+            if oembed_type == "photo":
+                oembed_result.url = result.get("url")
+                return oembed_result
+
+            # TODO Handle link and video types.
+
+            if "thumbnail_url" in result:
+                oembed_result.url = result.get("thumbnail_url")
+                return oembed_result
+
+            raise OEmbedError("Incompatible oEmbed information.")
+
+        except OEmbedError as e:
+            # Trap OEmbedErrors first so we can directly re-raise them.
+            logger.warning("Error parsing oEmbed metadata from %s: %r", url, e)
+            raise
+
+        except Exception as e:
+            # Trap any exception and let the code follow as usual.
+            # FIXME: pass through 404s and other error messages nicely
+            logger.warning("Error downloading oEmbed metadata from %s: %r", url, e)
+            raise OEmbedError() from e
+
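For reference, `_get_oembed_content` expects an oEmbed 1.0 JSON document; a hypothetical "photo" payload (values are illustrative) and the fields the method reads from it:

```python
# Hypothetical oEmbed 1.0 response as returned by the endpoint's JSON API.
result = {
    "version": "1.0",                         # must be exactly "1.0"
    "type": "photo",                          # "rich" -> html, "photo" -> url
    "title": "Example photo",
    "url": "https://example.com/image.png",
    "cache_age": "3600",                      # coerced with int() when present
}

# The method would map this to:
#   OEmbedResult(html=None, url="https://example.com/image.png",
#                title="Example photo", cache_age=3600)
```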
     async def _download_url(self, url, user):
         # TODO: we should probably honour robots.txt... except in practice
         # we're most likely being explicitly triggered by a human rather than a
@@ -319,54 +460,90 @@ class PreviewUrlResource(DirectServeJsonResource):
 
         file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)
 
-        with self.media_storage.store_into_file(file_info) as (f, fname, finish):
+        # If this URL can be accessed via oEmbed, use that instead.
+        url_to_download = url
+        oembed_url = self._get_oembed_url(url)
+        if oembed_url:
+            # The result might be a new URL to download, or it might be HTML content.
             try:
-                logger.debug("Trying to get preview for url '%s'", url)
-                length, headers, uri, code = await self.client.get_file(
-                    url,
-                    output_stream=f,
-                    max_size=self.max_spider_size,
-                    headers={"Accept-Language": self.url_preview_accept_language},
-                )
-            except SynapseError:
-                # Pass SynapseErrors through directly, so that the servlet
-                # handler will return a SynapseError to the client instead of
-                # blank data or a 500.
-                raise
-            except DNSLookupError:
-                # DNS lookup returned no results
-                # Note: This will also be the case if one of the resolved IP
-                # addresses is blacklisted
-                raise SynapseError(
-                    502,
-                    "DNS resolution failure during URL preview generation",
-                    Codes.UNKNOWN,
-                )
-            except Exception as e:
-                # FIXME: pass through 404s and other error messages nicely
-                logger.warning("Error downloading %s: %r", url, e)
+                oembed_result = await self._get_oembed_content(oembed_url, url)
+                if oembed_result.url:
+                    url_to_download = oembed_result.url
+                elif oembed_result.html:
+                    url_to_download = None
+            except OEmbedError:
+                # If an error occurs, try doing a normal preview.
+                pass
 
-                raise SynapseError(
-                    500,
-                    "Failed to download content: %s"
-                    % (traceback.format_exception_only(sys.exc_info()[0], e),),
-                    Codes.UNKNOWN,
-                )
-            await finish()
+        if url_to_download:
+            with self.media_storage.store_into_file(file_info) as (f, fname, finish):
+                try:
+                    logger.debug("Trying to get preview for url '%s'", url_to_download)
+                    length, headers, uri, code = await self.client.get_file(
+                        url_to_download,
+                        output_stream=f,
+                        max_size=self.max_spider_size,
+                        headers={"Accept-Language": self.url_preview_accept_language},
+                    )
+                except SynapseError:
+                    # Pass SynapseErrors through directly, so that the servlet
+                    # handler will return a SynapseError to the client instead of
+                    # blank data or a 500.
+                    raise
+                except DNSLookupError:
+                    # DNS lookup returned no results
+                    # Note: This will also be the case if one of the resolved IP
+                    # addresses is blacklisted
+                    raise SynapseError(
+                        502,
+                        "DNS resolution failure during URL preview generation",
+                        Codes.UNKNOWN,
+                    )
+                except Exception as e:
+                    # FIXME: pass through 404s and other error messages nicely
+                    logger.warning("Error downloading %s: %r", url_to_download, e)
+
+                    raise SynapseError(
+                        500,
+                        "Failed to download content: %s"
+                        % (traceback.format_exception_only(sys.exc_info()[0], e),),
+                        Codes.UNKNOWN,
+                    )
+                await finish()
+
+                if b"Content-Type" in headers:
+                    media_type = headers[b"Content-Type"][0].decode("ascii")
+                else:
+                    media_type = "application/octet-stream"
+
+                download_name = get_filename_from_headers(headers)
+
+                # FIXME: we should calculate a proper expiration based on the
+                # Cache-Control and Expire headers.  But for now, assume 1 hour.
+                expires = ONE_HOUR
+                etag = headers["ETag"][0] if "ETag" in headers else None
+        else:
+            html_bytes = oembed_result.html.encode("utf-8")  # type: ignore
+            with self.media_storage.store_into_file(file_info) as (f, fname, finish):
+                f.write(html_bytes)
+                await finish()
+
+            media_type = "text/html"
+            download_name = oembed_result.title
+            length = len(html_bytes)
+            # If a specific cache age was not given, assume 1 hour.
+            expires = oembed_result.cache_age or ONE_HOUR
+            uri = oembed_url
+            code = 200
+            etag = None
 
         try:
-            if b"Content-Type" in headers:
-                media_type = headers[b"Content-Type"][0].decode("ascii")
-            else:
-                media_type = "application/octet-stream"
             time_now_ms = self.clock.time_msec()
 
-            download_name = get_filename_from_headers(headers)
-
             await self.store.store_local_media(
                 media_id=file_id,
                 media_type=media_type,
-                time_now_ms=self.clock.time_msec(),
+                time_now_ms=time_now_ms,
                 upload_name=download_name,
                 media_length=length,
                 user_id=user,
@@ -389,10 +566,8 @@ class PreviewUrlResource(DirectServeJsonResource):
             "filename": fname,
             "uri": uri,
             "response_code": code,
-            # FIXME: we should calculate a proper expiration based on the
-            # Cache-Control and Expire headers.  But for now, assume 1 hour.
-            "expires": 60 * 60 * 1000,
-            "etag": headers["ETag"][0] if "ETag" in headers else None,
+            "expires": expires,
+            "etag": etag,
         }
 
     def _start_expire_url_cache_data(self):
@@ -449,7 +624,7 @@ class PreviewUrlResource(DirectServeJsonResource):
         # These may be cached for a bit on the client (i.e., they
         # may have a room open with a preview url thing open).
         # So we wait a couple of days before deleting, just in case.
-        expire_before = now - 2 * 24 * 60 * 60 * 1000
+        expire_before = now - 2 * 24 * ONE_HOUR
         media_ids = await self.store.get_url_cache_media_before(expire_before)
 
         removed_media = []
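Condensing `_download_url`'s new control flow into a sketch (collaborators passed in as callables, simplified error handling, illustrative names):

```python
async def preview_flow(url, get_oembed_url, get_oembed_content, download):
    url_to_download = url
    oembed_result = None

    oembed_url = get_oembed_url(url)
    if oembed_url:
        try:
            oembed_result = await get_oembed_content(oembed_url, url)
            if oembed_result.url:
                url_to_download = oembed_result.url   # e.g. a photo URL
            elif oembed_result.html:
                url_to_download = None                # store the HTML directly
        except Exception:
            pass  # on any oEmbed failure, fall back to a normal preview

    if url_to_download:
        return await download(url_to_download)
    return oembed_result.html.encode("utf-8")
```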
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index 858680be26..a33f56e806 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -16,62 +16,62 @@
 import logging
 import os
 import shutil
-
-from twisted.internet import defer
+from typing import Optional
 
 from synapse.config._base import Config
 from synapse.logging.context import defer_to_thread, run_in_background
 
+from ._base import FileInfo, Responder
 from .media_storage import FileResponder
 
 logger = logging.getLogger(__name__)
 
 
-class StorageProvider(object):
+class StorageProvider:
     """A storage provider is a service that can store uploaded media and
     retrieve them.
     """
 
-    def store_file(self, path, file_info):
+    async def store_file(self, path: str, file_info: FileInfo):
         """Store the file described by file_info. The actual contents can be
         retrieved by reading the file in file_info.upload_path.
 
         Args:
-            path (str): Relative path of file in local cache
-            file_info (FileInfo)
-
-        Returns:
-            Deferred
+            path: Relative path of file in local cache
+            file_info: The metadata of the file.
         """
-        pass
 
-    def fetch(self, path, file_info):
+    async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
         """Attempt to fetch the file described by file_info and stream it
         into writer.
 
         Args:
-            path (str): Relative path of file in local cache
-            file_info (FileInfo)
+            path: Relative path of file in local cache
+            file_info: The metadata of the file.
 
         Returns:
-            Deferred(Responder): Returns a Responder if the provider has the file,
-                otherwise returns None.
+            Returns a Responder if the provider has the file, otherwise returns None.
         """
-        pass
 
 
 class StorageProviderWrapper(StorageProvider):
     """Wraps a storage provider and provides various config options
 
     Args:
-        backend (StorageProvider)
-        store_local (bool): Whether to store new local files or not.
-        store_synchronous (bool): Whether to wait for file to be successfully
+        backend: The storage provider to wrap.
+        store_local: Whether to store new local files or not.
+        store_synchronous: Whether to wait for file to be successfully
             uploaded, or to do the upload in the background.
-        store_remote (bool): Whether remote media should be uploaded
+        store_remote: Whether remote media should be uploaded
     """
 
-    def __init__(self, backend, store_local, store_synchronous, store_remote):
+    def __init__(
+        self,
+        backend: StorageProvider,
+        store_local: bool,
+        store_synchronous: bool,
+        store_remote: bool,
+    ):
         self.backend = backend
         self.store_local = store_local
         self.store_synchronous = store_synchronous
@@ -80,15 +80,15 @@ class StorageProviderWrapper(StorageProvider):
     def __str__(self):
         return "StorageProviderWrapper[%s]" % (self.backend,)
 
-    def store_file(self, path, file_info):
+    async def store_file(self, path, file_info):
         if not file_info.server_name and not self.store_local:
-            return defer.succeed(None)
+            return None
 
         if file_info.server_name and not self.store_remote:
-            return defer.succeed(None)
+            return None
 
         if self.store_synchronous:
-            return self.backend.store_file(path, file_info)
+            return await self.backend.store_file(path, file_info)
         else:
             # TODO: Handle errors.
             def store():
@@ -98,10 +98,10 @@ class StorageProviderWrapper(StorageProvider):
                     logger.exception("Error storing file")
 
             run_in_background(store)
-            return defer.succeed(None)
+            return None
 
-    def fetch(self, path, file_info):
-        return self.backend.fetch(path, file_info)
+    async def fetch(self, path, file_info):
+        return await self.backend.fetch(path, file_info)
 
 
 class FileStorageProviderBackend(StorageProvider):
@@ -120,7 +120,7 @@ class FileStorageProviderBackend(StorageProvider):
     def __str__(self):
         return "FileStorageProviderBackend[%s]" % (self.base_directory,)
 
-    def store_file(self, path, file_info):
+    async def store_file(self, path, file_info):
         """See StorageProvider.store_file"""
 
         primary_fname = os.path.join(self.cache_directory, path)
@@ -130,11 +130,11 @@ class FileStorageProviderBackend(StorageProvider):
         if not os.path.exists(dirname):
             os.makedirs(dirname)
 
-        return defer_to_thread(
+        return await defer_to_thread(
             self.hs.get_reactor(), shutil.copyfile, primary_fname, backup_fname
         )
 
-    def fetch(self, path, file_info):
+    async def fetch(self, path, file_info):
         """See StorageProvider.fetch"""
 
         backup_fname = os.path.join(self.base_directory, path)
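With `store_file` and `fetch` now defined as coroutines on the base class, a third-party provider only needs to subclass and override both. A minimal sketch (this class is hypothetical, reusing the module's `StorageProvider`, `FileInfo`, `Responder` and `FileResponder`):

```python
import os
import shutil
from typing import Optional

from ._base import FileInfo, Responder
from .media_storage import FileResponder
from .storage_provider import StorageProvider

class MirrorStorageProvider(StorageProvider):
    """Copies media into a second directory and serves it back from there."""

    def __init__(self, cache_directory: str, mirror_directory: str):
        self.cache_directory = cache_directory
        self.mirror_directory = mirror_directory

    async def store_file(self, path: str, file_info: FileInfo) -> None:
        src = os.path.join(self.cache_directory, path)
        dst = os.path.join(self.mirror_directory, path)
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        # The built-in file backend offloads this blocking copy to a thread
        # via defer_to_thread; it is done inline here for brevity.
        shutil.copyfile(src, dst)

    async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
        fname = os.path.join(self.mirror_directory, path)
        if os.path.isfile(fname):
            return FileResponder(open(fname, "rb"))
        return None
```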
diff --git a/synapse/server.pyi b/synapse/server.pyi
index 90a673778f..1aba408c21 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -31,6 +31,7 @@ import synapse.server_notices.server_notices_sender
 import synapse.state
 import synapse.storage
 from synapse.events.builder import EventBuilderFactory
+from synapse.handlers.typing import FollowerTypingHandler
 from synapse.replication.tcp.streams import Stream
 
 class HomeServer(object):
@@ -150,3 +151,5 @@ class HomeServer(object):
         pass
     def should_send_federation(self) -> bool:
         pass
+    def get_typing_handler(self) -> FollowerTypingHandler:
+        pass
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 495d9f04c8..25ccef5aa5 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -16,14 +16,12 @@
 
 import logging
 from collections import namedtuple
-from typing import Dict, Iterable, List, Optional, Set
+from typing import Awaitable, Dict, Iterable, List, Optional, Set
 
 import attr
 from frozendict import frozendict
 from prometheus_client import Histogram
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions
 from synapse.events import EventBase
@@ -31,6 +29,7 @@ from synapse.events.snapshot import EventContext
 from synapse.logging.utils import log_function
 from synapse.state import v1, v2
 from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
+from synapse.storage.roommember import ProfileInfo
 from synapse.types import StateMap
 from synapse.util import Clock
 from synapse.util.async_helpers import Linearizer
@@ -108,8 +107,7 @@ class StateHandler(object):
         self.hs = hs
         self._state_resolution_handler = hs.get_state_resolution_handler()
 
-    @defer.inlineCallbacks
-    def get_current_state(
+    async def get_current_state(
         self, room_id, event_type=None, state_key="", latest_event_ids=None
     ):
         """ Retrieves the current state for the room. This is done by
@@ -126,20 +124,20 @@ class StateHandler(object):
             map from (type, state_key) to event
         """
         if not latest_event_ids:
-            latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
+            latest_event_ids = await self.store.get_latest_event_ids_in_room(room_id)
 
         logger.debug("calling resolve_state_groups from get_current_state")
-        ret = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
+        ret = await self.resolve_state_groups_for_events(room_id, latest_event_ids)
         state = ret.state
 
         if event_type:
             event_id = state.get((event_type, state_key))
             event = None
             if event_id:
-                event = yield self.store.get_event(event_id, allow_none=True)
+                event = await self.store.get_event(event_id, allow_none=True)
             return event
 
-        state_map = yield self.store.get_events(
+        state_map = await self.store.get_events(
             list(state.values()), get_prev_content=False
         )
         state = {
@@ -148,8 +146,7 @@ class StateHandler(object):
 
         return state
 
-    @defer.inlineCallbacks
-    def get_current_state_ids(self, room_id, latest_event_ids=None):
+    async def get_current_state_ids(self, room_id, latest_event_ids=None):
         """Get the current state, or the state at a set of events, for a room
 
         Args:
@@ -164,41 +161,38 @@ class StateHandler(object):
                 (event_type, state_key) -> event_id
         """
         if not latest_event_ids:
-            latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
+            latest_event_ids = await self.store.get_latest_event_ids_in_room(room_id)
 
         logger.debug("calling resolve_state_groups from get_current_state_ids")
-        ret = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
+        ret = await self.resolve_state_groups_for_events(room_id, latest_event_ids)
         state = ret.state
 
         return state
 
-    @defer.inlineCallbacks
-    def get_current_users_in_room(self, room_id, latest_event_ids=None):
+    async def get_current_users_in_room(
+        self, room_id: str, latest_event_ids: Optional[List[str]] = None
+    ) -> Dict[str, ProfileInfo]:
         """
         Get the users who are currently in a room.
 
         Args:
-            room_id (str): The ID of the room.
-            latest_event_ids (List[str]|None): Precomputed list of latest
-                event IDs. Will be computed if None.
+            room_id: The ID of the room.
+            latest_event_ids: Precomputed list of latest event IDs. Will be computed if None.
         Returns:
-            Deferred[Dict[str,ProfileInfo]]: Dictionary of user IDs to their
-                profileinfo.
+            Dictionary of user IDs to their profileinfo.
         """
         if not latest_event_ids:
-            latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
+            latest_event_ids = await self.store.get_latest_event_ids_in_room(room_id)
         logger.debug("calling resolve_state_groups from get_current_users_in_room")
-        entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
-        joined_users = yield self.store.get_joined_users_from_state(room_id, entry)
+        entry = await self.resolve_state_groups_for_events(room_id, latest_event_ids)
+        joined_users = await self.store.get_joined_users_from_state(room_id, entry)
         return joined_users
 
-    @defer.inlineCallbacks
-    def get_current_hosts_in_room(self, room_id):
-        event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
-        return (yield self.get_hosts_in_room_at_events(room_id, event_ids))
+    async def get_current_hosts_in_room(self, room_id):
+        event_ids = await self.store.get_latest_event_ids_in_room(room_id)
+        return await self.get_hosts_in_room_at_events(room_id, event_ids)
 
-    @defer.inlineCallbacks
-    def get_hosts_in_room_at_events(self, room_id, event_ids):
+    async def get_hosts_in_room_at_events(self, room_id, event_ids):
         """Get the hosts that were in a room at the given event ids
 
         Args:
@@ -208,12 +202,11 @@ class StateHandler(object):
         Returns:
             list[str]: the hosts in the room at the given events
         """
-        entry = yield self.resolve_state_groups_for_events(room_id, event_ids)
-        joined_hosts = yield self.store.get_joined_hosts(room_id, entry)
+        entry = await self.resolve_state_groups_for_events(room_id, event_ids)
+        joined_hosts = await self.store.get_joined_hosts(room_id, entry)
         return joined_hosts
 
-    @defer.inlineCallbacks
-    def compute_event_context(
+    async def compute_event_context(
         self, event: EventBase, old_state: Optional[Iterable[EventBase]] = None
     ):
         """Build an EventContext structure for the event.
@@ -278,7 +271,7 @@ class StateHandler(object):
             # otherwise, we'll need to resolve the state across the prev_events.
             logger.debug("calling resolve_state_groups from compute_event_context")
 
-            entry = yield self.resolve_state_groups_for_events(
+            entry = await self.resolve_state_groups_for_events(
                 event.room_id, event.prev_event_ids()
             )
 
@@ -295,7 +288,7 @@ class StateHandler(object):
         #
 
         if not state_group_before_event:
-            state_group_before_event = yield self.state_store.store_state_group(
+            state_group_before_event = await self.state_store.store_state_group(
                 event.event_id,
                 event.room_id,
                 prev_group=state_group_before_event_prev_group,
@@ -335,7 +328,7 @@ class StateHandler(object):
         state_ids_after_event[key] = event.event_id
         delta_ids = {key: event.event_id}
 
-        state_group_after_event = yield self.state_store.store_state_group(
+        state_group_after_event = await self.state_store.store_state_group(
             event.event_id,
             event.room_id,
             prev_group=state_group_before_event,
@@ -353,8 +346,7 @@ class StateHandler(object):
         )
 
     @measure_func()
-    @defer.inlineCallbacks
-    def resolve_state_groups_for_events(self, room_id, event_ids):
+    async def resolve_state_groups_for_events(self, room_id, event_ids):
         """ Given a list of event_ids this method fetches the state at each
         event, resolves conflicts between them and returns them.
 
@@ -373,7 +365,7 @@ class StateHandler(object):
         # map from state group id to the state in that state group (where
         # 'state' is a map from state key to event id)
         # dict[int, dict[(str, str), str]]
-        state_groups_ids = yield self.state_store.get_state_groups_ids(
+        state_groups_ids = await self.state_store.get_state_groups_ids(
             room_id, event_ids
         )
 
@@ -382,7 +374,7 @@ class StateHandler(object):
         elif len(state_groups_ids) == 1:
             name, state_list = list(state_groups_ids.items()).pop()
 
-            prev_group, delta_ids = yield self.state_store.get_state_group_delta(name)
+            prev_group, delta_ids = await self.state_store.get_state_group_delta(name)
 
             return _StateCacheEntry(
                 state=state_list,
@@ -391,9 +383,9 @@ class StateHandler(object):
                 delta_ids=delta_ids,
             )
 
-        room_version = yield self.store.get_room_version_id(room_id)
+        room_version = await self.store.get_room_version_id(room_id)
 
-        result = yield self._state_resolution_handler.resolve_state_groups(
+        result = await self._state_resolution_handler.resolve_state_groups(
             room_id,
             room_version,
             state_groups_ids,
@@ -402,8 +394,7 @@ class StateHandler(object):
         )
         return result
 
-    @defer.inlineCallbacks
-    def resolve_events(self, room_version, state_sets, event):
+    async def resolve_events(self, room_version, state_sets, event):
         logger.info(
             "Resolving state for %s with %d groups", event.room_id, len(state_sets)
         )
@@ -414,7 +405,7 @@ class StateHandler(object):
         state_map = {ev.event_id: ev for st in state_sets for ev in st}
 
         with Measure(self.clock, "state._resolve_events"):
-            new_state = yield resolve_events_with_store(
+            new_state = await resolve_events_with_store(
                 self.clock,
                 event.room_id,
                 room_version,
@@ -451,9 +442,8 @@ class StateResolutionHandler(object):
             reset_expiry_on_get=True,
         )
 
-    @defer.inlineCallbacks
     @log_function
-    def resolve_state_groups(
+    async def resolve_state_groups(
         self, room_id, room_version, state_groups_ids, event_map, state_res_store
     ):
         """Resolves conflicts between a set of state groups
@@ -479,13 +469,13 @@ class StateResolutionHandler(object):
             state_res_store (StateResolutionStore)
 
         Returns:
-            Deferred[_StateCacheEntry]: resolved state
+            _StateCacheEntry: resolved state
         """
         logger.debug("resolve_state_groups state_groups %s", state_groups_ids.keys())
 
         group_names = frozenset(state_groups_ids.keys())
 
-        with (yield self.resolve_linearizer.queue(group_names)):
+        with (await self.resolve_linearizer.queue(group_names)):
             if self._state_cache is not None:
                 cache = self._state_cache.get(group_names, None)
                 if cache:
@@ -517,7 +507,7 @@ class StateResolutionHandler(object):
             if conflicted_state:
                 logger.info("Resolving conflicted state for %r", room_id)
                 with Measure(self.clock, "state._resolve_events"):
-                    new_state = yield resolve_events_with_store(
+                    new_state = await resolve_events_with_store(
                         self.clock,
                         room_id,
                         room_version,
@@ -598,7 +588,7 @@ def resolve_events_with_store(
     state_sets: List[StateMap[str]],
     event_map: Optional[Dict[str, EventBase]],
     state_res_store: "StateResolutionStore",
-):
+) -> Awaitable[StateMap[str]]:
     """
     Args:
         room_id: the room we are working in
@@ -619,8 +609,7 @@ def resolve_events_with_store(
         state_res_store: a place to fetch events from
 
     Returns:
-        Deferred[dict[(str, str), str]]:
-            a map from (type, state_key) to event_id.
+        a map from (type, state_key) to event_id.
     """
     v = KNOWN_ROOM_VERSIONS[room_version]
     if v.state_res == StateResolutionVersions.V1:
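Note that `resolve_events_with_store` stays a plain function here: it forwards the coroutine produced by the v1 or v2 implementation without awaiting it, which is why the annotation is `Awaitable[StateMap[str]]` rather than making the function `async`. A standalone sketch of that dispatch shape (illustrative names):

```python
from typing import Awaitable, Dict

StateMap = Dict[str, str]

async def _resolve_v1(state: StateMap) -> StateMap:
    return state

async def _resolve_v2(state: StateMap) -> StateMap:
    return dict(reversed(list(state.items())))

def resolve(version: str, state: StateMap) -> Awaitable[StateMap]:
    # No `async` keyword needed: returning the un-awaited coroutine keeps
    # the signature honest without adding an extra coroutine frame.
    if version == "1":
        return _resolve_v1(state)
    return _resolve_v2(state)

# Callers simply do: new_state = await resolve("2", {"a": "x", "b": "y"})
```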
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index 7b531a8337..ab5e24841d 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -15,9 +15,7 @@
 
 import hashlib
 import logging
-from typing import Callable, Dict, List, Optional
-
-from twisted.internet import defer
+from typing import Awaitable, Callable, Dict, List, Optional
 
 from synapse import event_auth
 from synapse.api.constants import EventTypes
@@ -32,12 +30,11 @@ logger = logging.getLogger(__name__)
 POWER_KEY = (EventTypes.PowerLevels, "")
 
 
-@defer.inlineCallbacks
-def resolve_events_with_store(
+async def resolve_events_with_store(
     room_id: str,
     state_sets: List[StateMap[str]],
     event_map: Optional[Dict[str, EventBase]],
-    state_map_factory: Callable,
+    state_map_factory: Callable[[List[str]], Awaitable],
 ):
     """
     Args:
@@ -56,7 +53,7 @@ def resolve_events_with_store(
 
         state_map_factory: will be called
             with a list of event_ids that are needed, and should return with
-            a Deferred of dict of event_id to event.
+            an Awaitable that resolves to a dict of event_id to event.
 
     Returns:
         dict[(str, str), str]:
@@ -80,7 +77,7 @@ def resolve_events_with_store(
 
     # dict[str, FrozenEvent]: a map from state event id to event. Only includes
     # the state events which are in conflict (and those in event_map)
-    state_map = yield state_map_factory(needed_events)
+    state_map = await state_map_factory(needed_events)
     if event_map is not None:
         state_map.update(event_map)
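The `state_map_factory` callback is now typed `Callable[[List[str]], Awaitable]`; a sketch of a conforming factory (the stub lookup below is hypothetical):

```python
from typing import Awaitable, Callable, Dict, List

async def fetch_events(event_ids: List[str]) -> Dict[str, dict]:
    # Stand-in for the real event-store lookup.
    return {eid: {"event_id": eid} for eid in event_ids}

state_map_factory: Callable[[List[str]], Awaitable] = fetch_events
# resolve_events_with_store then runs: state_map = await state_map_factory(ids)
```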
 
@@ -110,7 +107,7 @@ def resolve_events_with_store(
         "Asking for %d/%d auth events", len(new_needed_events), new_needed_event_count
     )
 
-    state_map_new = yield state_map_factory(new_needed_events)
+    state_map_new = await state_map_factory(new_needed_events)
     for event in state_map_new.values():
         if event.room_id != room_id:
             raise Exception(
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index bf6caa0946..6634955cdc 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -18,8 +18,6 @@ import itertools
 import logging
 from typing import Dict, List, Optional
 
-from twisted.internet import defer
-
 import synapse.state
 from synapse import event_auth
 from synapse.api.constants import EventTypes
@@ -32,14 +30,13 @@ from synapse.util import Clock
 logger = logging.getLogger(__name__)
 
 
-# We want to yield to the reactor occasionally during state res when dealing
+# We want to return control to the reactor occasionally during state res when dealing
 # with large data sets, so that we don't exhaust the reactor. This is done by
-# yielding to reactor during loops every N iterations.
-_YIELD_AFTER_ITERATIONS = 100
+# awaiting clock.sleep(0) in loops every N iterations.
+_AWAIT_AFTER_ITERATIONS = 100
 
 
-@defer.inlineCallbacks
-def resolve_events_with_store(
+async def resolve_events_with_store(
     clock: Clock,
     room_id: str,
     room_version: str,
@@ -87,7 +84,7 @@ def resolve_events_with_store(
 
     # Also fetch all auth events that appear in only some of the state sets'
     # auth chains.
-    auth_diff = yield _get_auth_chain_difference(state_sets, event_map, state_res_store)
+    auth_diff = await _get_auth_chain_difference(state_sets, event_map, state_res_store)
 
     full_conflicted_set = set(
         itertools.chain(
@@ -95,7 +92,7 @@ def resolve_events_with_store(
         )
     )
 
-    events = yield state_res_store.get_events(
+    events = await state_res_store.get_events(
         [eid for eid in full_conflicted_set if eid not in event_map],
         allow_rejected=True,
     )
@@ -118,14 +115,14 @@ def resolve_events_with_store(
         eid for eid in full_conflicted_set if _is_power_event(event_map[eid])
     )
 
-    sorted_power_events = yield _reverse_topological_power_sort(
+    sorted_power_events = await _reverse_topological_power_sort(
         clock, room_id, power_events, event_map, state_res_store, full_conflicted_set
     )
 
     logger.debug("sorted %d power events", len(sorted_power_events))
 
     # Now sequentially auth each one
-    resolved_state = yield _iterative_auth_checks(
+    resolved_state = await _iterative_auth_checks(
         clock,
         room_id,
         room_version,
@@ -148,13 +145,13 @@ def resolve_events_with_store(
     logger.debug("sorting %d remaining events", len(leftover_events))
 
     pl = resolved_state.get((EventTypes.PowerLevels, ""), None)
-    leftover_events = yield _mainline_sort(
+    leftover_events = await _mainline_sort(
         clock, room_id, leftover_events, pl, event_map, state_res_store
     )
 
     logger.debug("resolving remaining events")
 
-    resolved_state = yield _iterative_auth_checks(
+    resolved_state = await _iterative_auth_checks(
         clock,
         room_id,
         room_version,
@@ -174,8 +171,7 @@ def resolve_events_with_store(
     return resolved_state
 
 
-@defer.inlineCallbacks
-def _get_power_level_for_sender(room_id, event_id, event_map, state_res_store):
+async def _get_power_level_for_sender(room_id, event_id, event_map, state_res_store):
     """Return the power level of the sender of the given event according to
     their auth events.
 
@@ -188,11 +184,11 @@ def _get_power_level_for_sender(room_id, event_id, event_map, state_res_store):
     Returns:
         int
     """
-    event = yield _get_event(room_id, event_id, event_map, state_res_store)
+    event = await _get_event(room_id, event_id, event_map, state_res_store)
 
     pl = None
     for aid in event.auth_event_ids():
-        aev = yield _get_event(
+        aev = await _get_event(
             room_id, aid, event_map, state_res_store, allow_none=True
         )
         if aev and (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
@@ -202,7 +198,7 @@ def _get_power_level_for_sender(room_id, event_id, event_map, state_res_store):
     if pl is None:
         # Couldn't find power level. Check if they're the creator of the room
         for aid in event.auth_event_ids():
-            aev = yield _get_event(
+            aev = await _get_event(
                 room_id, aid, event_map, state_res_store, allow_none=True
             )
             if aev and (aev.type, aev.state_key) == (EventTypes.Create, ""):
@@ -221,8 +217,7 @@ def _get_power_level_for_sender(room_id, event_id, event_map, state_res_store):
         return int(level)
 
 
-@defer.inlineCallbacks
-def _get_auth_chain_difference(state_sets, event_map, state_res_store):
+async def _get_auth_chain_difference(state_sets, event_map, state_res_store):
     """Compare the auth chains of each state set and return the set of events
     that only appear in some but not all of the auth chains.
 
@@ -235,7 +230,7 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store):
         set[str]: Set of event IDs
     """
 
-    difference = yield state_res_store.get_auth_chain_difference(
+    difference = await state_res_store.get_auth_chain_difference(
         [set(state_set.values()) for state_set in state_sets]
     )
 
@@ -292,8 +287,7 @@ def _is_power_event(event):
     return False
 
 
-@defer.inlineCallbacks
-def _add_event_and_auth_chain_to_graph(
+async def _add_event_and_auth_chain_to_graph(
     graph, room_id, event_id, event_map, state_res_store, auth_diff
 ):
     """Helper function for _reverse_topological_power_sort that add the event
@@ -314,7 +308,7 @@ def _add_event_and_auth_chain_to_graph(
         eid = state.pop()
         graph.setdefault(eid, set())
 
-        event = yield _get_event(room_id, eid, event_map, state_res_store)
+        event = await _get_event(room_id, eid, event_map, state_res_store)
         for aid in event.auth_event_ids():
             if aid in auth_diff:
                 if aid not in graph:
@@ -323,8 +317,7 @@ def _add_event_and_auth_chain_to_graph(
                 graph.setdefault(eid, set()).add(aid)
 
 
-@defer.inlineCallbacks
-def _reverse_topological_power_sort(
+async def _reverse_topological_power_sort(
     clock, room_id, event_ids, event_map, state_res_store, auth_diff
 ):
     """Returns a list of the event_ids sorted by reverse topological ordering,
@@ -344,26 +337,26 @@ def _reverse_topological_power_sort(
 
     graph = {}
     for idx, event_id in enumerate(event_ids, start=1):
-        yield _add_event_and_auth_chain_to_graph(
+        await _add_event_and_auth_chain_to_graph(
             graph, room_id, event_id, event_map, state_res_store, auth_diff
         )
 
-        # We yield occasionally when we're working with large data sets to
+        # We await occasionally when we're working with large data sets to
         # ensure that we don't block the reactor loop for too long.
-        if idx % _YIELD_AFTER_ITERATIONS == 0:
-            yield clock.sleep(0)
+        if idx % _AWAIT_AFTER_ITERATIONS == 0:
+            await clock.sleep(0)
 
     event_to_pl = {}
     for idx, event_id in enumerate(graph, start=1):
-        pl = yield _get_power_level_for_sender(
+        pl = await _get_power_level_for_sender(
             room_id, event_id, event_map, state_res_store
         )
         event_to_pl[event_id] = pl
 
-        # We yield occasionally when we're working with large data sets to
+        # We await occasionally when we're working with large data sets to
         # ensure that we don't block the reactor loop for too long.
-        if idx % _YIELD_AFTER_ITERATIONS == 0:
-            yield clock.sleep(0)
+        if idx % _AWAIT_AFTER_ITERATIONS == 0:
+            await clock.sleep(0)
 
     def _get_power_order(event_id):
         ev = event_map[event_id]
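The `_AWAIT_AFTER_ITERATIONS` checks above are the standard cooperative-yield idiom: awaiting `clock.sleep(0)` every N iterations hands control back to the reactor so a long state resolution cannot starve other work. A standalone analogue (using asyncio purely for illustration):

```python
import asyncio

_AWAIT_AFTER_ITERATIONS = 100

async def process_all(items):
    total = 0
    for idx, item in enumerate(items, start=1):
        total += item  # stand-in for one CPU-bound step of state resolution
        if idx % _AWAIT_AFTER_ITERATIONS == 0:
            # Sleeping for zero seconds yields to the event loop without
            # adding any real delay.
            await asyncio.sleep(0)
    return total

# asyncio.run(process_all(range(1_000)))
```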
@@ -378,8 +371,7 @@ def _reverse_topological_power_sort(
     return sorted_events
 
 
-@defer.inlineCallbacks
-def _iterative_auth_checks(
+async def _iterative_auth_checks(
     clock, room_id, room_version, event_ids, base_state, event_map, state_res_store
 ):
     """Sequentially apply auth checks to each event in given list, updating the
@@ -405,7 +397,7 @@ def _iterative_auth_checks(
 
         auth_events = {}
         for aid in event.auth_event_ids():
-            ev = yield _get_event(
+            ev = await _get_event(
                 room_id, aid, event_map, state_res_store, allow_none=True
             )
 
@@ -420,7 +412,7 @@ def _iterative_auth_checks(
         for key in event_auth.auth_types_for_event(event):
             if key in resolved_state:
                 ev_id = resolved_state[key]
-                ev = yield _get_event(room_id, ev_id, event_map, state_res_store)
+                ev = await _get_event(room_id, ev_id, event_map, state_res_store)
 
                 if ev.rejected_reason is None:
                     auth_events[key] = event_map[ev_id]
@@ -438,16 +430,15 @@ def _iterative_auth_checks(
         except AuthError:
             pass
 
-        # We yield occasionally when we're working with large data sets to
+        # We await occasionally when we're working with large data sets to
         # ensure that we don't block the reactor loop for too long.
-        if idx % _YIELD_AFTER_ITERATIONS == 0:
-            yield clock.sleep(0)
+        if idx % _AWAIT_AFTER_ITERATIONS == 0:
+            await clock.sleep(0)
 
     return resolved_state
 
 
-@defer.inlineCallbacks
-def _mainline_sort(
+async def _mainline_sort(
     clock, room_id, event_ids, resolved_power_event_id, event_map, state_res_store
 ):
     """Returns a sorted list of event_ids sorted by mainline ordering based on
@@ -474,21 +465,21 @@ def _mainline_sort(
     idx = 0
     while pl:
         mainline.append(pl)
-        pl_ev = yield _get_event(room_id, pl, event_map, state_res_store)
+        pl_ev = await _get_event(room_id, pl, event_map, state_res_store)
         auth_events = pl_ev.auth_event_ids()
         pl = None
         for aid in auth_events:
-            ev = yield _get_event(
+            ev = await _get_event(
                 room_id, aid, event_map, state_res_store, allow_none=True
             )
             if ev and (ev.type, ev.state_key) == (EventTypes.PowerLevels, ""):
                 pl = aid
                 break
 
-        # We yield occasionally when we're working with large data sets to
+        # We await occasionally when we're working with large data sets to
         # ensure that we don't block the reactor loop for too long.
-        if idx != 0 and idx % _YIELD_AFTER_ITERATIONS == 0:
-            yield clock.sleep(0)
+        if idx != 0 and idx % _AWAIT_AFTER_ITERATIONS == 0:
+            await clock.sleep(0)
 
         idx += 1
 
@@ -498,23 +489,24 @@ def _mainline_sort(
 
     order_map = {}
     for idx, ev_id in enumerate(event_ids, start=1):
-        depth = yield _get_mainline_depth_for_event(
+        depth = await _get_mainline_depth_for_event(
             event_map[ev_id], mainline_map, event_map, state_res_store
         )
         order_map[ev_id] = (depth, event_map[ev_id].origin_server_ts, ev_id)
 
-        # We yield occasionally when we're working with large data sets to
+        # We await occasionally when we're working with large data sets to
         # ensure that we don't block the reactor loop for too long.
-        if idx % _YIELD_AFTER_ITERATIONS == 0:
-            yield clock.sleep(0)
+        if idx % _AWAIT_AFTER_ITERATIONS == 0:
+            await clock.sleep(0)
 
     event_ids.sort(key=lambda ev_id: order_map[ev_id])
 
     return event_ids
 
 
-@defer.inlineCallbacks
-def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_store):
+async def _get_mainline_depth_for_event(
+    event, mainline_map, event_map, state_res_store
+):
     """Get the mainline depths for the given event based on the mainline map
 
     Args:
@@ -541,7 +533,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
         event = None
 
         for aid in auth_events:
-            aev = yield _get_event(
+            aev = await _get_event(
                 room_id, aid, event_map, state_res_store, allow_none=True
             )
             if aev and (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
@@ -552,8 +544,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
     return 0
 
 
-@defer.inlineCallbacks
-def _get_event(room_id, event_id, event_map, state_res_store, allow_none=False):
+async def _get_event(room_id, event_id, event_map, state_res_store, allow_none=False):
     """Helper function to look up event in event_map, falling back to looking
     it up in the store
 
@@ -569,7 +560,7 @@ def _get_event(room_id, event_id, event_map, state_res_store, allow_none=False):
-        Deferred[Optional[FrozenEvent]]
+        Optional[FrozenEvent]
     """
     if event_id not in event_map:
-        events = yield state_res_store.get_events([event_id], allow_rejected=True)
+        events = await state_res_store.get_events([event_id], allow_rejected=True)
         event_map.update(events)
     event = event_map.get(event_id)
 
diff --git a/synapse/storage/data_stores/main/cache.py b/synapse/storage/data_stores/main/cache.py
index f39f556c20..edc3624fed 100644
--- a/synapse/storage/data_stores/main/cache.py
+++ b/synapse/storage/data_stores/main/cache.py
@@ -172,6 +172,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
 
         self.get_latest_event_ids_in_room.invalidate((room_id,))
 
+        self.get_unread_message_count_for_user.invalidate_many((room_id,))
         self.get_unread_event_push_actions_by_room_for_user.invalidate_many((room_id,))
 
         if not backfilled:
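
The added `invalidate_many((room_id,))` works because `get_unread_message_count_for_user` (introduced later in this diff) is declared `@cached(tree=True, ...)` and keyed by `(room_id, user_id)`: a room-level prefix drops every user's cached count for that room in one call. A toy model of that prefix invalidation, assuming the same two-level keying (this is not Synapse's actual TreeCache):

    class ToyTreeCache:
        """Entries keyed (room_id, user_id), grouped by room for bulk drops."""

        def __init__(self):
            self._data = {}

        def set(self, key, value):
            room_id, user_id = key
            self._data.setdefault(room_id, {})[user_id] = value

        def get(self, key):
            room_id, user_id = key
            return self._data.get(room_id, {}).get(user_id)

        def invalidate_many(self, prefix):
            (room_id,) = prefix
            self._data.pop(room_id, None)  # clears all users' entries at once

    cache = ToyTreeCache()
    cache.set(("!room:test", "@alice:test"), 3)
    cache.set(("!room:test", "@bob:test"), 5)
    cache.invalidate_many(("!room:test",))
    assert cache.get(("!room:test", "@alice:test")) is None
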
diff --git a/synapse/storage/data_stores/main/event_push_actions.py b/synapse/storage/data_stores/main/event_push_actions.py
index 504babaa7e..ad82838901 100644
--- a/synapse/storage/data_stores/main/event_push_actions.py
+++ b/synapse/storage/data_stores/main/event_push_actions.py
@@ -15,11 +15,10 @@
 # limitations under the License.
 
 import logging
+from typing import List
 
 from canonicaljson import json
 
-from twisted.internet import defer
-
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import LoggingTransaction, SQLBaseStore, db_to_json
 from synapse.storage.database import Database
@@ -166,8 +165,9 @@ class EventPushActionsWorkerStore(SQLBaseStore):
 
         return {"notify_count": notify_count, "highlight_count": highlight_count}
 
-    @defer.inlineCallbacks
-    def get_push_action_users_in_range(self, min_stream_ordering, max_stream_ordering):
+    async def get_push_action_users_in_range(
+        self, min_stream_ordering, max_stream_ordering
+    ):
         def f(txn):
             sql = (
                 "SELECT DISTINCT(user_id) FROM event_push_actions WHERE"
@@ -176,26 +176,28 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, (min_stream_ordering, max_stream_ordering))
             return [r[0] for r in txn]
 
-        ret = yield self.db.runInteraction("get_push_action_users_in_range", f)
+        ret = await self.db.runInteraction("get_push_action_users_in_range", f)
         return ret
 
-    @defer.inlineCallbacks
-    def get_unread_push_actions_for_user_in_range_for_http(
-        self, user_id, min_stream_ordering, max_stream_ordering, limit=20
-    ):
+    async def get_unread_push_actions_for_user_in_range_for_http(
+        self,
+        user_id: str,
+        min_stream_ordering: int,
+        max_stream_ordering: int,
+        limit: int = 20,
+    ) -> List[dict]:
         """Get a list of the most recent unread push actions for a given user,
         within the given stream ordering range. Called by the httppusher.
 
         Args:
-            user_id (str): The user to fetch push actions for.
-            min_stream_ordering(int): The exclusive lower bound on the
+            user_id: The user to fetch push actions for.
+            min_stream_ordering: The exclusive lower bound on the
                 stream ordering of event push actions to fetch.
-            max_stream_ordering(int): The inclusive upper bound on the
+            max_stream_ordering: The inclusive upper bound on the
                 stream ordering of event push actions to fetch.
-            limit (int): The maximum number of rows to return.
+            limit: The maximum number of rows to return.
         Returns:
-            A promise which resolves to a list of dicts with the keys "event_id",
-            "room_id", "stream_ordering", "actions".
+            A list of dicts with the keys "event_id", "room_id", "stream_ordering", "actions".
             The list will be ordered by ascending stream_ordering.
-            The list will have between 0~limit entries.
+            The list will have between 0 and limit entries.
         """
@@ -228,7 +230,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, args)
             return txn.fetchall()
 
-        after_read_receipt = yield self.db.runInteraction(
+        after_read_receipt = await self.db.runInteraction(
             "get_unread_push_actions_for_user_in_range_http_arr", get_after_receipt
         )
 
@@ -256,7 +258,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, args)
             return txn.fetchall()
 
-        no_read_receipt = yield self.db.runInteraction(
+        no_read_receipt = await self.db.runInteraction(
             "get_unread_push_actions_for_user_in_range_http_nrr", get_no_receipt
         )
 
@@ -280,23 +282,25 @@ class EventPushActionsWorkerStore(SQLBaseStore):
         # one of the subqueries may have hit the limit.
         return notifs[:limit]
 
-    @defer.inlineCallbacks
-    def get_unread_push_actions_for_user_in_range_for_email(
-        self, user_id, min_stream_ordering, max_stream_ordering, limit=20
-    ):
+    async def get_unread_push_actions_for_user_in_range_for_email(
+        self,
+        user_id: str,
+        min_stream_ordering: int,
+        max_stream_ordering: int,
+        limit: int = 20,
+    ) -> List[dict]:
         """Get a list of the most recent unread push actions for a given user,
-        within the given stream ordering range. Called by the emailpusher
+        within the given stream ordering range. Called by the emailpusher.
 
         Args:
-            user_id (str): The user to fetch push actions for.
-            min_stream_ordering(int): The exclusive lower bound on the
+            user_id: The user to fetch push actions for.
+            min_stream_ordering: The exclusive lower bound on the
                 stream ordering of event push actions to fetch.
-            max_stream_ordering(int): The inclusive upper bound on the
+            max_stream_ordering: The inclusive upper bound on the
                 stream ordering of event push actions to fetch.
-            limit (int): The maximum number of rows to return.
+            limit: The maximum number of rows to return.
         Returns:
-            A promise which resolves to a list of dicts with the keys "event_id",
-            "room_id", "stream_ordering", "actions", "received_ts".
+            A list of dicts with the keys "event_id", "room_id", "stream_ordering", "actions", "received_ts".
             The list will be ordered by descending received_ts.
-            The list will have between 0~limit entries.
+            The list will have between 0 and limit entries.
         """
@@ -328,7 +332,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, args)
             return txn.fetchall()
 
-        after_read_receipt = yield self.db.runInteraction(
+        after_read_receipt = await self.db.runInteraction(
             "get_unread_push_actions_for_user_in_range_email_arr", get_after_receipt
         )
 
@@ -356,7 +360,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, args)
             return txn.fetchall()
 
-        no_read_receipt = yield self.db.runInteraction(
+        no_read_receipt = await self.db.runInteraction(
             "get_unread_push_actions_for_user_in_range_email_nrr", get_no_receipt
         )
 
@@ -411,7 +415,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             _get_if_maybe_push_in_range_for_user_txn,
         )
 
-    def add_push_actions_to_staging(self, event_id, user_id_actions):
+    async def add_push_actions_to_staging(self, event_id, user_id_actions):
         """Add the push actions for the event to the push action staging area.
 
         Args:
@@ -457,21 +461,17 @@ class EventPushActionsWorkerStore(SQLBaseStore):
                 ),
             )
 
-        return self.db.runInteraction(
+        return await self.db.runInteraction(
             "add_push_actions_to_staging", _add_push_actions_to_staging_txn
         )
 
-    @defer.inlineCallbacks
-    def remove_push_actions_from_staging(self, event_id):
+    async def remove_push_actions_from_staging(self, event_id: str) -> None:
         """Called if we failed to persist the event to ensure that stale push
         actions don't build up in the DB
-
-        Args:
-            event_id (str)
         """
 
         try:
-            res = yield self.db.simple_delete(
+            res = await self.db.simple_delete(
                 table="event_push_actions_staging",
                 keyvalues={"event_id": event_id},
                 desc="remove_push_actions_from_staging",
@@ -606,8 +606,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
 
         return range_end
 
-    @defer.inlineCallbacks
-    def get_time_of_last_push_action_before(self, stream_ordering):
+    async def get_time_of_last_push_action_before(self, stream_ordering):
         def f(txn):
             sql = (
                 "SELECT e.received_ts"
@@ -620,7 +619,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, (stream_ordering,))
             return txn.fetchone()
 
-        result = yield self.db.runInteraction("get_time_of_last_push_action_before", f)
+        result = await self.db.runInteraction("get_time_of_last_push_action_before", f)
         return result[0] if result else None
 
 
@@ -650,8 +649,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             self._start_rotate_notifs, 30 * 60 * 1000
         )
 
-    @defer.inlineCallbacks
-    def get_push_actions_for_user(
+    async def get_push_actions_for_user(
         self, user_id, before=None, limit=50, only_highlight=False
     ):
         def f(txn):
@@ -682,18 +680,17 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             txn.execute(sql, args)
             return self.db.cursor_to_dict(txn)
 
-        push_actions = yield self.db.runInteraction("get_push_actions_for_user", f)
+        push_actions = await self.db.runInteraction("get_push_actions_for_user", f)
         for pa in push_actions:
             pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"])
         return push_actions
 
-    @defer.inlineCallbacks
-    def get_latest_push_action_stream_ordering(self):
+    async def get_latest_push_action_stream_ordering(self):
         def f(txn):
             txn.execute("SELECT MAX(stream_ordering) FROM event_push_actions")
             return txn.fetchone()
 
-        result = yield self.db.runInteraction(
+        result = await self.db.runInteraction(
             "get_latest_push_action_stream_ordering", f
         )
         return result[0] or 0
@@ -747,8 +744,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
     def _start_rotate_notifs(self):
         return run_as_background_process("rotate_notifs", self._rotate_notifs)
 
-    @defer.inlineCallbacks
-    def _rotate_notifs(self):
+    async def _rotate_notifs(self):
         if self._doing_notif_rotation or self.stream_ordering_day_ago is None:
             return
         self._doing_notif_rotation = True
@@ -757,12 +753,12 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             while True:
                 logger.info("Rotating notifications")
 
-                caught_up = yield self.db.runInteraction(
+                caught_up = await self.db.runInteraction(
                     "_rotate_notifs", self._rotate_notifs_txn
                 )
                 if caught_up:
                     break
-                yield self.hs.get_clock().sleep(self._rotate_delay)
+                await self.hs.get_clock().sleep(self._rotate_delay)
         finally:
             self._doing_notif_rotation = False
 
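`_rotate_notifs` illustrates the batch-until-caught-up idiom: each `runInteraction` rotates one bounded chunk, and the loop sleeps between rounds so rotation never monopolises the database. Schematically, with asyncio standing in for Synapse's clock and a hypothetical `do_batch` callable standing in for the transaction:

    import asyncio

    async def rotate_in_batches(do_batch, delay=0.5):
        """Run bounded units of work until do_batch reports it caught up."""
        while True:
            caught_up = await do_batch()
            if caught_up:
                break
            await asyncio.sleep(delay)  # breathe between rounds

    async def main():
        remaining = [3]

        async def do_batch():
            remaining[0] -= 1  # pretend to rotate one chunk
            return remaining[0] == 0

        await rotate_in_batches(do_batch, delay=0)

    asyncio.run(main())
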
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 6f2e0d15cc..0c9c02afa1 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -53,6 +53,47 @@ event_counter = Counter(
     ["type", "origin_type", "origin_entity"],
 )
 
+STATE_EVENT_TYPES_TO_MARK_UNREAD = {
+    EventTypes.Topic,
+    EventTypes.Name,
+    EventTypes.RoomAvatar,
+    EventTypes.Tombstone,
+}
+
+
+def should_count_as_unread(event: EventBase, context: EventContext) -> bool:
+    # Exclude rejected and soft-failed events.
+    if context.rejected or event.internal_metadata.is_soft_failed():
+        return False
+
+    # Exclude notices.
+    if (
+        not event.is_state()
+        and event.type == EventTypes.Message
+        and event.content.get("msgtype") == "m.notice"
+    ):
+        return False
+
+    # Exclude edits.
+    relates_to = event.content.get("m.relates_to", {})
+    if relates_to.get("rel_type") == RelationTypes.REPLACE:
+        return False
+
+    # Mark events that have a non-empty string body as unread.
+    body = event.content.get("body")
+    if isinstance(body, str) and body:
+        return True
+
+    # Mark some state events as unread.
+    if event.is_state() and event.type in STATE_EVENT_TYPES_TO_MARK_UNREAD:
+        return True
+
+    # Mark encrypted events as unread.
+    if not event.is_state() and event.type == EventTypes.Encrypted:
+        return True
+
+    return False
+
 
 def encode_json(json_object):
     """
@@ -196,6 +237,10 @@ class PersistEventsStore:
 
                 event_counter.labels(event.type, origin_type, origin_entity).inc()
 
+                self.store.get_unread_message_count_for_user.invalidate_many(
+                    (event.room_id,),
+                )
+
             for room_id, new_state in current_state_for_room.items():
                 self.store.get_current_state_ids.prefill((room_id,), new_state)
 
@@ -817,8 +862,9 @@ class PersistEventsStore:
                     "contains_url": (
                         "url" in event.content and isinstance(event.content["url"], str)
                     ),
+                    "count_as_unread": should_count_as_unread(event, context),
                 }
-                for event, _ in events_and_contexts
+                for event, context in events_and_contexts
             ],
         )
 
diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py
index e812c67078..b03b259636 100644
--- a/synapse/storage/data_stores/main/events_worker.py
+++ b/synapse/storage/data_stores/main/events_worker.py
@@ -41,9 +41,15 @@ from synapse.replication.tcp.streams import BackfillStream
 from synapse.replication.tcp.streams.events import EventsStream
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.database import Database
+from synapse.storage.types import Cursor
 from synapse.storage.util.id_generators import StreamIdGenerator
 from synapse.types import get_domain_from_id
-from synapse.util.caches.descriptors import Cache, cached, cachedInlineCallbacks
+from synapse.util.caches.descriptors import (
+    Cache,
+    _CacheContext,
+    cached,
+    cachedInlineCallbacks,
+)
 from synapse.util.iterutils import batch_iter
 from synapse.util.metrics import Measure
 
@@ -1358,6 +1364,84 @@ class EventsWorkerStore(SQLBaseStore):
             desc="get_next_event_to_expire", func=get_next_event_to_expire_txn
         )
 
+    @cached(tree=True, cache_context=True)
+    async def get_unread_message_count_for_user(
+        self, room_id: str, user_id: str, cache_context: _CacheContext,
+    ) -> int:
+        """Retrieve the count of unread messages for the given room and user.
+
+        Args:
+            room_id: The ID of the room to count unread messages in.
+            user_id: The ID of the user to count unread messages for.
+
+        Returns:
+            The number of unread messages for the given user in the given room.
+        """
+        with Measure(self._clock, "get_unread_message_count_for_user"):
+            last_read_event_id = await self.get_last_receipt_event_id_for_user(
+                user_id=user_id,
+                room_id=room_id,
+                receipt_type="m.read",
+                on_invalidate=cache_context.invalidate,
+            )
+
+            return await self.db.runInteraction(
+                "get_unread_message_count_for_user",
+                self._get_unread_message_count_for_user_txn,
+                user_id,
+                room_id,
+                last_read_event_id,
+            )
+
+    def _get_unread_message_count_for_user_txn(
+        self,
+        txn: Cursor,
+        user_id: str,
+        room_id: str,
+        last_read_event_id: Optional[str],
+    ) -> int:
+        if last_read_event_id:
+            # Get the stream ordering for the last read event.
+            stream_ordering = self.db.simple_select_one_onecol_txn(
+                txn=txn,
+                table="events",
+                keyvalues={"room_id": room_id, "event_id": last_read_event_id},
+                retcol="stream_ordering",
+            )
+        else:
+            # If there's no read receipt for that room, it probably means the user hasn't
+            # opened it yet, in which case use the stream ID of their join event.
+            # We can't just set it to 0, otherwise messages sent by other local
+            # users before this user joined would be counted as well.
+            txn.execute(
+                """
+                SELECT stream_ordering FROM local_current_membership
+                LEFT JOIN events USING (event_id, room_id)
+                WHERE membership = 'join'
+                    AND user_id = ?
+                    AND room_id = ?
+                """,
+                (user_id, room_id),
+            )
+            row = txn.fetchone()
+
+            if row is None:
+                return 0
+
+            stream_ordering = row[0]
+
+        # Count the messages that qualify as unread after the stream ordering we've just
+        # retrieved.
+        sql = """
+            SELECT COUNT(*) FROM events
+            WHERE sender != ? AND room_id = ? AND stream_ordering > ? AND count_as_unread
+        """
+
+        txn.execute(sql, (user_id, room_id, stream_ordering))
+        row = txn.fetchone()
+
+        return row[0] if row else 0
+
 
 AllNewEventsResult = namedtuple(
     "AllNewEventsResult",
diff --git a/synapse/storage/data_stores/main/purge_events.py b/synapse/storage/data_stores/main/purge_events.py
index 6546569139..b53fe35c33 100644
--- a/synapse/storage/data_stores/main/purge_events.py
+++ b/synapse/storage/data_stores/main/purge_events.py
@@ -62,6 +62,7 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
         #     event_json
         #     event_push_actions
         #     event_reference_hashes
+        #     event_relations
         #     event_search
         #     event_to_state_groups
         #     events
@@ -209,6 +210,7 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
             "event_edges",
             "event_forward_extremities",
             "event_reference_hashes",
+            "event_relations",
             "event_search",
             "rejections",
         ):
diff --git a/synapse/storage/data_stores/main/push_rule.py b/synapse/storage/data_stores/main/push_rule.py
index c10da245d2..861050814d 100644
--- a/synapse/storage/data_stores/main/push_rule.py
+++ b/synapse/storage/data_stores/main/push_rule.py
@@ -284,7 +284,7 @@ class PushRulesWorkerStore(
             # To do this we set the state_group to a new object as object() != object()
             state_group = object()
 
-        current_state_ids = yield context.get_current_state_ids()
+        current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
         result = yield self._bulk_get_push_rules_for_room(
             event.room_id, state_group, current_state_ids, event=event
         )
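
`context.get_current_state_ids()` is now a coroutine, but this call site is still an `@defer.inlineCallbacks` generator, hence the `defer.ensureDeferred` wrapper that turns the coroutine into something `yield` understands. The same bridge in isolation (the coroutine below is a stand-in, not the real context object):

    from twisted.internet import defer

    async def get_current_state_ids():
        # Stand-in for the now-async snapshot method.
        return {("m.room.member", "@alice:test"): "$membership_event"}

    @defer.inlineCallbacks
    def legacy_caller():
        # A bare coroutine can't be yielded from inlineCallbacks code,
        # so wrap it into a Deferred first.
        state_ids = yield defer.ensureDeferred(get_current_state_ids())
        return state_ids

    legacy_caller().addCallback(print)
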
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index d2e1e36e7f..ab48052cdc 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -23,8 +23,6 @@ from typing import Any, Dict, List, Optional, Tuple
 
 from canonicaljson import json
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes
 from synapse.api.errors import StoreError
 from synapse.api.room_versions import RoomVersion, RoomVersions
@@ -32,7 +30,7 @@ from synapse.storage._base import SQLBaseStore, db_to_json
 from synapse.storage.data_stores.main.search import SearchStore
 from synapse.storage.database import Database, LoggingTransaction
 from synapse.types import ThirdPartyInstanceID
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
+from synapse.util.caches.descriptors import cached
 
 logger = logging.getLogger(__name__)
 
@@ -192,8 +190,7 @@ class RoomWorkerStore(SQLBaseStore):
 
         return self.db.runInteraction("count_public_rooms", _count_public_rooms_txn)
 
-    @defer.inlineCallbacks
-    def get_largest_public_rooms(
+    async def get_largest_public_rooms(
         self,
         network_tuple: Optional[ThirdPartyInstanceID],
         search_filter: Optional[dict],
@@ -330,10 +327,10 @@ class RoomWorkerStore(SQLBaseStore):
 
             return results
 
-        ret_val = yield self.db.runInteraction(
+        ret_val = await self.db.runInteraction(
             "get_largest_public_rooms", _get_largest_public_rooms_txn
         )
-        defer.returnValue(ret_val)
+        return ret_val
 
     @cached(max_entries=10000)
     def is_room_blocked(self, room_id):
@@ -509,8 +506,8 @@ class RoomWorkerStore(SQLBaseStore):
             "get_rooms_paginate", _get_rooms_paginate_txn,
         )
 
-    @cachedInlineCallbacks(max_entries=10000)
-    def get_ratelimit_for_user(self, user_id):
+    @cached(max_entries=10000)
+    async def get_ratelimit_for_user(self, user_id):
         """Check if there are any overrides for ratelimiting for the given
         user
 
@@ -522,7 +519,7 @@ class RoomWorkerStore(SQLBaseStore):
-            of RatelimitOverride are None or 0 then ratelimitng has been
+            of RatelimitOverride are None or 0 then ratelimiting has been
             disabled for that user entirely.
         """
-        row = yield self.db.simple_select_one(
+        row = await self.db.simple_select_one(
             table="ratelimit_override",
             keyvalues={"user_id": user_id},
             retcols=("messages_per_second", "burst_count"),
@@ -538,8 +535,8 @@ class RoomWorkerStore(SQLBaseStore):
         else:
             return None
 
-    @cachedInlineCallbacks()
-    def get_retention_policy_for_room(self, room_id):
+    @cached()
+    async def get_retention_policy_for_room(self, room_id):
         """Get the retention policy for a given room.
 
         If no retention policy has been found for this room, returns a policy defined
@@ -566,19 +563,17 @@ class RoomWorkerStore(SQLBaseStore):
 
             return self.db.cursor_to_dict(txn)
 
-        ret = yield self.db.runInteraction(
+        ret = await self.db.runInteraction(
             "get_retention_policy_for_room", get_retention_policy_for_room_txn,
         )
 
         # If we don't know this room ID, ret will be None, in this case return the default
         # policy.
         if not ret:
-            defer.returnValue(
-                {
-                    "min_lifetime": self.config.retention_default_min_lifetime,
-                    "max_lifetime": self.config.retention_default_max_lifetime,
-                }
-            )
+            return {
+                "min_lifetime": self.config.retention_default_min_lifetime,
+                "max_lifetime": self.config.retention_default_max_lifetime,
+            }
 
         row = ret[0]
 
@@ -592,7 +587,7 @@ class RoomWorkerStore(SQLBaseStore):
         if row["max_lifetime"] is None:
             row["max_lifetime"] = self.config.retention_default_max_lifetime
 
-        defer.returnValue(row)
+        return row
 
     def get_media_mxcs_in_room(self, room_id):
         """Retrieves all the local and remote media MXC URIs in a given room
@@ -881,8 +876,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
             self._background_add_rooms_room_version_column,
         )
 
-    @defer.inlineCallbacks
-    def _background_insert_retention(self, progress, batch_size):
+    async def _background_insert_retention(self, progress, batch_size):
         """Retrieves a list of all rooms within a range and inserts an entry for each of
         them into the room_retention table.
         NULLs the property's columns if missing from the retention event in the room's
@@ -940,14 +934,14 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
             else:
                 return False
 
-        end = yield self.db.runInteraction(
+        end = await self.db.runInteraction(
             "insert_room_retention", _background_insert_retention_txn,
         )
 
         if end:
-            yield self.db.updates._end_background_update("insert_room_retention")
+            await self.db.updates._end_background_update("insert_room_retention")
 
-        defer.returnValue(batch_size)
+        return batch_size
 
     async def _background_add_rooms_room_version_column(
         self, progress: dict, batch_size: int
@@ -1096,8 +1090,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
             lock=False,
         )
 
-    @defer.inlineCallbacks
-    def store_room(
+    async def store_room(
         self,
         room_id: str,
         room_creator_user_id: str,
@@ -1140,7 +1133,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                     )
 
             with self._public_room_id_gen.get_next() as next_id:
-                yield self.db.runInteraction("store_room_txn", store_room_txn, next_id)
+                await self.db.runInteraction("store_room_txn", store_room_txn, next_id)
         except Exception as e:
             logger.error("store_room with room_id=%s failed: %s", room_id, e)
             raise StoreError(500, "Problem creating room.")
@@ -1165,8 +1158,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
             lock=False,
         )
 
-    @defer.inlineCallbacks
-    def set_room_is_public(self, room_id, is_public):
+    async def set_room_is_public(self, room_id, is_public):
         def set_room_is_public_txn(txn, next_id):
             self.db.simple_update_one_txn(
                 txn,
@@ -1206,13 +1198,12 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                 )
 
         with self._public_room_id_gen.get_next() as next_id:
-            yield self.db.runInteraction(
+            await self.db.runInteraction(
                 "set_room_is_public", set_room_is_public_txn, next_id
             )
         self.hs.get_notifier().on_new_replication_data()
 
-    @defer.inlineCallbacks
-    def set_room_is_public_appservice(
+    async def set_room_is_public_appservice(
         self, room_id, appservice_id, network_id, is_public
     ):
         """Edit the appservice/network specific public room list.
@@ -1287,7 +1278,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                 )
 
         with self._public_room_id_gen.get_next() as next_id:
-            yield self.db.runInteraction(
+            await self.db.runInteraction(
                 "set_room_is_public_appservice",
                 set_room_is_public_appservice_txn,
                 next_id,
@@ -1327,52 +1318,47 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
     def get_current_public_room_stream_id(self):
         return self._public_room_id_gen.get_current_token()
 
-    @defer.inlineCallbacks
-    def block_room(self, room_id, user_id):
+    async def block_room(self, room_id: str, user_id: str) -> None:
         """Marks the room as blocked. Can be called multiple times.
 
         Args:
-            room_id (str): Room to block
-            user_id (str): Who blocked it
-
-        Returns:
-            Deferred
+            room_id: Room to block
+            user_id: Who blocked it
         """
-        yield self.db.simple_upsert(
+        await self.db.simple_upsert(
             table="blocked_rooms",
             keyvalues={"room_id": room_id},
             values={},
             insertion_values={"user_id": user_id},
             desc="block_room",
         )
-        yield self.db.runInteraction(
+        await self.db.runInteraction(
             "block_room_invalidation",
             self._invalidate_cache_and_stream,
             self.is_room_blocked,
             (room_id,),
         )
 
-    @defer.inlineCallbacks
-    def get_rooms_for_retention_period_in_range(
-        self, min_ms, max_ms, include_null=False
-    ):
+    async def get_rooms_for_retention_period_in_range(
+        self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False
+    ) -> Dict[str, dict]:
         """Retrieves all of the rooms within the given retention range.
 
         Optionally includes the rooms which don't have a retention policy.
 
         Args:
-            min_ms (int|None): Duration in milliseconds that define the lower limit of
+            min_ms: Duration in milliseconds that defines the lower limit of
                 the range to handle (exclusive). If None, doesn't set a lower limit.
-            max_ms (int|None): Duration in milliseconds that define the upper limit of
+            max_ms: Duration in milliseconds that defines the upper limit of
                 the range to handle (inclusive). If None, doesn't set an upper limit.
-            include_null (bool): Whether to include rooms which retention policy is NULL
+            include_null: Whether to include rooms whose retention policy is NULL
                 in the returned set.
 
         Returns:
-            dict[str, dict]: The rooms within this range, along with their retention
-                policy. The key is "room_id", and maps to a dict describing the retention
-                policy associated with this room ID. The keys for this nested dict are
-                "min_lifetime" (int|None), and "max_lifetime" (int|None).
+            The rooms within this range, along with their retention
+            policy. Each key is a room ID, and maps to a dict describing the
+            retention policy associated with that room. The keys for this nested
+            dict are "min_lifetime" (int|None) and "max_lifetime" (int|None).
         """
 
         def get_rooms_for_retention_period_in_range_txn(txn):
@@ -1431,9 +1417,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
 
             return rooms_dict
 
-        rooms = yield self.db.runInteraction(
+        rooms = await self.db.runInteraction(
             "get_rooms_for_retention_period_in_range",
             get_rooms_for_retention_period_in_range_txn,
         )
 
-        defer.returnValue(rooms)
+        return rooms
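
The `@cachedInlineCallbacks` to `@cached` + `async def` swap in this file works because Synapse's descriptor caches the returned awaitable rather than re-invoking the function on every call. A toy version of that idea for plain coroutines (illustrative only; Synapse's `cached` additionally handles invalidation, size limits, and logging contexts):

    import asyncio
    from functools import wraps

    def async_cached(func):
        """Cache the awaitable, not the result: concurrent callers share one run."""
        cache = {}

        @wraps(func)
        def wrapper(*args):
            if args not in cache:
                cache[args] = asyncio.ensure_future(func(*args))
            return cache[args]

        return wrapper

    @async_cached
    async def get_ratelimit_for_user(user_id):
        print("querying for", user_id)  # printed once despite two awaits
        return {"messages_per_second": 10, "burst_count": 100}

    async def main():
        first = await get_ratelimit_for_user("@alice:test")
        second = await get_ratelimit_for_user("@alice:test")
        assert first is second

    asyncio.run(main())
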
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index 29765890ee..a92e401e88 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -497,7 +497,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             # To do this we set the state_group to a new object as object() != object()
             state_group = object()
 
-        current_state_ids = yield context.get_current_state_ids()
+        current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
         result = yield self._get_joined_users_from_context(
             event.room_id, state_group, current_state_ids, event=event, context=context
         )
diff --git a/synapse/storage/data_stores/main/schema/delta/58/12unread_messages.sql b/synapse/storage/data_stores/main/schema/delta/58/12unread_messages.sql
new file mode 100644
index 0000000000..531b532c73
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/58/12unread_messages.sql
@@ -0,0 +1,18 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Store a boolean value in the events table for whether the event should be counted in
+-- the unread_count property of sync responses.
+ALTER TABLE events ADD COLUMN count_as_unread BOOLEAN;
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index bb38a04ede..a360699408 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -16,12 +16,12 @@
 import collections.abc
 import logging
 from collections import namedtuple
-
-from twisted.internet import defer
+from typing import Iterable, Optional, Set
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
+from synapse.events import EventBase
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.data_stores.main.roommember import RoomMemberWorkerStore
@@ -108,28 +108,27 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         create_event = await self.get_create_event_for_room(room_id)
         return create_event.content.get("room_version", "1")
 
-    @defer.inlineCallbacks
-    def get_room_predecessor(self, room_id):
+    async def get_room_predecessor(self, room_id: str) -> Optional[dict]:
         """Get the predecessor of an upgraded room if it exists.
         Otherwise return None.
 
         Args:
-            room_id (str)
+            room_id: The room ID.
 
         Returns:
-            Deferred[dict|None]: A dictionary containing the structure of the predecessor
-                field from the room's create event. The structure is subject to other servers,
-                but it is expected to be:
-                    * room_id (str): The room ID of the predecessor room
-                    * event_id (str): The ID of the tombstone event in the predecessor room
+            A dictionary containing the structure of the predecessor
+            field from the room's create event. The structure is determined by other servers,
+            but it is expected to be:
+                * room_id (str): The room ID of the predecessor room
+                * event_id (str): The ID of the tombstone event in the predecessor room
 
-                None if a predecessor key is not found, or is not a dictionary.
+            None if a predecessor key is not found, or is not a dictionary.
 
         Raises:
             NotFoundError if the given room is unknown
         """
         # Retrieve the room's create event
-        create_event = yield self.get_create_event_for_room(room_id)
+        create_event = await self.get_create_event_for_room(room_id)
 
         # Retrieve the predecessor key of the create event
         predecessor = create_event.content.get("predecessor", None)
@@ -140,20 +139,19 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         return predecessor
 
-    @defer.inlineCallbacks
-    def get_create_event_for_room(self, room_id):
+    async def get_create_event_for_room(self, room_id: str) -> EventBase:
         """Get the create state event for a room.
 
         Args:
-            room_id (str)
+            room_id: The room ID.
 
         Returns:
-            Deferred[EventBase]: The room creation event.
+            The room creation event.
 
         Raises:
             NotFoundError if the room is unknown
         """
-        state_ids = yield self.get_current_state_ids(room_id)
+        state_ids = await self.get_current_state_ids(room_id)
         create_id = state_ids.get((EventTypes.Create, ""))
 
         # If we can't find the create event, assume we've hit a dead end
@@ -161,7 +159,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             raise NotFoundError("Unknown room %s" % (room_id,))
 
         # Retrieve the room's create event and return
-        create_event = yield self.get_event(create_id)
+        create_event = await self.get_event(create_id)
         return create_event
 
     @cached(max_entries=100000, iterable=True)
@@ -237,18 +235,17 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             "get_filtered_current_state_ids", _get_filtered_current_state_ids_txn
         )
 
-    @defer.inlineCallbacks
-    def get_canonical_alias_for_room(self, room_id):
+    async def get_canonical_alias_for_room(self, room_id: str) -> Optional[str]:
         """Get canonical alias for room, if any
 
         Args:
-            room_id (str)
+            room_id: The room ID
 
         Returns:
-            Deferred[str|None]: The canonical alias, if any
+            The canonical alias, if any
         """
 
-        state = yield self.get_filtered_current_state_ids(
+        state = await self.get_filtered_current_state_ids(
             room_id, StateFilter.from_types([(EventTypes.CanonicalAlias, "")])
         )
 
@@ -256,7 +253,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         if not event_id:
             return
 
-        event = yield self.get_event(event_id, allow_none=True)
+        event = await self.get_event(event_id, allow_none=True)
         if not event:
             return
 
@@ -292,19 +289,19 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         return {row["event_id"]: row["state_group"] for row in rows}
 
-    @defer.inlineCallbacks
-    def get_referenced_state_groups(self, state_groups):
+    async def get_referenced_state_groups(
+        self, state_groups: Iterable[int]
+    ) -> Set[int]:
         """Check if the state groups are referenced by events.
 
         Args:
-            state_groups (Iterable[int])
+            state_groups
 
         Returns:
-            Deferred[set[int]]: The subset of state groups that are
-            referenced.
+            The subset of state groups that are referenced.
         """
 
-        rows = yield self.db.simple_select_many_batch(
+        rows = await self.db.simple_select_many_batch(
             table="event_to_state_groups",
             column="state_group",
             iterable=state_groups,
diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py
index 380c1ec7da..922400a7c3 100644
--- a/synapse/storage/data_stores/main/stats.py
+++ b/synapse/storage/data_stores/main/stats.py
@@ -16,8 +16,8 @@
 
 import logging
 from itertools import chain
+from typing import Tuple
 
-from twisted.internet import defer
 from twisted.internet.defer import DeferredLock
 
 from synapse.api.constants import EventTypes, Membership
@@ -97,13 +97,12 @@ class StatsStore(StateDeltasStore):
         """
         return (ts // self.stats_bucket_size) * self.stats_bucket_size
 
-    @defer.inlineCallbacks
-    def _populate_stats_process_users(self, progress, batch_size):
+    async def _populate_stats_process_users(self, progress, batch_size):
         """
         This is a background update which regenerates statistics for users.
         """
         if not self.stats_enabled:
-            yield self.db.updates._end_background_update("populate_stats_process_users")
+            await self.db.updates._end_background_update("populate_stats_process_users")
             return 1
 
         last_user_id = progress.get("last_user_id", "")
@@ -118,20 +117,20 @@ class StatsStore(StateDeltasStore):
             txn.execute(sql, (last_user_id, batch_size))
             return [r for r, in txn]
 
-        users_to_work_on = yield self.db.runInteraction(
+        users_to_work_on = await self.db.runInteraction(
             "_populate_stats_process_users", _get_next_batch
         )
 
-        # No more rooms -- complete the transaction.
+        # No more users -- complete the background update.
         if not users_to_work_on:
-            yield self.db.updates._end_background_update("populate_stats_process_users")
+            await self.db.updates._end_background_update("populate_stats_process_users")
             return 1
 
         for user_id in users_to_work_on:
-            yield self._calculate_and_set_initial_state_for_user(user_id)
+            await self._calculate_and_set_initial_state_for_user(user_id)
             progress["last_user_id"] = user_id
 
-        yield self.db.runInteraction(
+        await self.db.runInteraction(
             "populate_stats_process_users",
             self.db.updates._background_update_progress_txn,
             "populate_stats_process_users",
@@ -140,13 +139,12 @@ class StatsStore(StateDeltasStore):
 
         return len(users_to_work_on)
 
-    @defer.inlineCallbacks
-    def _populate_stats_process_rooms(self, progress, batch_size):
+    async def _populate_stats_process_rooms(self, progress, batch_size):
         """
         This is a background update which regenerates statistics for rooms.
         """
         if not self.stats_enabled:
-            yield self.db.updates._end_background_update("populate_stats_process_rooms")
+            await self.db.updates._end_background_update("populate_stats_process_rooms")
             return 1
 
         last_room_id = progress.get("last_room_id", "")
@@ -161,20 +159,20 @@ class StatsStore(StateDeltasStore):
             txn.execute(sql, (last_room_id, batch_size))
             return [r for r, in txn]
 
-        rooms_to_work_on = yield self.db.runInteraction(
+        rooms_to_work_on = await self.db.runInteraction(
             "populate_stats_rooms_get_batch", _get_next_batch
         )
 
         # No more rooms -- complete the transaction.
         if not rooms_to_work_on:
-            yield self.db.updates._end_background_update("populate_stats_process_rooms")
+            await self.db.updates._end_background_update("populate_stats_process_rooms")
             return 1
 
         for room_id in rooms_to_work_on:
-            yield self._calculate_and_set_initial_state_for_room(room_id)
+            await self._calculate_and_set_initial_state_for_room(room_id)
             progress["last_room_id"] = room_id
 
-        yield self.db.runInteraction(
+        await self.db.runInteraction(
             "_populate_stats_process_rooms",
             self.db.updates._background_update_progress_txn,
             "populate_stats_process_rooms",
@@ -696,16 +694,16 @@ class StatsStore(StateDeltasStore):
 
         return room_deltas, user_deltas
 
-    @defer.inlineCallbacks
-    def _calculate_and_set_initial_state_for_room(self, room_id):
+    async def _calculate_and_set_initial_state_for_room(
+        self, room_id: str
+    ) -> Tuple[dict, dict, int]:
         """Calculate and insert an entry into room_stats_current.
 
         Args:
-            room_id (str)
+            room_id: The ID of the room to calculate stats for.
 
         Returns:
-            Deferred[tuple[dict, dict, int]]: A tuple of room state, membership
-            counts and stream position.
+            A tuple of room state, membership counts and stream position.
         """
 
         def _fetch_current_state_stats(txn):
@@ -767,11 +765,11 @@ class StatsStore(StateDeltasStore):
             current_state_events_count,
             users_in_room,
             pos,
-        ) = yield self.db.runInteraction(
+        ) = await self.db.runInteraction(
             "get_initial_state_for_room", _fetch_current_state_stats
         )
 
-        state_event_map = yield self.get_events(event_ids, get_prev_content=False)
+        state_event_map = await self.get_events(event_ids, get_prev_content=False)
 
         room_state = {
             "join_rules": None,
@@ -806,11 +804,11 @@ class StatsStore(StateDeltasStore):
                     event.content.get("m.federate", True) is True
                 )
 
-        yield self.update_room_state(room_id, room_state)
+        await self.update_room_state(room_id, room_state)
 
         local_users_in_room = [u for u in users_in_room if self.hs.is_mine_id(u)]
 
-        yield self.update_stats_delta(
+        await self.update_stats_delta(
             ts=self.clock.time_msec(),
             stats_type="room",
             stats_id=room_id,
@@ -826,8 +824,7 @@ class StatsStore(StateDeltasStore):
             },
         )
 
-    @defer.inlineCallbacks
-    def _calculate_and_set_initial_state_for_user(self, user_id):
+    async def _calculate_and_set_initial_state_for_user(self, user_id):
         def _calculate_and_set_initial_state_for_user_txn(txn):
             pos = self._get_max_stream_id_in_current_state_deltas_txn(txn)
 
@@ -842,12 +839,12 @@ class StatsStore(StateDeltasStore):
             (count,) = txn.fetchone()
             return count, pos
 
-        joined_rooms, pos = yield self.db.runInteraction(
+        joined_rooms, pos = await self.db.runInteraction(
             "calculate_and_set_initial_state_for_user",
             _calculate_and_set_initial_state_for_user_txn,
         )
 
-        yield self.update_stats_delta(
+        await self.update_stats_delta(
             ts=self.clock.time_msec(),
             stats_type="user",
             stats_id=user_id,
diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py
index 5e32c7aa1e..10d39b3699 100644
--- a/synapse/storage/data_stores/main/stream.py
+++ b/synapse/storage/data_stores/main/stream.py
@@ -255,7 +255,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         self._instance_name = hs.get_instance_name()
         self._send_federation = hs.should_send_federation()
-        self._federation_shard_config = hs.config.federation.federation_shard_config
+        self._federation_shard_config = hs.config.worker.federation_shard_config
 
         # If we're a process that sends federation we may need to reset the
         # `federation_stream_position` table to match the current sharding
diff --git a/synapse/storage/data_stores/main/user_directory.py b/synapse/storage/data_stores/main/user_directory.py
index 6b8130bf0f..942e51fd3a 100644
--- a/synapse/storage/data_stores/main/user_directory.py
+++ b/synapse/storage/data_stores/main/user_directory.py
@@ -198,7 +198,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                     room_id
                 )
 
-                users_with_profile = yield state.get_current_users_in_room(room_id)
+                users_with_profile = yield defer.ensureDeferred(
+                    state.get_current_users_in_room(room_id)
+                )
                 user_ids = set(users_with_profile)
 
                 # Update each user in the user directory.
diff --git a/synapse/storage/data_stores/main/user_erasure_store.py b/synapse/storage/data_stores/main/user_erasure_store.py
index ec6b8a4ffd..d3038ff06d 100644
--- a/synapse/storage/data_stores/main/user_erasure_store.py
+++ b/synapse/storage/data_stores/main/user_erasure_store.py
@@ -70,11 +70,11 @@ class UserErasureWorkerStore(SQLBaseStore):
 
 
 class UserErasureStore(UserErasureWorkerStore):
-    def mark_user_erased(self, user_id):
+    def mark_user_erased(self, user_id: str) -> None:
         """Indicate that user_id wishes their message history to be erased.
 
         Args:
-            user_id (str): full user_id to be erased
+            user_id: full user_id to be erased
         """
 
         def f(txn):
@@ -89,3 +89,25 @@ class UserErasureStore(UserErasureWorkerStore):
             self._invalidate_cache_and_stream(txn, self.is_user_erased, (user_id,))
 
         return self.db.runInteraction("mark_user_erased", f)
+
+    def mark_user_not_erased(self, user_id: str) -> None:
+        """Indicate that user_id is no longer erased.
+
+        Args:
+            user_id: full user_id to be un-erased
+        """
+
+        def f(txn):
+            # first check if they are already in the list
+            txn.execute("SELECT 1 FROM erased_users WHERE user_id = ?", (user_id,))
+            if not txn.fetchone():
+                return
+
+            # They are there, delete them.
+            self.simple_delete_one_txn(
+                txn, "erased_users", keyvalues={"user_id": user_id}
+            )
+
+            self._invalidate_cache_and_stream(txn, self.is_user_erased, (user_id,))
+
+        return self.db.runInteraction("mark_user_not_erased", f)
diff --git a/synapse/storage/data_stores/state/store.py b/synapse/storage/data_stores/state/store.py
index 128c09a2cf..7dada7f75f 100644
--- a/synapse/storage/data_stores/state/store.py
+++ b/synapse/storage/data_stores/state/store.py
@@ -139,10 +139,9 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             "get_state_group_delta", _get_state_group_delta_txn
         )
 
-    @defer.inlineCallbacks
-    def _get_state_groups_from_groups(
+    async def _get_state_groups_from_groups(
         self, groups: List[int], state_filter: StateFilter
-    ):
+    ) -> Dict[int, StateMap[str]]:
         """Returns the state groups for a given set of groups from the
         database, filtering on types of state events.
 
@@ -151,13 +150,13 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             state_filter: The state filter used to fetch state
                 from the database.
         Returns:
-            Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
+            Dict of state group to state map.
         """
         results = {}
 
         chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)]
         for chunk in chunks:
-            res = yield self.db.runInteraction(
+            res = await self.db.runInteraction(
                 "_get_state_groups_from_groups",
                 self._get_state_groups_from_groups_txn,
                 chunk,
@@ -206,10 +205,9 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
 
         return state_filter.filter_state(state_dict_ids), not missing_types
 
-    @defer.inlineCallbacks
-    def _get_state_for_groups(
+    async def _get_state_for_groups(
         self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all()
-    ):
+    ) -> Dict[int, StateMap[str]]:
         """Gets the state at each of a list of state groups, optionally
         filtering by type/state_key
 
@@ -219,7 +217,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             state_filter: The state filter used to fetch state
                 from the database.
         Returns:
-            Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
+            Dict of state group to state map.
         """
 
         member_filter, non_member_filter = state_filter.get_member_split()
@@ -228,14 +226,11 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         (
             non_member_state,
             incomplete_groups_nm,
-        ) = yield self._get_state_for_groups_using_cache(
+        ) = self._get_state_for_groups_using_cache(
             groups, self._state_group_cache, state_filter=non_member_filter
         )
 
-        (
-            member_state,
-            incomplete_groups_m,
-        ) = yield self._get_state_for_groups_using_cache(
+        (member_state, incomplete_groups_m) = self._get_state_for_groups_using_cache(
             groups, self._state_group_members_cache, state_filter=member_filter
         )
 
@@ -256,7 +251,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         # Help the cache hit ratio by expanding the filter a bit
         db_state_filter = state_filter.return_expanded()
 
-        group_to_state_dict = yield self._get_state_groups_from_groups(
+        group_to_state_dict = await self._get_state_groups_from_groups(
             list(incomplete_groups), state_filter=db_state_filter
         )
 
@@ -576,19 +571,19 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             ((sg,) for sg in state_groups_to_delete),
         )
 
-    @defer.inlineCallbacks
-    def get_previous_state_groups(self, state_groups):
+    async def get_previous_state_groups(
+        self, state_groups: Iterable[int]
+    ) -> Dict[int, int]:
         """Fetch the previous groups of the given state groups.
 
         Args:
-            state_groups (Iterable[int])
+            state_groups
 
         Returns:
-            Deferred[dict[int, int]]: mapping from state group to previous
-            state group.
+            A mapping from state group to previous state group.
         """
 
-        rows = yield self.db.simple_select_many_batch(
+        rows = await self.db.simple_select_many_batch(
             table="state_group_edges",
             column="prev_state_group",
             iterable=state_groups,
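
Both `simple_select_many_batch` and the explicit `groups[i : i + 100]` slicing in `_get_state_groups_from_groups` bound how many values end up in a single `WHERE ... IN` clause. The slicing idiom in isolation:

    def chunked(seq, size=100):
        """Split a sequence into consecutive batches of at most size items."""
        return [seq[i : i + size] for i in range(0, len(seq), size)]

    assert chunked(list(range(250))) == [
        list(range(100)),
        list(range(100, 200)),
        list(range(200, 250)),
    ]
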
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 3be20c866a..ce8757a400 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -49,11 +49,11 @@ from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3E
 from synapse.storage.types import Connection, Cursor
 from synapse.types import Collection
 
-logger = logging.getLogger(__name__)
-
 # Python 3 does not have a maximum int value.
 MAX_TXN_ID = 2 ** 63 - 1
 
+logger = logging.getLogger(__name__)
+
 sql_logger = logging.getLogger("synapse.storage.SQL")
 transaction_logger = logging.getLogger("synapse.storage.txn")
 perf_logger = logging.getLogger("synapse.storage.TIME")
@@ -233,7 +233,7 @@ class LoggingTransaction:
         try:
             return func(sql, *args)
         except Exception as e:
-            logger.debug("[SQL FAIL] {%s} %s", self.name, e)
+            sql_logger.debug("[SQL FAIL] {%s} %s", self.name, e)
             raise
         finally:
             secs = time.time() - start
@@ -419,7 +419,7 @@ class Database(object):
                 except self.engine.module.OperationalError as e:
                     # This can happen if the database disappears mid
                     # transaction.
-                    logger.warning(
+                    transaction_logger.warning(
                         "[TXN OPERROR] {%s} %s %d/%d", name, e, i, N,
                     )
                     if i < N:
@@ -427,18 +427,20 @@ class Database(object):
                         try:
                             conn.rollback()
                         except self.engine.module.Error as e1:
-                            logger.warning("[TXN EROLL] {%s} %s", name, e1)
+                            transaction_logger.warning("[TXN EROLL] {%s} %s", name, e1)
                         continue
                     raise
                 except self.engine.module.DatabaseError as e:
                     if self.engine.is_deadlock(e):
-                        logger.warning("[TXN DEADLOCK] {%s} %d/%d", name, i, N)
+                        transaction_logger.warning(
+                            "[TXN DEADLOCK] {%s} %d/%d", name, i, N
+                        )
                         if i < N:
                             i += 1
                             try:
                                 conn.rollback()
                             except self.engine.module.Error as e1:
-                                logger.warning(
+                                transaction_logger.warning(
                                     "[TXN EROLL] {%s} %s", name, e1,
                                 )
                             continue
@@ -478,7 +480,7 @@ class Database(object):
                     # [2]: https://github.com/python/cpython/blob/v3.8.0/Modules/_sqlite/cursor.c#L236
                     cursor.close()
         except Exception as e:
-            logger.debug("[TXN FAIL] {%s} %s", name, e)
+            transaction_logger.debug("[TXN FAIL] {%s} %s", name, e)
             raise
         finally:
             end = monotonic_time()
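
These hunks route statement failures to the dedicated `synapse.storage.SQL` logger and transaction retries/failures to `synapse.storage.txn`, matching where the non-error messages for each already go. A minimal sketch of surfacing them with stdlib logging alone (a real deployment would do this through Synapse's logging config instead):

    import logging

    logging.basicConfig(level=logging.INFO)
    # Turn the dedicated storage loggers up to DEBUG so the "[SQL FAIL]"
    # and "[TXN FAIL]" messages become visible.
    logging.getLogger("synapse.storage.SQL").setLevel(logging.DEBUG)
    logging.getLogger("synapse.storage.txn").setLevel(logging.DEBUG)
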
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index fa46041676..4a164834d9 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -25,11 +25,10 @@ from prometheus_client import Counter, Histogram
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
-from synapse.events import FrozenEvent
+from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.state import StateResolutionStore
 from synapse.storage.data_stores import DataStores
 from synapse.storage.data_stores.main.events import DeltaState
 from synapse.types import StateMap
@@ -193,12 +192,11 @@ class EventsPersistenceStorage(object):
         self._event_persist_queue = _EventPeristenceQueue()
         self._state_resolution_handler = hs.get_state_resolution_handler()
 
-    @defer.inlineCallbacks
-    def persist_events(
+    async def persist_events(
         self,
-        events_and_contexts: List[Tuple[FrozenEvent, EventContext]],
+        events_and_contexts: List[Tuple[EventBase, EventContext]],
         backfilled: bool = False,
-    ):
+    ) -> int:
         """
         Write events to the database
         Args:
@@ -208,7 +206,7 @@ class EventsPersistenceStorage(object):
                 which might update the current state etc.
 
         Returns:
-            Deferred[int]: the stream ordering of the latest persisted event
+            the stream ordering of the latest persisted event
         """
         partitioned = {}
         for event, ctx in events_and_contexts:
@@ -224,22 +222,19 @@ class EventsPersistenceStorage(object):
         for room_id in partitioned:
             self._maybe_start_persisting(room_id)
 
-        yield make_deferred_yieldable(
+        await make_deferred_yieldable(
             defer.gatherResults(deferreds, consumeErrors=True)
         )
 
-        max_persisted_id = yield self.main_store.get_current_events_token()
-
-        return max_persisted_id
+        return self.main_store.get_current_events_token()
 
-    @defer.inlineCallbacks
-    def persist_event(
-        self, event: FrozenEvent, context: EventContext, backfilled: bool = False
-    ):
+    async def persist_event(
+        self, event: EventBase, context: EventContext, backfilled: bool = False
+    ) -> Tuple[int, int]:
         """
         Returns:
-            Deferred[Tuple[int, int]]: the stream ordering of ``event``,
-            and the stream ordering of the latest persisted event
+            The stream ordering of `event`, and the stream ordering of the
+            latest persisted event
         """
         deferred = self._event_persist_queue.add_to_queue(
             event.room_id, [(event, context)], backfilled=backfilled
@@ -247,9 +242,9 @@ class EventsPersistenceStorage(object):
 
         self._maybe_start_persisting(event.room_id)
 
-        yield make_deferred_yieldable(deferred)
+        await make_deferred_yieldable(deferred)
 
-        max_persisted_id = yield self.main_store.get_current_events_token()
+        max_persisted_id = self.main_store.get_current_events_token()
         return (event.internal_metadata.stream_ordering, max_persisted_id)
 
     def _maybe_start_persisting(self, room_id: str):
@@ -263,7 +258,7 @@ class EventsPersistenceStorage(object):
 
     async def _persist_events(
         self,
-        events_and_contexts: List[Tuple[FrozenEvent, EventContext]],
+        events_and_contexts: List[Tuple[EventBase, EventContext]],
         backfilled: bool = False,
     ):
         """Calculates the change to current state and forward extremities, and
@@ -440,7 +435,7 @@ class EventsPersistenceStorage(object):
     async def _calculate_new_extremities(
         self,
         room_id: str,
-        event_contexts: List[Tuple[FrozenEvent, EventContext]],
+        event_contexts: List[Tuple[EventBase, EventContext]],
         latest_event_ids: List[str],
     ):
         """Calculates the new forward extremities for a room given events to
@@ -498,7 +493,7 @@ class EventsPersistenceStorage(object):
     async def _get_new_state_after_events(
         self,
         room_id: str,
-        events_context: List[Tuple[FrozenEvent, EventContext]],
+        events_context: List[Tuple[EventBase, EventContext]],
         old_latest_event_ids: Iterable[str],
         new_latest_event_ids: Iterable[str],
     ) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]]]:
@@ -648,6 +643,10 @@ class EventsPersistenceStorage(object):
             room_version = await self.main_store.get_room_version_id(room_id)
 
         logger.debug("calling resolve_state_groups from preserve_events")
+
+        # Avoid a circular import.
+        from synapse.state import StateResolutionStore
+
         res = await self._state_resolution_handler.resolve_state_groups(
             room_id,
             room_version,
@@ -680,7 +679,7 @@ class EventsPersistenceStorage(object):
     async def _is_server_still_joined(
         self,
         room_id: str,
-        ev_ctx_rm: List[Tuple[FrozenEvent, EventContext]],
+        ev_ctx_rm: List[Tuple[EventBase, EventContext]],
         delta: DeltaState,
         current_state: Optional[StateMap[str]],
         potentially_left_users: Set[str],
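
With `persist_event` and `persist_events` converted to coroutines, callers receive plain values rather than Deferreds. A hypothetical call site, assuming `storage` is the top-level storage object with this `EventsPersistenceStorage` as its `persistence` attribute, and that `event` and `context` were built elsewhere:

    async def persist_and_report(storage, event, context):
        # persist_event now returns the tuple directly; there is no
        # Deferred to unwrap.
        stream_ordering, max_persisted_id = await storage.persistence.persist_event(
            event, context
        )
        return stream_ordering, max_persisted_id
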
diff --git a/synapse/storage/purge_events.py b/synapse/storage/purge_events.py
index fdc0abf5cf..79d9f06e2e 100644
--- a/synapse/storage/purge_events.py
+++ b/synapse/storage/purge_events.py
@@ -15,8 +15,7 @@
 
 import itertools
 import logging
-
-from twisted.internet import defer
+from typing import Set
 
 logger = logging.getLogger(__name__)
 
@@ -28,49 +27,48 @@ class PurgeEventsStorage(object):
     def __init__(self, hs, stores):
         self.stores = stores
 
-    @defer.inlineCallbacks
-    def purge_room(self, room_id: str):
+    async def purge_room(self, room_id: str) -> None:
         """Deletes all records of a room
         """
 
-        state_groups_to_delete = yield self.stores.main.purge_room(room_id)
-        yield self.stores.state.purge_room_state(room_id, state_groups_to_delete)
+        state_groups_to_delete = await self.stores.main.purge_room(room_id)
+        await self.stores.state.purge_room_state(room_id, state_groups_to_delete)
 
-    @defer.inlineCallbacks
-    def purge_history(self, room_id, token, delete_local_events):
+    async def purge_history(
+        self, room_id: str, token: str, delete_local_events: bool
+    ) -> None:
         """Deletes room history before a certain point
 
         Args:
-            room_id (str):
+            room_id: The room ID
 
-            token (str): A topological token to delete events before
+            token: A topological token to delete events before
 
-            delete_local_events (bool):
+            delete_local_events:
                 if True, we will delete local events as well as remote ones
                 (instead of just marking them as outliers and deleting their
                 state groups).
         """
-        state_groups = yield self.stores.main.purge_history(
+        state_groups = await self.stores.main.purge_history(
             room_id, token, delete_local_events
         )
 
         logger.info("[purge] finding state groups that can be deleted")
 
-        sg_to_delete = yield self._find_unreferenced_groups(state_groups)
+        sg_to_delete = await self._find_unreferenced_groups(state_groups)
 
-        yield self.stores.state.purge_unreferenced_state_groups(room_id, sg_to_delete)
+        await self.stores.state.purge_unreferenced_state_groups(room_id, sg_to_delete)
 
-    @defer.inlineCallbacks
-    def _find_unreferenced_groups(self, state_groups):
+    async def _find_unreferenced_groups(self, state_groups: Set[int]) -> Set[int]:
         """Used when purging history to figure out which state groups can be
         deleted.
 
         Args:
-            state_groups (set[int]): Set of state groups referenced by events
+            state_groups: Set of state groups referenced by events
                 that are going to be deleted.
 
         Returns:
-            Deferred[set[int]] The set of state groups that can be deleted.
+            The set of state groups that can be deleted.
         """
         # Graph of state group -> previous group
         graph = {}
@@ -93,7 +91,7 @@ class PurgeEventsStorage(object):
                 current_search = set(itertools.islice(next_to_search, 100))
                 next_to_search -= current_search
 
-            referenced = yield self.stores.main.get_referenced_state_groups(
+            referenced = await self.stores.main.get_referenced_state_groups(
                 current_search
             )
             referenced_groups |= referenced
@@ -102,7 +100,7 @@ class PurgeEventsStorage(object):
             # groups that are referenced.
             current_search -= referenced
 
-            edges = yield self.stores.state.get_previous_state_groups(current_search)
+            edges = await self.stores.state.get_previous_state_groups(current_search)
 
             prevs = set(edges.values())
             # We don't bother re-handling groups we've already seen
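
`purge_room` and `purge_history` are now awaited end to end. A hypothetical admin-side caller, assuming `storage.purge_events` names this `PurgeEventsStorage` instance:

    async def purge_before(storage, room_id: str, token: str) -> None:
        # Delete remote events before `token`; local events are kept and
        # only marked as outliers, since delete_local_events is False.
        await storage.purge_events.purge_history(
            room_id, token, delete_local_events=False
        )
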
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index dc568476f4..534883361f 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -14,13 +14,12 @@
 # limitations under the License.
 
 import logging
-from typing import Iterable, List, TypeVar
+from typing import Awaitable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar
 
 import attr
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes
+from synapse.events import EventBase
 from synapse.types import StateMap
 
 logger = logging.getLogger(__name__)
@@ -34,16 +33,16 @@ class StateFilter(object):
     """A filter used when querying for state.
 
     Attributes:
-        types (dict[str, set[str]|None]): Map from type to set of state keys (or
-            None). This specifies which state_keys for the given type to fetch
-            from the DB. If None then all events with that type are fetched. If
-            the set is empty then no events with that type are fetched.
-        include_others (bool): Whether to fetch events with types that do not
+        types: Map from type to set of state keys (or None). This specifies
+            which state_keys for the given type to fetch from the DB. If None
+            then all events with that type are fetched. If the set is empty
+            then no events with that type are fetched.
+        include_others: Whether to fetch events with types that do not
             appear in `types`.
     """
 
-    types = attr.ib()
-    include_others = attr.ib(default=False)
+    types = attr.ib(type=Dict[str, Optional[Set[str]]])
+    include_others = attr.ib(default=False, type=bool)
 
     def __attrs_post_init__(self):
         # If `include_others` is set we canonicalise the filter by removing
@@ -52,36 +51,35 @@ class StateFilter(object):
             self.types = {k: v for k, v in self.types.items() if v is not None}
 
     @staticmethod
-    def all():
+    def all() -> "StateFilter":
         """Creates a filter that fetches everything.
 
         Returns:
-            StateFilter
+            The new state filter.
         """
         return StateFilter(types={}, include_others=True)
 
     @staticmethod
-    def none():
+    def none() -> "StateFilter":
         """Creates a filter that fetches nothing.
 
         Returns:
-            StateFilter
+            The new state filter.
         """
         return StateFilter(types={}, include_others=False)
 
     @staticmethod
-    def from_types(types):
+    def from_types(types: Iterable[Tuple[str, Optional[str]]]) -> "StateFilter":
         """Creates a filter that only fetches the given types
 
         Args:
-            types (Iterable[tuple[str, str|None]]): A list of type and state
-                keys to fetch. A state_key of None fetches everything for
-                that type
+            types: A list of type and state keys to fetch. A state_key of None
+                fetches everything for that type
 
         Returns:
-            StateFilter
+            The new state filter.
         """
-        type_dict = {}
+        type_dict = {}  # type: Dict[str, Optional[Set[str]]]
         for typ, s in types:
             if typ in type_dict:
                 if type_dict[typ] is None:
@@ -91,24 +89,24 @@ class StateFilter(object):
                 type_dict[typ] = None
                 continue
 
-            type_dict.setdefault(typ, set()).add(s)
+            type_dict.setdefault(typ, set()).add(s)  # type: ignore
 
         return StateFilter(types=type_dict)
 
     @staticmethod
-    def from_lazy_load_member_list(members):
+    def from_lazy_load_member_list(members: Iterable[str]) -> "StateFilter":
         """Creates a filter that returns all non-member events, plus the member
         events for the given users
 
         Args:
-            members (iterable[str]): Set of user IDs
+            members: Set of user IDs
 
         Returns:
-            StateFilter
+            The new state filter
         """
         return StateFilter(types={EventTypes.Member: set(members)}, include_others=True)
 
-    def return_expanded(self):
+    def return_expanded(self) -> "StateFilter":
         """Creates a new StateFilter where type wild cards have been removed
         (except for memberships). The returned filter is a superset of the
         current one, i.e. anything that passes the current filter will pass
@@ -130,7 +128,7 @@ class StateFilter(object):
                return all non-member events
 
         Returns:
-            StateFilter
+            The new state filter.
         """
 
         if self.is_full():
@@ -167,7 +165,7 @@ class StateFilter(object):
                 include_others=True,
             )
 
-    def make_sql_filter_clause(self):
+    def make_sql_filter_clause(self) -> Tuple[str, List[str]]:
         """Converts the filter to an SQL clause.
 
         For example:
@@ -179,13 +177,12 @@ class StateFilter(object):
 
 
         Returns:
-            tuple[str, list]: The SQL string (may be empty) and arguments. An
-            empty SQL string is returned when the filter matches everything
-            (i.e. is "full").
+            The SQL string (may be empty) and arguments. An empty SQL string is
+            returned when the filter matches everything (i.e. is "full").
         """
 
         where_clause = ""
-        where_args = []
+        where_args = []  # type: List[str]
 
         if self.is_full():
             return where_clause, where_args
@@ -221,7 +218,7 @@ class StateFilter(object):
 
         return where_clause, where_args
 
-    def max_entries_returned(self):
+    def max_entries_returned(self) -> Optional[int]:
         """Returns the maximum number of entries this filter will return if
         known, otherwise returns None.
 
@@ -260,33 +257,33 @@ class StateFilter(object):
 
         return filtered_state
 
-    def is_full(self):
+    def is_full(self) -> bool:
         """Whether this filter fetches everything or not
 
         Returns:
-            bool
+            True if the filter fetches everything.
         """
         return self.include_others and not self.types
 
-    def has_wildcards(self):
+    def has_wildcards(self) -> bool:
         """Whether the filter includes wildcards or is attempting to fetch
         specific state.
 
         Returns:
-            bool
+            True if the filter includes wildcards.
         """
 
         return self.include_others or any(
             state_keys is None for state_keys in self.types.values()
         )
 
-    def concrete_types(self):
+    def concrete_types(self) -> List[Tuple[str, str]]:
         """Returns a list of concrete type/state_keys (i.e. not None) that
         will be fetched. This will be a complete list if `has_wildcards`
         returns False, but otherwise will be a subset (or even empty).
 
         Returns:
-            list[tuple[str,str]]
+            A list of type/state_keys tuples.
         """
         return [
             (t, s)
@@ -295,7 +292,7 @@ class StateFilter(object):
             for s in state_keys
         ]
 
-    def get_member_split(self):
+    def get_member_split(self) -> Tuple["StateFilter", "StateFilter"]:
         """Return the filter split into two: one which assumes it's exclusively
         matching against member state, and one which assumes it's matching
         against non member state.
@@ -307,7 +304,7 @@ class StateFilter(object):
         state caches).
 
         Returns:
-            tuple[StateFilter, StateFilter]: The member and non member filters
+            The member and non member filters
         """
 
         if EventTypes.Member in self.types:
@@ -340,6 +337,9 @@ class StateGroupStorage(object):
         """Given a state group try to return a previous group and a delta between
         the old and the new.
 
+        Args:
+            state_group: The state group used to retrieve state deltas.
+
         Returns:
             Deferred[Tuple[Optional[int], Optional[StateMap[str]]]]:
                 (prev_group, delta_ids)
@@ -347,55 +347,59 @@ class StateGroupStorage(object):
 
         return self.stores.state.get_state_group_delta(state_group)
 
-    @defer.inlineCallbacks
-    def get_state_groups_ids(self, _room_id, event_ids):
+    async def get_state_groups_ids(
+        self, _room_id: str, event_ids: Iterable[str]
+    ) -> Dict[int, StateMap[str]]:
         """Get the event IDs of all the state for the state groups for the given events
 
         Args:
-            _room_id (str): id of the room for these events
-            event_ids (iterable[str]): ids of the events
+            _room_id: id of the room for these events
+            event_ids: ids of the events
 
         Returns:
-            Deferred[dict[int, StateMap[str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
+            dict of state_group_id -> (dict of (type, state_key) -> event id)
         """
         if not event_ids:
             return {}
 
-        event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)
+        event_to_groups = await self.stores.main._get_state_group_for_events(event_ids)
 
         groups = set(event_to_groups.values())
-        group_to_state = yield self.stores.state._get_state_for_groups(groups)
+        group_to_state = await self.stores.state._get_state_for_groups(groups)
 
         return group_to_state
 
-    @defer.inlineCallbacks
-    def get_state_ids_for_group(self, state_group):
+    async def get_state_ids_for_group(self, state_group: int) -> StateMap[str]:
         """Get the event IDs of all the state in the given state group
 
         Args:
-            state_group (int)
+            state_group: A state group for which we want to get the state IDs.
 
         Returns:
-            Deferred[dict]: Resolves to a map of (type, state_key) -> event_id
+            A map of (type, state_key) -> event_id
         """
-        group_to_state = yield self._get_state_for_groups((state_group,))
+        group_to_state = await self._get_state_for_groups((state_group,))
 
         return group_to_state[state_group]
 
-    @defer.inlineCallbacks
-    def get_state_groups(self, room_id, event_ids):
+    async def get_state_groups(
+        self, room_id: str, event_ids: Iterable[str]
+    ) -> Dict[int, List[EventBase]]:
         """ Get the state groups for the given list of event_ids
+
+        Args:
+            room_id: ID of the room for these events.
+            event_ids: The event IDs to retrieve state for.
+
         Returns:
-            Deferred[dict[int, list[EventBase]]]:
-                dict of state_group_id -> list of state events.
+            dict of state_group_id -> list of state events.
         """
         if not event_ids:
             return {}
 
-        group_to_ids = yield self.get_state_groups_ids(room_id, event_ids)
+        group_to_ids = await self.get_state_groups_ids(room_id, event_ids)
 
-        state_event_map = yield self.stores.main.get_events(
+        state_event_map = await self.stores.main.get_events(
             [
                 ev_id
                 for group_ids in group_to_ids.values()
@@ -415,7 +419,7 @@ class StateGroupStorage(object):
 
     def _get_state_groups_from_groups(
         self, groups: List[int], state_filter: StateFilter
-    ):
+    ) -> Awaitable[Dict[int, StateMap[str]]]:
         """Returns the state groups for a given set of groups, filtering on
         types of state events.
 
@@ -423,31 +427,34 @@ class StateGroupStorage(object):
             groups: list of state group IDs to query
             state_filter: The state filter used to fetch state
                 from the database.
+
         Returns:
-            Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
+            Dict of state group to state map.
         """
 
         return self.stores.state._get_state_groups_from_groups(groups, state_filter)
 
-    @defer.inlineCallbacks
-    def get_state_for_events(self, event_ids, state_filter=StateFilter.all()):
+    async def get_state_for_events(
+        self, event_ids: List[str], state_filter: StateFilter = StateFilter.all()
+    ) -> Dict[str, StateMap[EventBase]]:
         """Given a list of event_ids and type tuples, return a list of state
         dicts for each event.
+
         Args:
-            event_ids (list[string])
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
+            event_ids: The events to fetch the state of.
+            state_filter: The state filter used to fetch state.
+
         Returns:
-            deferred: A dict of (event_id) -> (type, state_key) -> [state_events]
+            A dict of (event_id) -> (type, state_key) -> [state_events]
         """
-        event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)
+        event_to_groups = await self.stores.main._get_state_group_for_events(event_ids)
 
         groups = set(event_to_groups.values())
-        group_to_state = yield self.stores.state._get_state_for_groups(
+        group_to_state = await self.stores.state._get_state_for_groups(
             groups, state_filter
         )
 
-        state_event_map = yield self.stores.main.get_events(
+        state_event_map = await self.stores.main.get_events(
             [ev_id for sd in group_to_state.values() for ev_id in sd.values()],
             get_prev_content=False,
         )
@@ -463,24 +470,24 @@ class StateGroupStorage(object):
 
         return {event: event_to_state[event] for event in event_ids}
 
-    @defer.inlineCallbacks
-    def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()):
+    async def get_state_ids_for_events(
+        self, event_ids: List[str], state_filter: StateFilter = StateFilter.all()
+    ) -> Dict[str, StateMap[str]]:
         """
         Get the state dicts corresponding to a list of events, containing the event_ids
         of the state events (as opposed to the events themselves)
 
         Args:
-            event_ids(list(str)): events whose state should be returned
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
+            event_ids: events whose state should be returned
+            state_filter: The state filter used to fetch state from the database.
 
         Returns:
-            A deferred dict from event_id -> (type, state_key) -> event_id
+            A dict from event_id -> (type, state_key) -> event_id
         """
-        event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)
+        event_to_groups = await self.stores.main._get_state_group_for_events(event_ids)
 
         groups = set(event_to_groups.values())
-        group_to_state = yield self.stores.state._get_state_for_groups(
+        group_to_state = await self.stores.state._get_state_for_groups(
             groups, state_filter
         )
 
@@ -491,67 +498,72 @@ class StateGroupStorage(object):
 
         return {event: event_to_state[event] for event in event_ids}
 
-    @defer.inlineCallbacks
-    def get_state_for_event(self, event_id, state_filter=StateFilter.all()):
+    async def get_state_for_event(
+        self, event_id: str, state_filter: StateFilter = StateFilter.all()
+    ) -> StateMap[EventBase]:
         """
         Get the state dict corresponding to a particular event
 
         Args:
-            event_id(str): event whose state should be returned
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
+            event_id: event whose state should be returned
+            state_filter: The state filter used to fetch state from the database.
 
         Returns:
-            A deferred dict from (type, state_key) -> state_event
+            A dict from (type, state_key) -> state_event
         """
-        state_map = yield self.get_state_for_events([event_id], state_filter)
+        state_map = await self.get_state_for_events([event_id], state_filter)
         return state_map[event_id]
 
-    @defer.inlineCallbacks
-    def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()):
+    async def get_state_ids_for_event(
+        self, event_id: str, state_filter: StateFilter = StateFilter.all()
+    ) -> StateMap[str]:
         """
         Get the state dict corresponding to a particular event
 
         Args:
-            event_id(str): event whose state should be returned
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
+            event_id: event whose state should be returned
+            state_filter: The state filter used to fetch state from the database.
 
         Returns:
-            A deferred dict from (type, state_key) -> state_event
+            A dict from (type, state_key) -> event_id
         """
-        state_map = yield self.get_state_ids_for_events([event_id], state_filter)
+        state_map = await self.get_state_ids_for_events([event_id], state_filter)
         return state_map[event_id]
 
     def _get_state_for_groups(
         self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all()
-    ):
+    ) -> Awaitable[Dict[int, StateMap[str]]]:
         """Gets the state at each of a list of state groups, optionally
         filtering by type/state_key
 
         Args:
-            groups (iterable[int]): list of state groups for which we want
-                to get the state.
-            state_filter (StateFilter): The state filter used to fetch state
+            groups: list of state groups for which we want to get the state.
+            state_filter: The state filter used to fetch state
                 from the database.
+
         Returns:
-            Deferred[dict[int, StateMap[str]]]: Dict of state group to state map.
+            Dict of state group to state map.
         """
         return self.stores.state._get_state_for_groups(groups, state_filter)
 
     def store_state_group(
-        self, event_id, room_id, prev_group, delta_ids, current_state_ids
+        self,
+        event_id: str,
+        room_id: str,
+        prev_group: Optional[int],
+        delta_ids: Optional[dict],
+        current_state_ids: dict,
     ):
         """Store a new set of state, returning a newly assigned state group.
 
         Args:
-            event_id (str): The event ID for which the state was calculated
-            room_id (str)
-            prev_group (int|None): A previous state group for the room, optional.
-            delta_ids (dict|None): The delta between state at `prev_group` and
+            event_id: The event ID for which the state was calculated.
+            room_id: ID of the room for which the state was calculated.
+            prev_group: A previous state group for the room, optional.
+            delta_ids: The delta between state at `prev_group` and
                 `current_state_ids`, if `prev_group` was given. Same format as
                 `current_state_ids`.
-            current_state_ids (dict): The state to store. Map of (type, state_key)
+            current_state_ids: The state to store. Map of (type, state_key)
                 to event_id.
 
         Returns:
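
The `StateFilter` API is only annotated here, not changed. A short sketch of the methods touched above, using only calls shown in this file:

    from synapse.api.constants import EventTypes
    from synapse.storage.state import StateFilter

    # Fetch the room topic plus all membership events: a state_key of None
    # is a wildcard for that event type (see from_types above).
    sf = StateFilter.from_types([("m.room.topic", ""), (EventTypes.Member, None)])

    # Split into member / non-member halves so each part can be answered
    # from its own cache, as _get_state_for_groups does.
    member_filter, non_member_filter = sf.get_member_split()

    # An empty SQL string means the filter matches everything.
    where_clause, where_args = sf.make_sql_filter_clause()
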
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 0f042c5696..e3da7744d2 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -16,8 +16,6 @@
 import logging
 import operator
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes, Membership
 from synapse.events.utils import prune_event
 from synapse.storage import Storage
@@ -39,8 +37,7 @@ MEMBERSHIP_PRIORITY = (
 )
 
 
-@defer.inlineCallbacks
-def filter_events_for_client(
+async def filter_events_for_client(
     storage: Storage,
     user_id,
     events,
@@ -67,19 +64,19 @@ def filter_events_for_client(
             also be called to check whether a user can see the state at a given point.
 
     Returns:
-        Deferred[list[synapse.events.EventBase]]
+        list[synapse.events.EventBase]
     """
     # Filter out events that have been soft failed so that we don't relay them
     # to clients.
     events = [e for e in events if not e.internal_metadata.is_soft_failed()]
 
     types = ((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id))
-    event_id_to_state = yield storage.state.get_state_for_events(
+    event_id_to_state = await storage.state.get_state_for_events(
         frozenset(e.event_id for e in events),
         state_filter=StateFilter.from_types(types),
     )
 
-    ignore_dict_content = yield storage.main.get_global_account_data_by_type_for_user(
+    ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user(
         "m.ignored_user_list", user_id
     )
 
@@ -90,7 +87,7 @@ def filter_events_for_client(
         else []
     )
 
-    erased_senders = yield storage.main.are_users_erased((e.sender for e in events))
+    erased_senders = await storage.main.are_users_erased((e.sender for e in events))
 
     if filter_send_to_client:
         room_ids = {e.room_id for e in events}
@@ -99,7 +96,7 @@ def filter_events_for_client(
         for room_id in room_ids:
             retention_policies[
                 room_id
-            ] = yield storage.main.get_retention_policy_for_room(room_id)
+            ] = await storage.main.get_retention_policy_for_room(room_id)
 
     def allowed(event):
         """
@@ -254,8 +251,7 @@ def filter_events_for_client(
     return list(filtered_events)
 
 
-@defer.inlineCallbacks
-def filter_events_for_server(
+async def filter_events_for_server(
     storage: Storage,
     server_name,
     events,
@@ -277,7 +273,7 @@ def filter_events_for_server(
             backfill or not.
 
     Returns:
-        Deferred[list[FrozenEvent]]
+        list[FrozenEvent]
     """
 
     def is_sender_erased(event, erased_senders):
@@ -321,7 +317,7 @@ def filter_events_for_server(
     # Let's check to see if all the events have a history visibility
     # of "shared" or "world_readable". If that's the case then we don't
     # need to check membership (as we know the server is in the room).
-    event_to_state_ids = yield storage.state.get_state_ids_for_events(
+    event_to_state_ids = await storage.state.get_state_ids_for_events(
         frozenset(e.event_id for e in events),
         state_filter=StateFilter.from_types(
             types=((EventTypes.RoomHistoryVisibility, ""),)
@@ -339,14 +335,14 @@ def filter_events_for_server(
     if not visibility_ids:
         all_open = True
     else:
-        event_map = yield storage.main.get_events(visibility_ids)
+        event_map = await storage.main.get_events(visibility_ids)
         all_open = all(
             e.content.get("history_visibility") in (None, "shared", "world_readable")
             for e in event_map.values()
         )
 
     if not check_history_visibility_only:
-        erased_senders = yield storage.main.are_users_erased((e.sender for e in events))
+        erased_senders = await storage.main.are_users_erased((e.sender for e in events))
     else:
         # We don't want to check whether users are erased, which is equivalent
         # to no users having been erased.
@@ -375,7 +371,7 @@ def filter_events_for_server(
 
     # first, for each event we're wanting to return, get the event_ids
     # of the history vis and membership state at those events.
-    event_to_state_ids = yield storage.state.get_state_ids_for_events(
+    event_to_state_ids = await storage.state.get_state_ids_for_events(
         frozenset(e.event_id for e in events),
         state_filter=StateFilter.from_types(
             types=((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, None))
@@ -405,7 +401,7 @@ def filter_events_for_server(
             return False
         return state_key[idx + 1 :] == server_name
 
-    event_map = yield storage.main.get_events(
+    event_map = await storage.main.get_events(
         [e_id for e_id, key in event_id_to_state_key.items() if include(key[0], key[1])]
     )
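
Both visibility filters are now plain coroutines. A hypothetical call site, assuming `storage`, `user_id` and `events` are already in scope:

    from synapse.visibility import filter_events_for_client

    async def visible_events(storage, user_id, events):
        # The filtered list comes back directly instead of via a Deferred.
        return await filter_events_for_client(storage, user_id, events)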
 
diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py
index 4003869ed6..236b608d58 100644
--- a/tests/appservice/test_appservice.py
+++ b/tests/appservice/test_appservice.py
@@ -50,13 +50,17 @@ class ApplicationServiceTestCase(unittest.TestCase):
     def test_regex_user_id_prefix_match(self):
         self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
         self.event.sender = "@irc_foobar:matrix.org"
-        self.assertTrue((yield self.service.is_interested(self.event)))
+        self.assertTrue(
+            (yield defer.ensureDeferred(self.service.is_interested(self.event)))
+        )
 
     @defer.inlineCallbacks
     def test_regex_user_id_prefix_no_match(self):
         self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
         self.event.sender = "@someone_else:matrix.org"
-        self.assertFalse((yield self.service.is_interested(self.event)))
+        self.assertFalse(
+            (yield defer.ensureDeferred(self.service.is_interested(self.event)))
+        )
 
     @defer.inlineCallbacks
     def test_regex_room_member_is_checked(self):
@@ -64,7 +68,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.event.sender = "@someone_else:matrix.org"
         self.event.type = "m.room.member"
         self.event.state_key = "@irc_foobar:matrix.org"
-        self.assertTrue((yield self.service.is_interested(self.event)))
+        self.assertTrue(
+            (yield defer.ensureDeferred(self.service.is_interested(self.event)))
+        )
 
     @defer.inlineCallbacks
     def test_regex_room_id_match(self):
@@ -72,7 +78,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
             _regex("!some_prefix.*some_suffix:matrix.org")
         )
         self.event.room_id = "!some_prefixs0m3th1nGsome_suffix:matrix.org"
-        self.assertTrue((yield self.service.is_interested(self.event)))
+        self.assertTrue(
+            (yield defer.ensureDeferred(self.service.is_interested(self.event)))
+        )
 
     @defer.inlineCallbacks
     def test_regex_room_id_no_match(self):
@@ -80,19 +88,26 @@ class ApplicationServiceTestCase(unittest.TestCase):
             _regex("!some_prefix.*some_suffix:matrix.org")
         )
         self.event.room_id = "!XqBunHwQIXUiqCaoxq:matrix.org"
-        self.assertFalse((yield self.service.is_interested(self.event)))
+        self.assertFalse(
+            (yield defer.ensureDeferred(self.service.is_interested(self.event)))
+        )
 
     @defer.inlineCallbacks
     def test_regex_alias_match(self):
         self.service.namespaces[ApplicationService.NS_ALIASES].append(
             _regex("#irc_.*:matrix.org")
         )
-        self.store.get_aliases_for_room.return_value = [
-            "#irc_foobar:matrix.org",
-            "#athing:matrix.org",
-        ]
-        self.store.get_users_in_room.return_value = []
-        self.assertTrue((yield self.service.is_interested(self.event, self.store)))
+        self.store.get_aliases_for_room.return_value = defer.succeed(
+            ["#irc_foobar:matrix.org", "#athing:matrix.org"]
+        )
+        self.store.get_users_in_room.return_value = defer.succeed([])
+        self.assertTrue(
+            (
+                yield defer.ensureDeferred(
+                    self.service.is_interested(self.event, self.store)
+                )
+            )
+        )
 
     def test_non_exclusive_alias(self):
         self.service.namespaces[ApplicationService.NS_ALIASES].append(
@@ -135,12 +150,17 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.service.namespaces[ApplicationService.NS_ALIASES].append(
             _regex("#irc_.*:matrix.org")
         )
-        self.store.get_aliases_for_room.return_value = [
-            "#xmpp_foobar:matrix.org",
-            "#athing:matrix.org",
-        ]
-        self.store.get_users_in_room.return_value = []
-        self.assertFalse((yield self.service.is_interested(self.event, self.store)))
+        self.store.get_aliases_for_room.return_value = defer.succeed(
+            ["#xmpp_foobar:matrix.org", "#athing:matrix.org"]
+        )
+        self.store.get_users_in_room.return_value = defer.succeed([])
+        self.assertFalse(
+            (
+                yield defer.ensureDeferred(
+                    self.service.is_interested(self.event, self.store)
+                )
+            )
+        )
 
     @defer.inlineCallbacks
     def test_regex_multiple_matches(self):
@@ -149,9 +169,17 @@ class ApplicationServiceTestCase(unittest.TestCase):
         )
         self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
         self.event.sender = "@irc_foobar:matrix.org"
-        self.store.get_aliases_for_room.return_value = ["#irc_barfoo:matrix.org"]
-        self.store.get_users_in_room.return_value = []
-        self.assertTrue((yield self.service.is_interested(self.event, self.store)))
+        self.store.get_aliases_for_room.return_value = defer.succeed(
+            ["#irc_barfoo:matrix.org"]
+        )
+        self.store.get_users_in_room.return_value = defer.succeed([])
+        self.assertTrue(
+            (
+                yield defer.ensureDeferred(
+                    self.service.is_interested(self.event, self.store)
+                )
+            )
+        )
 
     @defer.inlineCallbacks
     def test_interested_in_self(self):
@@ -161,19 +189,24 @@ class ApplicationServiceTestCase(unittest.TestCase):
         self.event.type = "m.room.member"
         self.event.content = {"membership": "invite"}
         self.event.state_key = self.service.sender
-        self.assertTrue((yield self.service.is_interested(self.event)))
+        self.assertTrue(
+            (yield defer.ensureDeferred(self.service.is_interested(self.event)))
+        )
 
     @defer.inlineCallbacks
     def test_member_list_match(self):
         self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
-        self.store.get_users_in_room.return_value = [
-            "@alice:here",
-            "@irc_fo:here",  # AS user
-            "@bob:here",
-        ]
-        self.store.get_aliases_for_room.return_value = []
+        # Note that @irc_fo:here is the AS user.
+        self.store.get_users_in_room.return_value = defer.succeed(
+            ["@alice:here", "@irc_fo:here", "@bob:here"]
+        )
+        self.store.get_aliases_for_room.return_value = defer.succeed([])
 
         self.event.sender = "@xmpp_foobar:matrix.org"
         self.assertTrue(
-            (yield self.service.is_interested(event=self.event, store=self.store))
+            (
+                yield defer.ensureDeferred(
+                    self.service.is_interested(event=self.event, store=self.store)
+                )
+            )
         )
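
`is_interested` is now a coroutine, and an `inlineCallbacks` generator can only usefully yield Deferreds, so every call above is wrapped in `defer.ensureDeferred`. A self-contained sketch of the pattern (with a stub in place of the real `ApplicationService`):

    from twisted.internet import defer

    async def is_interested_stub(event):
        # Stand-in for the now-async ApplicationService.is_interested.
        return True

    @defer.inlineCallbacks
    def check(event):
        # Yielding the bare coroutine would hand back the coroutine object
        # itself; ensureDeferred adapts it into a Deferred first.
        result = yield defer.ensureDeferred(is_interested_stub(event))
        return result
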
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
index 52f89d3f83..68a4caabbf 100644
--- a/tests/appservice/test_scheduler.py
+++ b/tests/appservice/test_scheduler.py
@@ -25,6 +25,7 @@ from synapse.appservice.scheduler import (
 from synapse.logging.context import make_deferred_yieldable
 
 from tests import unittest
+from tests.test_utils import make_awaitable
 
 from ..utils import MockClock
 
@@ -52,11 +53,11 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
         self.store.get_appservice_state = Mock(
             return_value=defer.succeed(ApplicationServiceState.UP)
         )
-        txn.send = Mock(return_value=defer.succeed(True))
+        txn.send = Mock(return_value=make_awaitable(True))
         self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn))
 
         # actual call
-        self.txnctrl.send(service, events)
+        self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events)))
 
         self.store.create_appservice_txn.assert_called_once_with(
             service=service, events=events  # txn made and saved
@@ -77,7 +78,7 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
         self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn))
 
         # actual call
-        self.txnctrl.send(service, events)
+        self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events)))
 
         self.store.create_appservice_txn.assert_called_once_with(
             service=service, events=events  # txn made and saved
@@ -98,11 +99,11 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
             return_value=defer.succeed(ApplicationServiceState.UP)
         )
         self.store.set_appservice_state = Mock(return_value=defer.succeed(True))
-        txn.send = Mock(return_value=defer.succeed(False))  # fails to send
+        txn.send = Mock(return_value=make_awaitable(False))  # fails to send
         self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn))
 
         # actual call
-        self.txnctrl.send(service, events)
+        self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events)))
 
         self.store.create_appservice_txn.assert_called_once_with(
             service=service, events=events
@@ -144,7 +145,8 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
         self.recoverer.recover()
         # shouldn't have called anything prior to waiting for exp backoff
         self.assertEquals(0, self.store.get_oldest_unsent_txn.call_count)
-        txn.send = Mock(return_value=True)
+        txn.send = Mock(return_value=make_awaitable(True))
+        txn.complete.return_value = make_awaitable(None)
         # wait for exp backoff
         self.clock.advance_time(2)
         self.assertEquals(1, txn.send.call_count)
@@ -169,7 +171,8 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
 
         self.recoverer.recover()
         self.assertEquals(0, self.store.get_oldest_unsent_txn.call_count)
-        txn.send = Mock(return_value=False)
+        txn.send = Mock(return_value=make_awaitable(False))
+        txn.complete.return_value = make_awaitable(None)
         self.clock.advance_time(2)
         self.assertEquals(1, txn.send.call_count)
         self.assertEquals(0, txn.complete.call_count)
@@ -182,7 +185,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
         self.assertEquals(3, txn.send.call_count)
         self.assertEquals(0, txn.complete.call_count)
         self.assertEquals(0, self.callback.call_count)
-        txn.send = Mock(return_value=True)  # successfully send the txn
+        txn.send = Mock(return_value=make_awaitable(True))  # successfully send the txn
         pop_txn = True  # returns the txn the first time, then no more.
         self.clock.advance_time(16)
         self.assertEquals(1, txn.send.call_count)  # new mock reset call count
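
`make_awaitable` replaces `defer.succeed` wherever the mocked function is now awaited rather than yielded. A plausible sketch of such a helper, assuming a Future-based implementation (the real one lives in `tests/test_utils` and may differ in detail):

    from asyncio import Future
    from typing import Any, Awaitable

    def make_awaitable(result: Any) -> Awaitable[Any]:
        # A completed Future is awaitable and, unlike a coroutine, can be
        # awaited more than once, so it also works as a Mock return_value.
        future = Future()  # type: Future
        future.set_result(result)
        return future
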
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index f9ce609923..e0ad8e8a77 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -102,11 +102,10 @@ class KeyringTestCase(unittest.HomeserverTestCase):
         }
         persp_deferred = defer.Deferred()
 
-        @defer.inlineCallbacks
-        def get_perspectives(**kwargs):
+        async def get_perspectives(**kwargs):
             self.assertEquals(current_context().request, "11")
             with PreserveLoggingContext():
-                yield persp_deferred
+                await persp_deferred
             return persp_resp
 
         self.http_client.post_json.side_effect = get_perspectives
@@ -355,7 +354,7 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase):
         }
         signedjson.sign.sign_json(response, SERVER_NAME, testkey)
 
-        def get_json(destination, path, **kwargs):
+        async def get_json(destination, path, **kwargs):
             self.assertEqual(destination, SERVER_NAME)
             self.assertEqual(path, "/_matrix/key/v2/server/key1")
             return response
@@ -444,7 +443,7 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
         Tell the mock http client to expect a perspectives-server key query
         """
 
-        def post_json(destination, path, data, **kwargs):
+        async def post_json(destination, path, data, **kwargs):
             self.assertEqual(destination, self.mock_perspective_server.server_name)
             self.assertEqual(path, "/_matrix/key/v2/query")
 
@@ -580,14 +579,12 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
         # remove the perspectives server's signature
         response = build_response()
         del response["signatures"][self.mock_perspective_server.server_name]
-        self.http_client.post_json.return_value = {"server_keys": [response]}
         keys = get_key_from_perspectives(response)
         self.assertEqual(keys, {}, "Expected empty dict with missing persp server sig")
 
         # remove the origin server's signature
         response = build_response()
         del response["signatures"][SERVER_NAME]
-        self.http_client.post_json.return_value = {"server_keys": [response]}
         keys = get_key_from_perspectives(response)
         self.assertEqual(keys, {}, "Expected empty dict with missing origin server sig")
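
The keyring tests swap their `inlineCallbacks` helpers for plain `async` functions assigned as Mock side effects. A minimal standalone sketch, assuming only the standard library:

    from unittest.mock import Mock

    async def fake_post_json(destination, path, data, **kwargs):
        # Calling the mock returns this coroutine, which the code under
        # test awaits exactly as it would the real HTTP client.
        return {"server_keys": []}

    http_client = Mock()
    http_client.post_json.side_effect = fake_post_json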
 
diff --git a/tests/events/test_snapshot.py b/tests/events/test_snapshot.py
index 640f5f3bce..3a80626224 100644
--- a/tests/events/test_snapshot.py
+++ b/tests/events/test_snapshot.py
@@ -41,8 +41,10 @@ class TestEventContext(unittest.HomeserverTestCase):
         serialize/deserialize.
         """
 
-        event, context = create_event(
-            self.hs, room_id=self.room_id, type="m.test", sender=self.user_id,
+        event, context = self.get_success(
+            create_event(
+                self.hs, room_id=self.room_id, type="m.test", sender=self.user_id,
+            )
         )
 
         self._check_serialize_deserialize(event, context)
@@ -51,12 +53,14 @@ class TestEventContext(unittest.HomeserverTestCase):
         """Test that an EventContext for a state event (with not previous entry)
         is the same after serialize/deserialize.
         """
-        event, context = create_event(
-            self.hs,
-            room_id=self.room_id,
-            type="m.test",
-            sender=self.user_id,
-            state_key="",
+        event, context = self.get_success(
+            create_event(
+                self.hs,
+                room_id=self.room_id,
+                type="m.test",
+                sender=self.user_id,
+                state_key="",
+            )
         )
 
         self._check_serialize_deserialize(event, context)
@@ -65,13 +69,15 @@ class TestEventContext(unittest.HomeserverTestCase):
         """Test that an EventContext for a state event (which replaces a
         previous entry) is the same after serialize/deserialize.
         """
-        event, context = create_event(
-            self.hs,
-            room_id=self.room_id,
-            type="m.room.member",
-            sender=self.user_id,
-            state_key=self.user_id,
-            content={"membership": "leave"},
+        event, context = self.get_success(
+            create_event(
+                self.hs,
+                room_id=self.room_id,
+                type="m.room.member",
+                sender=self.user_id,
+                state_key=self.user_id,
+                content={"membership": "leave"},
+            )
         )
 
         self._check_serialize_deserialize(event, context)
diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py
index 0c9987be54..b8ca118716 100644
--- a/tests/federation/test_complexity.py
+++ b/tests/federation/test_complexity.py
@@ -23,6 +23,7 @@ from synapse.rest.client.v1 import login, room
 from synapse.types import UserID
 
 from tests import unittest
+from tests.test_utils import make_awaitable
 
 
 class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
@@ -78,9 +79,40 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
         fed_transport = self.hs.get_federation_transport_client()
 
         # Mock out some things, because we don't want to test the whole join
-        fed_transport.client.get_json = Mock(return_value=defer.succeed({"v1": 9999}))
+        fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999}))
         handler.federation_handler.do_invite_join = Mock(
-            return_value=defer.succeed(("", 1))
+            return_value=make_awaitable(("", 1))
+        )
+
+        d = handler._remote_join(
+            None,
+            ["other.example.com"],
+            "roomid",
+            UserID.from_string(u1),
+            {"membership": "join"},
+        )
+
+        self.pump()
+
+        # The request failed with a SynapseError saying the resource limit was
+        # exceeded.
+        f = self.get_failure(d, SynapseError)
+        self.assertEqual(f.value.code, 400, f.value)
+        self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
+
+    def test_join_too_large_admin(self):
+        # Check whether an admin can join if the option "admins_can_join" is
+        # undefined; it defaults to false, so the join should fail.
+
+        u1 = self.register_user("u1", "pass", admin=True)
+
+        handler = self.hs.get_room_member_handler()
+        fed_transport = self.hs.get_federation_transport_client()
+
+        # Mock out some things, because we don't want to test the whole join
+        fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999}))
+        handler.federation_handler.do_invite_join = Mock(
+            return_value=make_awaitable(("", 1))
         )
 
         d = handler._remote_join(
@@ -116,9 +148,9 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
         fed_transport = self.hs.get_federation_transport_client()
 
         # Mock out some things, because we don't want to test the whole join
-        fed_transport.client.get_json = Mock(return_value=defer.succeed(None))
+        fed_transport.client.get_json = Mock(return_value=make_awaitable(None))
         handler.federation_handler.do_invite_join = Mock(
-            return_value=defer.succeed(("", 1))
+            return_value=make_awaitable(("", 1))
         )
 
         # Artificially raise the complexity
@@ -141,3 +173,81 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
         f = self.get_failure(d, SynapseError)
         self.assertEqual(f.value.code, 400)
         self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
+
+
+class RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase):
+    # Test the behavior of joining rooms whose complexity exceeds the limit
+    # when the option limit_remote_rooms.admins_can_join is True.
+
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def default_config(self):
+        config = super().default_config()
+        config["limit_remote_rooms"] = {
+            "enabled": True,
+            "complexity": 0.05,
+            "admins_can_join": True,
+        }
+        return config
+
+    def test_join_too_large_no_admin(self):
+        # A user which is not an admin should not be able to join a remote room
+        # which is too complex.
+
+        u1 = self.register_user("u1", "pass")
+
+        handler = self.hs.get_room_member_handler()
+        fed_transport = self.hs.get_federation_transport_client()
+
+        # Mock out some things, because we don't want to test the whole join
+        fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999}))
+        handler.federation_handler.do_invite_join = Mock(
+            return_value=make_awaitable(("", 1))
+        )
+
+        d = handler._remote_join(
+            None,
+            ["other.example.com"],
+            "roomid",
+            UserID.from_string(u1),
+            {"membership": "join"},
+        )
+
+        self.pump()
+
+        # The request failed with a SynapseError saying the resource limit was
+        # exceeded.
+        f = self.get_failure(d, SynapseError)
+        self.assertEqual(f.value.code, 400, f.value)
+        self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
+
+    def test_join_too_large_admin(self):
+        # An admin should be able to join rooms where a complexity check fails.
+
+        u1 = self.register_user("u1", "pass", admin=True)
+
+        handler = self.hs.get_room_member_handler()
+        fed_transport = self.hs.get_federation_transport_client()
+
+        # Mock out some things, because we don't want to test the whole join
+        fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999}))
+        handler.federation_handler.do_invite_join = Mock(
+            return_value=make_awaitable(("", 1))
+        )
+
+        d = handler._remote_join(
+            None,
+            ["other.example.com"],
+            "roomid",
+            UserID.from_string(u1),
+            {"membership": "join"},
+        )
+
+        self.pump()
+
+        # The request succeeds since the user is an admin.
+        self.get_success(d)
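
A recurring change in this commit is replacing defer.succeed(...) with
make_awaitable(...) from tests.test_utils when mocking out functions that have
been converted to async. As a minimal sketch, assuming an asyncio-Future-based
implementation (the real helper lives in tests/test_utils/__init__.py and may
differ in detail), such a helper can look like:

    from asyncio import Future
    from typing import Any, Awaitable

    def make_awaitable(result: Any) -> Awaitable[Any]:
        # Wrap a plain value in something that can be awaited, suitable as a
        # Mock return value standing in for an `async` function. A resolved
        # Future can be awaited multiple times, unlike a fired Deferred whose
        # result is consumed by its callback chain.
        future = Future()  # type: Future[Any]
        future.set_result(result)
        return future

With this, Mock(return_value=make_awaitable({...})) behaves like an async def
that immediately returns the given value.
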
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index 1a9bd5f37d..5f512ff8bf 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -26,31 +26,34 @@ from synapse.rest import admin
 from synapse.rest.client.v1 import login
 from synapse.types import JsonDict, ReadReceipt
 
+from tests.test_utils import make_awaitable
 from tests.unittest import HomeserverTestCase, override_config
 
 
 class FederationSenderReceiptsTestCases(HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
+        mock_state_handler = Mock(spec=["get_current_hosts_in_room"])
+        # Ensure a new Awaitable is created for each call.
+        mock_state_handler.get_current_hosts_in_room.side_effect = lambda room_id: make_awaitable(
+            ["test", "host2"]
+        )
         return self.setup_test_homeserver(
-            state_handler=Mock(spec=["get_current_hosts_in_room"]),
+            state_handler=mock_state_handler,
             federation_transport_client=Mock(spec=["send_transaction"]),
         )
 
     @override_config({"send_federation": True})
     def test_send_receipts(self):
-        mock_state_handler = self.hs.get_state_handler()
-        mock_state_handler.get_current_hosts_in_room.return_value = ["test", "host2"]
-
         mock_send_transaction = (
             self.hs.get_federation_transport_client().send_transaction
         )
-        mock_send_transaction.return_value = defer.succeed({})
+        mock_send_transaction.return_value = make_awaitable({})
 
         sender = self.hs.get_federation_sender()
         receipt = ReadReceipt(
             "room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}
         )
-        self.successResultOf(sender.send_read_receipt(receipt))
+        self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
 
         self.pump()
 
@@ -81,19 +84,16 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
     def test_send_receipts_with_backoff(self):
         """Send two receipts in quick succession; the second should be flushed, but
         only after 20ms"""
-        mock_state_handler = self.hs.get_state_handler()
-        mock_state_handler.get_current_hosts_in_room.return_value = ["test", "host2"]
-
         mock_send_transaction = (
             self.hs.get_federation_transport_client().send_transaction
         )
-        mock_send_transaction.return_value = defer.succeed({})
+        mock_send_transaction.return_value = make_awaitable({})
 
         sender = self.hs.get_federation_sender()
         receipt = ReadReceipt(
             "room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}
         )
-        self.successResultOf(sender.send_read_receipt(receipt))
+        self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
 
         self.pump()
 
@@ -125,7 +125,7 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
         receipt = ReadReceipt(
             "room_id", "m.read", "user_id", ["other_id"], {"ts": 1234}
         )
-        self.successResultOf(sender.send_read_receipt(receipt))
+        self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
         self.pump()
         mock_send_transaction.assert_not_called()
 
@@ -164,7 +164,6 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
 
     def make_homeserver(self, reactor, clock):
         return self.setup_test_homeserver(
-            state_handler=Mock(spec=["get_current_hosts_in_room"]),
             federation_transport_client=Mock(spec=["send_transaction"]),
         )
 
@@ -174,10 +173,6 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
         return c
 
     def prepare(self, reactor, clock, hs):
-        # stub out get_current_hosts_in_room
-        mock_state_handler = hs.get_state_handler()
-        mock_state_handler.get_current_hosts_in_room.return_value = ["test", "host2"]
-
         # stub out get_users_who_share_room_with_user so that it claims that
         # `@user2:host2` is in the room
         def get_users_who_share_room_with_user(user_id):
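
The hunk above also swaps a return_value for a side_effect: when the stub can
be called more than once, building a fresh awaitable per call avoids any
single-use semantics of the wrapped object. A sketch of the two options,
reusing the hypothetical make_awaitable from the earlier sketch:

    from mock import Mock

    mock_state_handler = Mock(spec=["get_current_hosts_in_room"])

    # One shared awaitable for every call: fine if the awaitable tolerates
    # being awaited repeatedly, fragile otherwise.
    mock_state_handler.get_current_hosts_in_room.return_value = make_awaitable(
        ["test", "host2"]
    )

    # A fresh awaitable per call: works regardless of how many times the
    # mocked coroutine is invoked and awaited.
    mock_state_handler.get_current_hosts_in_room.side_effect = lambda room_id: make_awaitable(
        ["test", "host2"]
    )
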
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index ebabe9a7d6..628f7d8db0 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -19,6 +19,7 @@ from twisted.internet import defer
 
 from synapse.handlers.appservice import ApplicationServicesHandler
 
+from tests.test_utils import make_awaitable
 from tests.utils import MockClock
 
 from .. import unittest
@@ -117,7 +118,7 @@ class AppServiceHandlerTestCase(unittest.TestCase):
             self._mkservice_alias(is_interested_in_alias=False),
         ]
 
-        self.mock_as_api.query_alias.return_value = defer.succeed(True)
+        self.mock_as_api.query_alias.return_value = make_awaitable(True)
         self.mock_store.get_app_services.return_value = services
         self.mock_store.get_association_from_room_alias.return_value = defer.succeed(
             Mock(room_id=room_id, servers=servers)
@@ -135,7 +136,7 @@ class AppServiceHandlerTestCase(unittest.TestCase):
 
     def _mkservice(self, is_interested):
         service = Mock()
-        service.is_interested.return_value = defer.succeed(is_interested)
+        service.is_interested.return_value = make_awaitable(is_interested)
         service.token = "mock_service_token"
         service.url = "mock_service_url"
         return service
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 00bb776271..bc0c5aefdc 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -16,8 +16,6 @@
 
 from mock import Mock
 
-from twisted.internet import defer
-
 import synapse
 import synapse.api.errors
 from synapse.api.constants import EventTypes
@@ -26,6 +24,7 @@ from synapse.rest.client.v1 import directory, login, room
 from synapse.types import RoomAlias, create_requester
 
 from tests import unittest
+from tests.test_utils import make_awaitable
 
 
 class DirectoryTestCase(unittest.HomeserverTestCase):
@@ -71,7 +70,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         self.assertEquals({"room_id": "!8765qwer:test", "servers": ["test"]}, result)
 
     def test_get_remote_association(self):
-        self.mock_federation.make_query.return_value = defer.succeed(
+        self.mock_federation.make_query.return_value = make_awaitable(
             {"room_id": "!8765qwer:test", "servers": ["test", "remote"]}
         )
 
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 4f1347cd25..d70e1fc608 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -24,6 +24,7 @@ from synapse.handlers.profile import MasterProfileHandler
 from synapse.types import UserID
 
 from tests import unittest
+from tests.test_utils import make_awaitable
 from tests.utils import setup_test_homeserver
 
 
@@ -138,7 +139,7 @@ class ProfileTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_other_name(self):
-        self.mock_federation.make_query.return_value = defer.succeed(
+        self.mock_federation.make_query.return_value = make_awaitable(
             {"displayname": "Alice"}
         )
 
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
index 954e059e76..69945a8f98 100644
--- a/tests/http/federation/test_matrix_federation_agent.py
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -67,6 +67,14 @@ def get_connection_factory():
     return test_server_connection_factory
 
 
+# Once AsyncMock (or async lambdas) is supported, this can go away.
+def generate_resolve_service(result):
+    async def resolve_service(_):
+        return result
+
+    return resolve_service
+
+
 class MatrixFederationAgentTests(unittest.TestCase):
     def setUp(self):
         self.reactor = ThreadedMemoryReactorClock()
@@ -373,7 +381,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
         """
         Test the behaviour when the certificate on the server doesn't match the hostname
         """
-        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
         self.reactor.lookups["testserv1"] = "1.2.3.4"
 
         test_d = self._make_get_request(b"matrix://testserv1/foo/bar")
@@ -456,7 +464,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
         Test the behaviour when the server name has no port, no SRV, and no well-known
         """
 
-        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
         self.reactor.lookups["testserv"] = "1.2.3.4"
 
         test_d = self._make_get_request(b"matrix://testserv/foo/bar")
@@ -510,7 +518,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
         """Test the behaviour when the .well-known delegates elsewhere
         """
 
-        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
         self.reactor.lookups["testserv"] = "1.2.3.4"
         self.reactor.lookups["target-server"] = "1::f"
 
@@ -572,7 +580,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
         """Test the behaviour when the server name has no port and no SRV record, but
         the .well-known has a 300 redirect
         """
-        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
         self.reactor.lookups["testserv"] = "1.2.3.4"
         self.reactor.lookups["target-server"] = "1::f"
 
@@ -661,7 +669,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
         Test the behaviour when the server name has an *invalid* well-known (and no SRV)
         """
 
-        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
         self.reactor.lookups["testserv"] = "1.2.3.4"
 
         test_d = self._make_get_request(b"matrix://testserv/foo/bar")
@@ -717,7 +725,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
         # the config left to the default, which will not trust it (since the
         # presented cert is signed by a test CA)
 
-        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
         self.reactor.lookups["testserv"] = "1.2.3.4"
 
         config = default_config("test", parse=True)
@@ -764,9 +772,9 @@ class MatrixFederationAgentTests(unittest.TestCase):
         """
         Test the behaviour when there is a single SRV record
         """
-        self.mock_resolver.resolve_service.side_effect = lambda _: [
-            Server(host=b"srvtarget", port=8443)
-        ]
+        self.mock_resolver.resolve_service.side_effect = generate_resolve_service(
+            [Server(host=b"srvtarget", port=8443)]
+        )
         self.reactor.lookups["srvtarget"] = "1.2.3.4"
 
         test_d = self._make_get_request(b"matrix://testserv/foo/bar")
@@ -819,9 +827,9 @@ class MatrixFederationAgentTests(unittest.TestCase):
         self.assertEqual(host, "1.2.3.4")
         self.assertEqual(port, 443)
 
-        self.mock_resolver.resolve_service.side_effect = lambda _: [
-            Server(host=b"srvtarget", port=8443)
-        ]
+        self.mock_resolver.resolve_service.side_effect = generate_resolve_service(
+            [Server(host=b"srvtarget", port=8443)]
+        )
 
         self._handle_well_known_connection(
             client_factory,
@@ -861,7 +869,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
     def test_idna_servername(self):
         """test the behaviour when the server name has idna chars in"""
 
-        self.mock_resolver.resolve_service.side_effect = lambda _: []
+        self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
 
         # the resolver is always called with the IDNA hostname as a native string.
         self.reactor.lookups["xn--bcher-kva.com"] = "1.2.3.4"
@@ -922,9 +930,9 @@ class MatrixFederationAgentTests(unittest.TestCase):
     def test_idna_srv_target(self):
         """test the behaviour when the target of a SRV record has idna chars"""
 
-        self.mock_resolver.resolve_service.side_effect = lambda _: [
-            Server(host=b"xn--trget-3qa.com", port=8443)  # târget.com
-        ]
+        self.mock_resolver.resolve_service.side_effect = generate_resolve_service(
+            [Server(host=b"xn--trget-3qa.com", port=8443)]  # târget.com
+        )
         self.reactor.lookups["xn--trget-3qa.com"] = "1.2.3.4"
 
         test_d = self._make_get_request(b"matrix://xn--bcher-kva.com/foo/bar")
@@ -1087,11 +1095,12 @@ class MatrixFederationAgentTests(unittest.TestCase):
     def test_srv_fallbacks(self):
         """Test that other SRV results are tried if the first one fails.
         """
-
-        self.mock_resolver.resolve_service.side_effect = lambda _: [
-            Server(host=b"target.com", port=8443),
-            Server(host=b"target.com", port=8444),
-        ]
+        self.mock_resolver.resolve_service.side_effect = generate_resolve_service(
+            [
+                Server(host=b"target.com", port=8443),
+                Server(host=b"target.com", port=8444),
+            ]
+        )
         self.reactor.lookups["target.com"] = "1.2.3.4"
 
         test_d = self._make_get_request(b"matrix://testserv/foo/bar")
diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py
index babc201643..fee2985d35 100644
--- a/tests/http/federation/test_srv_resolver.py
+++ b/tests/http/federation/test_srv_resolver.py
@@ -22,7 +22,7 @@ from twisted.internet.error import ConnectError
 from twisted.names import dns, error
 
 from synapse.http.federation.srv_resolver import SrvResolver
-from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context
+from synapse.logging.context import LoggingContext, current_context
 
 from tests import unittest
 from tests.utils import MockClock
@@ -50,13 +50,7 @@ class SrvResolverTestCase(unittest.TestCase):
 
             with LoggingContext("one") as ctx:
                 resolve_d = resolver.resolve_service(service_name)
-
-                self.assertNoResult(resolve_d)
-
-                # should have reset to the sentinel context
-                self.assertIs(current_context(), SENTINEL_CONTEXT)
-
-                result = yield resolve_d
+                result = yield defer.ensureDeferred(resolve_d)
 
                 # should have restored our context
                 self.assertIs(current_context(), ctx)
@@ -91,7 +85,7 @@ class SrvResolverTestCase(unittest.TestCase):
         cache = {service_name: [entry]}
         resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
 
-        servers = yield resolver.resolve_service(service_name)
+        servers = yield defer.ensureDeferred(resolver.resolve_service(service_name))
 
         dns_client_mock.lookupService.assert_called_once_with(service_name)
 
@@ -117,7 +111,7 @@ class SrvResolverTestCase(unittest.TestCase):
             dns_client=dns_client_mock, cache=cache, get_time=clock.time
         )
 
-        servers = yield resolver.resolve_service(service_name)
+        servers = yield defer.ensureDeferred(resolver.resolve_service(service_name))
 
         self.assertFalse(dns_client_mock.lookupService.called)
 
@@ -136,7 +130,7 @@ class SrvResolverTestCase(unittest.TestCase):
         resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
 
         with self.assertRaises(error.DNSServerError):
-            yield resolver.resolve_service(service_name)
+            yield defer.ensureDeferred(resolver.resolve_service(service_name))
 
     @defer.inlineCallbacks
     def test_name_error(self):
@@ -149,7 +143,7 @@ class SrvResolverTestCase(unittest.TestCase):
         cache = {}
         resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
 
-        servers = yield resolver.resolve_service(service_name)
+        servers = yield defer.ensureDeferred(resolver.resolve_service(service_name))
 
         self.assertEquals(len(servers), 0)
         self.assertEquals(len(cache), 0)
@@ -166,8 +160,8 @@ class SrvResolverTestCase(unittest.TestCase):
         cache = {}
         resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
 
-        resolve_d = resolver.resolve_service(service_name)
-        self.assertNoResult(resolve_d)
+        # Old versions of Twisted's failureResultOf() don't wrap coroutines in ensureDeferred().
+        resolve_d = defer.ensureDeferred(resolver.resolve_service(service_name))
 
        # returning a single "." should make the lookup fail with a ConnectError
         lookup_deferred.callback(
@@ -192,8 +186,8 @@ class SrvResolverTestCase(unittest.TestCase):
         cache = {}
         resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
 
-        resolve_d = resolver.resolve_service(service_name)
-        self.assertNoResult(resolve_d)
+        # Old versions of Twisted's successResultOf() don't wrap coroutines in ensureDeferred().
+        resolve_d = defer.ensureDeferred(resolver.resolve_service(service_name))
 
         lookup_deferred.callback(
             (
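
Where a test previously held a Deferred returned by resolve_service, it now
holds a coroutine object, which Deferred-based helpers such as
successResultOf/failureResultOf (and `yield` under inlineCallbacks) cannot
consume directly on the Twisted versions in use. defer.ensureDeferred bridges
the gap; a minimal stand-alone sketch of the pattern, with a stub standing in
for the now-async resolver:

    from twisted.internet import defer

    async def resolve_service(service_name):
        # Stand-in for the now-async SrvResolver.resolve_service.
        return []

    # Calling an async function yields a coroutine object, not a Deferred...
    coro = resolve_service(b"_matrix._tcp.testserv")

    # ...so wrap it before handing it to Deferred-based helpers or yielding
    # it from an inlineCallbacks generator.
    resolve_d = defer.ensureDeferred(coro)
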
diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py
index fff4f0cbf4..ac598249e4 100644
--- a/tests/http/test_fedclient.py
+++ b/tests/http/test_fedclient.py
@@ -58,7 +58,9 @@ class FederationClientTests(HomeserverTestCase):
         @defer.inlineCallbacks
         def do_request():
             with LoggingContext("one") as context:
-                fetch_d = self.cl.get_json("testserv:8008", "foo/bar")
+                fetch_d = defer.ensureDeferred(
+                    self.cl.get_json("testserv:8008", "foo/bar")
+                )
 
                 # Nothing happened yet
                 self.assertNoResult(fetch_d)
@@ -120,7 +122,9 @@ class FederationClientTests(HomeserverTestCase):
         """
         If the DNS lookup returns an error, it will bubble up.
         """
-        d = self.cl.get_json("testserv2:8008", "foo/bar", timeout=10000)
+        d = defer.ensureDeferred(
+            self.cl.get_json("testserv2:8008", "foo/bar", timeout=10000)
+        )
         self.pump()
 
         f = self.failureResultOf(d)
@@ -128,7 +132,9 @@ class FederationClientTests(HomeserverTestCase):
         self.assertIsInstance(f.value.inner_exception, DNSLookupError)
 
     def test_client_connection_refused(self):
-        d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
+        d = defer.ensureDeferred(
+            self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
+        )
 
         self.pump()
 
@@ -154,7 +160,9 @@ class FederationClientTests(HomeserverTestCase):
         If the HTTP request is not connected and is timed out, it'll give a
         ConnectingCancelledError or TimeoutError.
         """
-        d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
+        d = defer.ensureDeferred(
+            self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
+        )
 
         self.pump()
 
@@ -184,7 +192,9 @@ class FederationClientTests(HomeserverTestCase):
         If the HTTP request is connected, but gets no response before being
         timed out, it'll give a ResponseNeverReceived.
         """
-        d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
+        d = defer.ensureDeferred(
+            self.cl.get_json("testserv:8008", "foo/bar", timeout=10000)
+        )
 
         self.pump()
 
@@ -226,7 +236,7 @@ class FederationClientTests(HomeserverTestCase):
         # Try making a GET request to a blacklisted IPv4 address
         # ------------------------------------------------------
         # Make the request
-        d = cl.get_json("internal:8008", "foo/bar", timeout=10000)
+        d = defer.ensureDeferred(cl.get_json("internal:8008", "foo/bar", timeout=10000))
 
         # Nothing happened yet
         self.assertNoResult(d)
@@ -244,7 +254,9 @@ class FederationClientTests(HomeserverTestCase):
         # Try making a POST request to a blacklisted IPv6 address
         # -------------------------------------------------------
         # Make the request
-        d = cl.post_json("internalv6:8008", "foo/bar", timeout=10000)
+        d = defer.ensureDeferred(
+            cl.post_json("internalv6:8008", "foo/bar", timeout=10000)
+        )
 
         # Nothing has happened yet
         self.assertNoResult(d)
@@ -263,7 +275,7 @@ class FederationClientTests(HomeserverTestCase):
         # Try making a GET request to a non-blacklisted IPv4 address
         # ----------------------------------------------------------
         # Make the request
-        d = cl.post_json("fine:8008", "foo/bar", timeout=10000)
+        d = defer.ensureDeferred(cl.post_json("fine:8008", "foo/bar", timeout=10000))
 
         # Nothing has happened yet
         self.assertNoResult(d)
@@ -286,7 +298,7 @@ class FederationClientTests(HomeserverTestCase):
         request = MatrixFederationRequest(
             method="GET", destination="testserv:8008", path="foo/bar"
         )
-        d = self.cl._send_request(request, timeout=10000)
+        d = defer.ensureDeferred(self.cl._send_request(request, timeout=10000))
 
         self.pump()
 
@@ -310,7 +322,9 @@ class FederationClientTests(HomeserverTestCase):
         If the HTTP request is connected, but gets no response before being
         timed out, it'll give a ResponseNeverReceived.
         """
-        d = self.cl.post_json("testserv:8008", "foo/bar", timeout=10000)
+        d = defer.ensureDeferred(
+            self.cl.post_json("testserv:8008", "foo/bar", timeout=10000)
+        )
 
         self.pump()
 
@@ -342,7 +356,9 @@ class FederationClientTests(HomeserverTestCase):
         requiring a trailing slash. We need to retry the request with a
         trailing slash. Workaround for Synapse <= v0.99.3, explained in #3622.
         """
-        d = self.cl.get_json("testserv:8008", "foo/bar", try_trailing_slash_on_400=True)
+        d = defer.ensureDeferred(
+            self.cl.get_json("testserv:8008", "foo/bar", try_trailing_slash_on_400=True)
+        )
 
         # Send the request
         self.pump()
@@ -395,7 +411,9 @@ class FederationClientTests(HomeserverTestCase):
 
         See test_client_requires_trailing_slashes() for context.
         """
-        d = self.cl.get_json("testserv:8008", "foo/bar", try_trailing_slash_on_400=True)
+        d = defer.ensureDeferred(
+            self.cl.get_json("testserv:8008", "foo/bar", try_trailing_slash_on_400=True)
+        )
 
         # Send the request
         self.pump()
@@ -432,7 +450,11 @@ class FederationClientTests(HomeserverTestCase):
         self.failureResultOf(d)
 
     def test_client_sends_body(self):
-        self.cl.post_json("testserv:8008", "foo/bar", timeout=10000, data={"a": "b"})
+        defer.ensureDeferred(
+            self.cl.post_json(
+                "testserv:8008", "foo/bar", timeout=10000, data={"a": "b"}
+            )
+        )
 
         self.pump()
 
@@ -453,7 +475,7 @@ class FederationClientTests(HomeserverTestCase):
 
     def test_closes_connection(self):
         """Check that the client closes unused HTTP connections"""
-        d = self.cl.get_json("testserv:8008", "foo/bar")
+        d = defer.ensureDeferred(self.cl.get_json("testserv:8008", "foo/bar"))
 
         self.pump()
 
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index 1a88c7fb80..0b5204654c 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -366,7 +366,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
         state_handler = self.hs.get_state_handler()
         context = self.get_success(state_handler.compute_event_context(event))
 
-        self.master_store.add_push_actions_to_staging(
-            event.event_id, {user_id: actions for user_id, actions in push_actions}
+        self.get_success(
+            self.master_store.add_push_actions_to_staging(
+                event.event_id, {user_id: actions for user_id, actions in push_actions}
+            )
         )
         return event, context
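
Storage methods such as add_push_actions_to_staging used to return a Deferred
that tests could fire and forget; as coroutines, nothing runs until something
awaits them, hence the new get_success wrapper. Roughly what
HomeserverTestCase.get_success does (a simplified sketch; the real helper also
accepts a pump duration):

    from twisted.internet import defer

    def get_success(self, awaitable):
        # Accept either a coroutine or a Deferred and drive it to completion
        # on the in-memory test reactor, returning the final result.
        d = defer.ensureDeferred(awaitable)
        self.pump()
        return self.successResultOf(d)
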
diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py
index 097e1653b4..c9998e88e6 100644
--- a/tests/replication/tcp/streams/test_events.py
+++ b/tests/replication/tcp/streams/test_events.py
@@ -119,7 +119,9 @@ class EventsStreamTestCase(BaseStreamTestCase):
         OTHER_USER = "@other_user:localhost"
 
         # have the user join
-        inject_member_event(self.hs, self.room_id, OTHER_USER, Membership.JOIN)
+        self.get_success(
+            inject_member_event(self.hs, self.room_id, OTHER_USER, Membership.JOIN)
+        )
 
         # Update existing power levels with mod at PL50
         pls = self.helper.get_state(
@@ -157,14 +159,16 @@ class EventsStreamTestCase(BaseStreamTestCase):
         # roll back all the state by de-modding the user
         prev_events = fork_point
         pls["users"][OTHER_USER] = 0
-        pl_event = inject_event(
-            self.hs,
-            prev_event_ids=prev_events,
-            type=EventTypes.PowerLevels,
-            state_key="",
-            sender=self.user_id,
-            room_id=self.room_id,
-            content=pls,
+        pl_event = self.get_success(
+            inject_event(
+                self.hs,
+                prev_event_ids=prev_events,
+                type=EventTypes.PowerLevels,
+                state_key="",
+                sender=self.user_id,
+                room_id=self.room_id,
+                content=pls,
+            )
         )
 
         # one more bit of state that doesn't get rolled back
@@ -268,7 +272,9 @@ class EventsStreamTestCase(BaseStreamTestCase):
 
         # have the users join
         for u in user_ids:
-            inject_member_event(self.hs, self.room_id, u, Membership.JOIN)
+            self.get_success(
+                inject_member_event(self.hs, self.room_id, u, Membership.JOIN)
+            )
 
         # Update existing power levels with mod at PL50
         pls = self.helper.get_state(
@@ -306,14 +312,16 @@ class EventsStreamTestCase(BaseStreamTestCase):
         pl_events = []
         for u in user_ids:
             pls["users"][u] = 0
-            e = inject_event(
-                self.hs,
-                prev_event_ids=prev_events,
-                type=EventTypes.PowerLevels,
-                state_key="",
-                sender=self.user_id,
-                room_id=self.room_id,
-                content=pls,
+            e = self.get_success(
+                inject_event(
+                    self.hs,
+                    prev_event_ids=prev_events,
+                    type=EventTypes.PowerLevels,
+                    state_key="",
+                    sender=self.user_id,
+                    room_id=self.room_id,
+                    content=pls,
+                )
             )
             prev_events = [e.event_id]
             pl_events.append(e)
@@ -434,13 +442,15 @@ class EventsStreamTestCase(BaseStreamTestCase):
             body = "event %i" % (self.event_count,)
             self.event_count += 1
 
-        return inject_event(
-            self.hs,
-            room_id=self.room_id,
-            sender=sender,
-            type="test_event",
-            content={"body": body},
-            **kwargs
+        return self.get_success(
+            inject_event(
+                self.hs,
+                room_id=self.room_id,
+                sender=sender,
+                type="test_event",
+                content={"body": body},
+                **kwargs
+            )
         )
 
     def _inject_state_event(
@@ -459,11 +469,13 @@ class EventsStreamTestCase(BaseStreamTestCase):
         if body is None:
             body = "state event %s" % (state_key,)
 
-        return inject_event(
-            self.hs,
-            room_id=self.room_id,
-            sender=sender,
-            type="test_state_event",
-            state_key=state_key,
-            content={"body": body},
+        return self.get_success(
+            inject_event(
+                self.hs,
+                room_id=self.room_id,
+                sender=sender,
+                type="test_state_event",
+                state_key=state_key,
+                content={"body": body},
+            )
         )
diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py
index 8d4dbf232e..83f9aa291c 100644
--- a/tests/replication/test_federation_sender_shard.py
+++ b/tests/replication/test_federation_sender_shard.py
@@ -16,8 +16,6 @@ import logging
 
 from mock import Mock
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes, Membership
 from synapse.events.builder import EventBuilderFactory
 from synapse.rest.admin import register_servlets_for_client_rest_resource
@@ -25,6 +23,7 @@ from synapse.rest.client.v1 import login, room
 from synapse.types import UserID
 
 from tests.replication._base import BaseMultiWorkerStreamTestCase
+from tests.test_utils import make_awaitable
 
 logger = logging.getLogger(__name__)
 
@@ -46,7 +45,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
         new event.
         """
         mock_client = Mock(spec=["put_json"])
-        mock_client.put_json.side_effect = lambda *_, **__: defer.succeed({})
+        mock_client.put_json.side_effect = lambda *_, **__: make_awaitable({})
 
         self.make_worker_hs(
             "synapse.app.federation_sender",
@@ -74,7 +73,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
         new events.
         """
         mock_client1 = Mock(spec=["put_json"])
-        mock_client1.put_json.side_effect = lambda *_, **__: defer.succeed({})
+        mock_client1.put_json.side_effect = lambda *_, **__: make_awaitable({})
         self.make_worker_hs(
             "synapse.app.federation_sender",
             {
@@ -86,7 +85,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
         )
 
         mock_client2 = Mock(spec=["put_json"])
-        mock_client2.put_json.side_effect = lambda *_, **__: defer.succeed({})
+        mock_client2.put_json.side_effect = lambda *_, **__: make_awaitable({})
         self.make_worker_hs(
             "synapse.app.federation_sender",
             {
@@ -137,7 +136,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
         new typing EDUs.
         """
         mock_client1 = Mock(spec=["put_json"])
-        mock_client1.put_json.side_effect = lambda *_, **__: defer.succeed({})
+        mock_client1.put_json.side_effect = lambda *_, **__: make_awaitable({})
         self.make_worker_hs(
             "synapse.app.federation_sender",
             {
@@ -149,7 +148,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
         )
 
         mock_client2 = Mock(spec=["put_json"])
-        mock_client2.put_json.side_effect = lambda *_, **__: defer.succeed({})
+        mock_client2.put_json.side_effect = lambda *_, **__: make_awaitable({})
         self.make_worker_hs(
             "synapse.app.federation_sender",
             {
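
The put_json stubs use `lambda *_, **__: make_awaitable({})` rather than a
fixed return_value: the signature-agnostic lambda swallows whatever positional
and keyword arguments the federation sender passes and still yields a fresh
awaitable per transaction. A sketch, assuming the make_awaitable helper from
earlier:

    from mock import Mock

    mock_client = Mock(spec=["put_json"])
    # Accept any call signature; each call gets its own resolved awaitable.
    mock_client.put_json.side_effect = lambda *_, **__: make_awaitable({})
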
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index b1a4decced..0f1144fe1e 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -178,7 +178,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
 
         self.fetches = []
 
-        def get_file(destination, path, output_stream, args=None, max_size=None):
+        async def get_file(destination, path, output_stream, args=None, max_size=None):
             """
             Returns tuple[int,dict,str,int] of file length, response headers,
             absolute URI, and response code.
@@ -192,7 +192,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
             d = Deferred()
             d.addCallback(write_to)
             self.fetches.append((d, destination, path, args))
-            return make_deferred_yieldable(d)
+            return await make_deferred_yieldable(d)
 
         client = Mock()
         client.get_file = get_file
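
Here get_file changes from a plain function returning
make_deferred_yieldable(d) to an async def that awaits it, matching the
now-async signature of the real client method it stubs out. Awaiting a
Deferred from async code works directly, but Synapse wraps it so logging
contexts are handled correctly while the Deferred is pending; a minimal
sketch of that pattern:

    from twisted.internet.defer import Deferred
    from synapse.logging.context import make_deferred_yieldable

    async def wait_for(d: Deferred):
        # Reset the logging context while the Deferred is pending and restore
        # it when the result arrives, then hand the result back to the caller.
        return await make_deferred_yieldable(d)
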
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index 946f06d151..cec1cf928f 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -1,1447 +1,1500 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Dirk Klimpel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-import urllib.parse
-from typing import List, Optional
-
-from mock import Mock
-
-import synapse.rest.admin
-from synapse.api.errors import Codes
-from synapse.rest.client.v1 import directory, events, login, room
-
-from tests import unittest
-
-"""Tests admin REST events for /rooms paths."""
-
-
-class ShutdownRoomTestCase(unittest.HomeserverTestCase):
-    servlets = [
-        synapse.rest.admin.register_servlets_for_client_rest_resource,
-        login.register_servlets,
-        events.register_servlets,
-        room.register_servlets,
-        room.register_deprecated_servlets,
-    ]
-
-    def prepare(self, reactor, clock, hs):
-        self.event_creation_handler = hs.get_event_creation_handler()
-        hs.config.user_consent_version = "1"
-
-        consent_uri_builder = Mock()
-        consent_uri_builder.build_user_consent_uri.return_value = "http://example.com"
-        self.event_creation_handler._consent_uri_builder = consent_uri_builder
-
-        self.store = hs.get_datastore()
-
-        self.admin_user = self.register_user("admin", "pass", admin=True)
-        self.admin_user_tok = self.login("admin", "pass")
-
-        self.other_user = self.register_user("user", "pass")
-        self.other_user_token = self.login("user", "pass")
-
-        # Mark the admin user as having consented
-        self.get_success(self.store.user_set_consent_version(self.admin_user, "1"))
-
-    def test_shutdown_room_consent(self):
-        """Test that we can shutdown rooms with local users who have not
-        yet accepted the privacy policy. This used to fail when we tried to
-        force part the user from the old room.
-        """
-        self.event_creation_handler._block_events_without_consent_error = None
-
-        room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_token)
-
-        # Assert one user in room
-        users_in_room = self.get_success(self.store.get_users_in_room(room_id))
-        self.assertEqual([self.other_user], users_in_room)
-
-        # Enable require consent to send events
-        self.event_creation_handler._block_events_without_consent_error = "Error"
-
-        # Assert that the user is getting consent error
-        self.helper.send(
-            room_id, body="foo", tok=self.other_user_token, expect_code=403
-        )
-
-        # Test that the admin can still send shutdown
-        url = "admin/shutdown_room/" + room_id
-        request, channel = self.make_request(
-            "POST",
-            url.encode("ascii"),
-            json.dumps({"new_room_user_id": self.admin_user}),
-            access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-
-        # Assert there is now no longer anyone in the room
-        users_in_room = self.get_success(self.store.get_users_in_room(room_id))
-        self.assertEqual([], users_in_room)
-
-    def test_shutdown_room_block_peek(self):
-        """Test that a world_readable room can no longer be peeked into after
-        it has been shut down.
-        """
-
-        self.event_creation_handler._block_events_without_consent_error = None
-
-        room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_token)
-
-        # Enable world readable
-        url = "rooms/%s/state/m.room.history_visibility" % (room_id,)
-        request, channel = self.make_request(
-            "PUT",
-            url.encode("ascii"),
-            json.dumps({"history_visibility": "world_readable"}),
-            access_token=self.other_user_token,
-        )
-        self.render(request)
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-
-        # Test that the admin can still send shutdown
-        url = "admin/shutdown_room/" + room_id
-        request, channel = self.make_request(
-            "POST",
-            url.encode("ascii"),
-            json.dumps({"new_room_user_id": self.admin_user}),
-            access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-
-        # Assert we can no longer peek into the room
-        self._assert_peek(room_id, expect_code=403)
-
-    def _assert_peek(self, room_id, expect_code):
-        """Assert that the admin user can (or cannot) peek into the room.
-        """
-
-        url = "rooms/%s/initialSync" % (room_id,)
-        request, channel = self.make_request(
-            "GET", url.encode("ascii"), access_token=self.admin_user_tok
-        )
-        self.render(request)
-        self.assertEqual(
-            expect_code, int(channel.result["code"]), msg=channel.result["body"]
-        )
-
-        url = "events?timeout=0&room_id=" + room_id
-        request, channel = self.make_request(
-            "GET", url.encode("ascii"), access_token=self.admin_user_tok
-        )
-        self.render(request)
-        self.assertEqual(
-            expect_code, int(channel.result["code"]), msg=channel.result["body"]
-        )
-
-
-class DeleteRoomTestCase(unittest.HomeserverTestCase):
-    servlets = [
-        synapse.rest.admin.register_servlets,
-        login.register_servlets,
-        events.register_servlets,
-        room.register_servlets,
-        room.register_deprecated_servlets,
-    ]
-
-    def prepare(self, reactor, clock, hs):
-        self.event_creation_handler = hs.get_event_creation_handler()
-        hs.config.user_consent_version = "1"
-
-        consent_uri_builder = Mock()
-        consent_uri_builder.build_user_consent_uri.return_value = "http://example.com"
-        self.event_creation_handler._consent_uri_builder = consent_uri_builder
-
-        self.store = hs.get_datastore()
-
-        self.admin_user = self.register_user("admin", "pass", admin=True)
-        self.admin_user_tok = self.login("admin", "pass")
-
-        self.other_user = self.register_user("user", "pass")
-        self.other_user_tok = self.login("user", "pass")
-
-        # Mark the admin user as having consented
-        self.get_success(self.store.user_set_consent_version(self.admin_user, "1"))
-
-        self.room_id = self.helper.create_room_as(
-            self.other_user, tok=self.other_user_tok
-        )
-        self.url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id
-
-    def test_requester_is_no_admin(self):
-        """
-        If the user is not a server admin, an error 403 is returned.
-        """
-
-        request, channel = self.make_request(
-            "POST", self.url, json.dumps({}), access_token=self.other_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
-
-    def test_room_does_not_exist(self):
-        """
-        Check that unknown rooms/server return error 404.
-        """
-        url = "/_synapse/admin/v1/rooms/!unknown:test/delete"
-
-        request, channel = self.make_request(
-            "POST", url, json.dumps({}), access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
-
-    def test_room_is_not_valid(self):
-        """
-        Check that invalid room names, return an error 400.
-        """
-        url = "/_synapse/admin/v1/rooms/invalidroom/delete"
-
-        request, channel = self.make_request(
-            "POST", url, json.dumps({}), access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(
-            "invalidroom is not a legal room ID", channel.json_body["error"],
-        )
-
-    def test_new_room_user_does_not_exist(self):
-        """
-        Tests that the user ID must be from local server but it does not have to exist.
-        """
-        body = json.dumps({"new_room_user_id": "@unknown:test"})
-
-        request, channel = self.make_request(
-            "POST",
-            self.url,
-            content=body.encode(encoding="utf_8"),
-            access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertIn("new_room_id", channel.json_body)
-        self.assertIn("kicked_users", channel.json_body)
-        self.assertIn("failed_to_kick_users", channel.json_body)
-        self.assertIn("local_aliases", channel.json_body)
-
-    def test_new_room_user_is_not_local(self):
-        """
-        Check that only local users can create new room to move members.
-        """
-        body = json.dumps({"new_room_user_id": "@not:exist.bla"})
-
-        request, channel = self.make_request(
-            "POST",
-            self.url,
-            content=body.encode(encoding="utf_8"),
-            access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(
-            "User must be our own: @not:exist.bla", channel.json_body["error"],
-        )
-
-    def test_block_is_not_bool(self):
-        """
-        If parameter `block` is not boolean, return an error
-        """
-        body = json.dumps({"block": "NotBool"})
-
-        request, channel = self.make_request(
-            "POST",
-            self.url,
-            content=body.encode(encoding="utf_8"),
-            access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
-
-    def test_purge_room_and_block(self):
-        """Test to purge a room and block it.
-        Members will not be moved to a new room and will not receive a message.
-        """
-        # Test that room is not purged
-        with self.assertRaises(AssertionError):
-            self._is_purged(self.room_id)
-
-        # Test that room is not blocked
-        self._is_blocked(self.room_id, expect=False)
-
-        # Assert one user in room
-        self._is_member(room_id=self.room_id, user_id=self.other_user)
-
-        body = json.dumps({"block": True})
-
-        request, channel = self.make_request(
-            "POST",
-            self.url.encode("ascii"),
-            content=body.encode(encoding="utf_8"),
-            access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(None, channel.json_body["new_room_id"])
-        self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
-        self.assertIn("failed_to_kick_users", channel.json_body)
-        self.assertIn("local_aliases", channel.json_body)
-
-        self._is_purged(self.room_id)
-        self._is_blocked(self.room_id, expect=True)
-        self._has_no_members(self.room_id)
-
-    def test_purge_room_and_not_block(self):
-        """Test to purge a room and do not block it.
-        Members will not be moved to a new room and will not receive a message.
-        """
-        # Test that room is not purged
-        with self.assertRaises(AssertionError):
-            self._is_purged(self.room_id)
-
-        # Test that room is not blocked
-        self._is_blocked(self.room_id, expect=False)
-
-        # Assert one user in room
-        self._is_member(room_id=self.room_id, user_id=self.other_user)
-
-        body = json.dumps({"block": False})
-
-        request, channel = self.make_request(
-            "POST",
-            self.url.encode("ascii"),
-            content=body.encode(encoding="utf_8"),
-            access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(None, channel.json_body["new_room_id"])
-        self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
-        self.assertIn("failed_to_kick_users", channel.json_body)
-        self.assertIn("local_aliases", channel.json_body)
-
-        self._is_purged(self.room_id)
-        self._is_blocked(self.room_id, expect=False)
-        self._has_no_members(self.room_id)
-
-    def test_shutdown_room_consent(self):
-        """Test that we can shutdown rooms with local users who have not
-        yet accepted the privacy policy. This used to fail when we tried to
-        force part the user from the old room.
-        Members will be moved to a new room and will receive a message.
-        """
-        self.event_creation_handler._block_events_without_consent_error = None
-
-        # Assert one user in room
-        users_in_room = self.get_success(self.store.get_users_in_room(self.room_id))
-        self.assertEqual([self.other_user], users_in_room)
-
-        # Enable require consent to send events
-        self.event_creation_handler._block_events_without_consent_error = "Error"
-
-        # Assert that the user is getting consent error
-        self.helper.send(
-            self.room_id, body="foo", tok=self.other_user_tok, expect_code=403
-        )
-
-        # Test that room is not purged
-        with self.assertRaises(AssertionError):
-            self._is_purged(self.room_id)
-
-        # Assert one user in room
-        self._is_member(room_id=self.room_id, user_id=self.other_user)
-
-        # Test that the admin can still send shutdown
-        url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id
-        request, channel = self.make_request(
-            "POST",
-            url.encode("ascii"),
-            json.dumps({"new_room_user_id": self.admin_user}),
-            access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
-        self.assertIn("new_room_id", channel.json_body)
-        self.assertIn("failed_to_kick_users", channel.json_body)
-        self.assertIn("local_aliases", channel.json_body)
-
-        # Test that member has moved to new room
-        self._is_member(
-            room_id=channel.json_body["new_room_id"], user_id=self.other_user
-        )
-
-        self._is_purged(self.room_id)
-        self._has_no_members(self.room_id)
-
-    def test_shutdown_room_block_peek(self):
-        """Test that a world_readable room can no longer be peeked into after
-        it has been shut down.
-        Members will be moved to a new room and will receive a message.
-        """
-        self.event_creation_handler._block_events_without_consent_error = None
-
-        # Enable world readable
-        url = "rooms/%s/state/m.room.history_visibility" % (self.room_id,)
-        request, channel = self.make_request(
-            "PUT",
-            url.encode("ascii"),
-            json.dumps({"history_visibility": "world_readable"}),
-            access_token=self.other_user_tok,
-        )
-        self.render(request)
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-
-        # Test that room is not purged
-        with self.assertRaises(AssertionError):
-            self._is_purged(self.room_id)
-
-        # Assert one user in room
-        self._is_member(room_id=self.room_id, user_id=self.other_user)
-
-        # Test that the admin can still send shutdown
-        url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id
-        request, channel = self.make_request(
-            "POST",
-            url.encode("ascii"),
-            json.dumps({"new_room_user_id": self.admin_user}),
-            access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-        self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
-        self.assertIn("new_room_id", channel.json_body)
-        self.assertIn("failed_to_kick_users", channel.json_body)
-        self.assertIn("local_aliases", channel.json_body)
-
-        # Test that member has moved to new room
-        self._is_member(
-            room_id=channel.json_body["new_room_id"], user_id=self.other_user
-        )
-
-        self._is_purged(self.room_id)
-        self._has_no_members(self.room_id)
-
-        # Assert we can no longer peek into the room
-        self._assert_peek(self.room_id, expect_code=403)
-
-    def _is_blocked(self, room_id, expect=True):
-        """Assert that the room is blocked or not
-        """
-        d = self.store.is_room_blocked(room_id)
-        if expect:
-            self.assertTrue(self.get_success(d))
-        else:
-            self.assertIsNone(self.get_success(d))
-
-    def _has_no_members(self, room_id):
-        """Assert there is now no longer anyone in the room
-        """
-        users_in_room = self.get_success(self.store.get_users_in_room(room_id))
-        self.assertEqual([], users_in_room)
-
-    def _is_member(self, room_id, user_id):
-        """Test that user is member of the room
-        """
-        users_in_room = self.get_success(self.store.get_users_in_room(room_id))
-        self.assertIn(user_id, users_in_room)
-
-    def _is_purged(self, room_id):
-        """Test that the following tables have been purged of all rows related to the room.
-        """
-        for table in (
-            "current_state_events",
-            "event_backward_extremities",
-            "event_forward_extremities",
-            "event_json",
-            "event_push_actions",
-            "event_search",
-            "events",
-            "group_rooms",
-            "public_room_list_stream",
-            "receipts_graph",
-            "receipts_linearized",
-            "room_aliases",
-            "room_depth",
-            "room_memberships",
-            "room_stats_state",
-            "room_stats_current",
-            "room_stats_historical",
-            "room_stats_earliest_token",
-            "rooms",
-            "stream_ordering_to_exterm",
-            "users_in_public_rooms",
-            "users_who_share_private_rooms",
-            "appservice_room_list",
-            "e2e_room_keys",
-            "event_push_summary",
-            "pusher_throttle",
-            "group_summary_rooms",
-            "local_invites",
-            "room_account_data",
-            "room_tags",
-            # "state_groups",  # Current impl leaves orphaned state groups around.
-            "state_groups_state",
-        ):
-            count = self.get_success(
-                self.store.db.simple_select_one_onecol(
-                    table=table,
-                    keyvalues={"room_id": room_id},
-                    retcol="COUNT(*)",
-                    desc="test_purge_room",
-                )
-            )
-
-            self.assertEqual(count, 0, msg="Rows not purged in {}".format(table))
-
-    def _assert_peek(self, room_id, expect_code):
-        """Assert that the admin user can (or cannot) peek into the room.
-        """
-
-        url = "rooms/%s/initialSync" % (room_id,)
-        request, channel = self.make_request(
-            "GET", url.encode("ascii"), access_token=self.admin_user_tok
-        )
-        self.render(request)
-        self.assertEqual(
-            expect_code, int(channel.result["code"]), msg=channel.result["body"]
-        )
-
-        url = "events?timeout=0&room_id=" + room_id
-        request, channel = self.make_request(
-            "GET", url.encode("ascii"), access_token=self.admin_user_tok
-        )
-        self.render(request)
-        self.assertEqual(
-            expect_code, int(channel.result["code"]), msg=channel.result["body"]
-        )
-
-
-class PurgeRoomTestCase(unittest.HomeserverTestCase):
-    """Test /purge_room admin API.
-    """
-
-    servlets = [
-        synapse.rest.admin.register_servlets,
-        login.register_servlets,
-        room.register_servlets,
-    ]
-
-    def prepare(self, reactor, clock, hs):
-        self.store = hs.get_datastore()
-
-        self.admin_user = self.register_user("admin", "pass", admin=True)
-        self.admin_user_tok = self.login("admin", "pass")
-
-    def test_purge_room(self):
-        room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
-
-        # All users have to have left the room.
-        self.helper.leave(room_id, user=self.admin_user, tok=self.admin_user_tok)
-
-        url = "/_synapse/admin/v1/purge_room"
-        request, channel = self.make_request(
-            "POST",
-            url.encode("ascii"),
-            {"room_id": room_id},
-            access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
-
-        # Test that the following tables have been purged of all rows related to the room.
-        for table in (
-            "current_state_events",
-            "event_backward_extremities",
-            "event_forward_extremities",
-            "event_json",
-            "event_push_actions",
-            "event_search",
-            "events",
-            "group_rooms",
-            "public_room_list_stream",
-            "receipts_graph",
-            "receipts_linearized",
-            "room_aliases",
-            "room_depth",
-            "room_memberships",
-            "room_stats_state",
-            "room_stats_current",
-            "room_stats_historical",
-            "room_stats_earliest_token",
-            "rooms",
-            "stream_ordering_to_exterm",
-            "users_in_public_rooms",
-            "users_who_share_private_rooms",
-            "appservice_room_list",
-            "e2e_room_keys",
-            "event_push_summary",
-            "pusher_throttle",
-            "group_summary_rooms",
-            "room_account_data",
-            "room_tags",
-            # "state_groups",  # Current impl leaves orphaned state groups around.
-            "state_groups_state",
-        ):
-            count = self.get_success(
-                self.store.db.simple_select_one_onecol(
-                    table=table,
-                    keyvalues={"room_id": room_id},
-                    retcol="COUNT(*)",
-                    desc="test_purge_room",
-                )
-            )
-
-            self.assertEqual(count, 0, msg="Rows not purged in {}".format(table))
-
-
-class RoomTestCase(unittest.HomeserverTestCase):
-    """Test /room admin API.
-    """
-
-    servlets = [
-        synapse.rest.admin.register_servlets,
-        login.register_servlets,
-        room.register_servlets,
-        directory.register_servlets,
-    ]
-
-    def prepare(self, reactor, clock, hs):
-        self.store = hs.get_datastore()
-
-        # Create user
-        self.admin_user = self.register_user("admin", "pass", admin=True)
-        self.admin_user_tok = self.login("admin", "pass")
-
-    def test_list_rooms(self):
-        """Test that we can list rooms"""
-        # Create 3 test rooms
-        total_rooms = 3
-        room_ids = []
-        for x in range(total_rooms):
-            room_id = self.helper.create_room_as(
-                self.admin_user, tok=self.admin_user_tok
-            )
-            room_ids.append(room_id)
-
-        # Request the list of rooms
-        url = "/_synapse/admin/v1/rooms"
-        request, channel = self.make_request(
-            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
-        )
-        self.render(request)
-
-        # Check request completed successfully
-        self.assertEqual(200, int(channel.code), msg=channel.json_body)
-
-        # Check that response json body contains a "rooms" key
-        self.assertTrue(
-            "rooms" in channel.json_body,
-            msg="Response body does not " "contain a 'rooms' key",
-        )
-
-        # Check that 3 rooms were returned
-        self.assertEqual(3, len(channel.json_body["rooms"]), msg=channel.json_body)
-
-        # Check their room_ids match
-        returned_room_ids = [room["room_id"] for room in channel.json_body["rooms"]]
-        self.assertEqual(room_ids, returned_room_ids)
-
-        # Check that all fields are available
-        for r in channel.json_body["rooms"]:
-            self.assertIn("name", r)
-            self.assertIn("canonical_alias", r)
-            self.assertIn("joined_members", r)
-            self.assertIn("joined_local_members", r)
-            self.assertIn("version", r)
-            self.assertIn("creator", r)
-            self.assertIn("encryption", r)
-            self.assertIn("federatable", r)
-            self.assertIn("public", r)
-            self.assertIn("join_rules", r)
-            self.assertIn("guest_access", r)
-            self.assertIn("history_visibility", r)

-            self.assertIn("state_events", r)

-

-        # Check that the correct number of total rooms was returned

-        self.assertEqual(channel.json_body["total_rooms"], total_rooms)

-

-        # Check that the offset is correct

-        # Should be 0 as we aren't paginating

-        self.assertEqual(channel.json_body["offset"], 0)

-

-        # Check that the prev_batch parameter is not present

-        self.assertNotIn("prev_batch", channel.json_body)

-

-        # We shouldn't receive a next token here as there's no further rooms to show

-        self.assertNotIn("next_batch", channel.json_body)

-

-    def test_list_rooms_pagination(self):

-        """Test that we can get a full list of rooms through pagination"""

-        # Create 5 test rooms

-        total_rooms = 5

-        room_ids = []

-        for x in range(total_rooms):

-            room_id = self.helper.create_room_as(

-                self.admin_user, tok=self.admin_user_tok

-            )

-            room_ids.append(room_id)

-

-        # Set the name of the rooms so we get a consistent returned ordering

-        for idx, room_id in enumerate(room_ids):

-            self.helper.send_state(

-                room_id, "m.room.name", {"name": str(idx)}, tok=self.admin_user_tok,

-            )

-

-        # Request the list of rooms

-        returned_room_ids = []

-        start = 0

-        limit = 2

-

-        run_count = 0

-        should_repeat = True

-        while should_repeat:

-            run_count += 1

-

-            url = "/_synapse/admin/v1/rooms?from=%d&limit=%d&order_by=%s" % (

-                start,

-                limit,

-                "name",

-            )

-            request, channel = self.make_request(

-                "GET", url.encode("ascii"), access_token=self.admin_user_tok,

-            )

-            self.render(request)

-            self.assertEqual(

-                200, int(channel.result["code"]), msg=channel.result["body"]

-            )

-

-            self.assertTrue("rooms" in channel.json_body)

-            for r in channel.json_body["rooms"]:

-                returned_room_ids.append(r["room_id"])

-

-            # Check that the correct number of total rooms was returned

-            self.assertEqual(channel.json_body["total_rooms"], total_rooms)

-

-            # Check that the offset is correct

-            # We're only getting 2 rooms each page, so should be 2 * last run_count

-            self.assertEqual(channel.json_body["offset"], 2 * (run_count - 1))

-

-            if run_count > 1:

-                # Check the value of prev_batch is correct

-                self.assertEqual(channel.json_body["prev_batch"], 2 * (run_count - 2))

-

-            if "next_batch" not in channel.json_body:

-                # We have reached the end of the list

-                should_repeat = False

-            else:

-                # Make another query with an updated start value

-                start = channel.json_body["next_batch"]

-

-        # We should've queried the endpoint 3 times

-        self.assertEqual(

-            run_count,

-            3,

-            msg="Should've queried 3 times for 5 rooms with limit 2 per query",

-        )

-

-        # Check that we received all of the room ids

-        self.assertEqual(room_ids, returned_room_ids)

-

-        url = "/_synapse/admin/v1/rooms?from=%d&limit=%d" % (start, limit)

-        request, channel = self.make_request(

-            "GET", url.encode("ascii"), access_token=self.admin_user_tok,

-        )

-        self.render(request)

-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])

-

-    def test_correct_room_attributes(self):

-        """Test the correct attributes for a room are returned"""

-        # Create a test room

-        room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)

-

-        test_alias = "#test:test"

-        test_room_name = "something"

-

-        # Have another user join the room

-        user_2 = self.register_user("user4", "pass")

-        user_tok_2 = self.login("user4", "pass")

-        self.helper.join(room_id, user_2, tok=user_tok_2)

-

-        # Create a new alias to this room

-        url = "/_matrix/client/r0/directory/room/%s" % (urllib.parse.quote(test_alias),)

-        request, channel = self.make_request(

-            "PUT",

-            url.encode("ascii"),

-            {"room_id": room_id},

-            access_token=self.admin_user_tok,

-        )

-        self.render(request)

-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])

-

-        # Set this new alias as the canonical alias for this room

-        self.helper.send_state(

-            room_id,

-            "m.room.aliases",

-            {"aliases": [test_alias]},

-            tok=self.admin_user_tok,

-            state_key="test",

-        )

-        self.helper.send_state(

-            room_id,

-            "m.room.canonical_alias",

-            {"alias": test_alias},

-            tok=self.admin_user_tok,

-        )

-

-        # Set a name for the room

-        self.helper.send_state(

-            room_id, "m.room.name", {"name": test_room_name}, tok=self.admin_user_tok,

-        )

-

-        # Request the list of rooms

-        url = "/_synapse/admin/v1/rooms"

-        request, channel = self.make_request(

-            "GET", url.encode("ascii"), access_token=self.admin_user_tok,

-        )

-        self.render(request)

-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])

-

-        # Check that rooms were returned

-        self.assertTrue("rooms" in channel.json_body)

-        rooms = channel.json_body["rooms"]

-

-        # Check that only one room was returned

-        self.assertEqual(len(rooms), 1)

-

-        # And that the value of the total_rooms key was correct

-        self.assertEqual(channel.json_body["total_rooms"], 1)

-

-        # Check that the offset is correct

-        # We're not paginating, so should be 0

-        self.assertEqual(channel.json_body["offset"], 0)

-

-        # Check that there is no `prev_batch`

-        self.assertNotIn("prev_batch", channel.json_body)

-

-        # Check that there is no `next_batch`

-        self.assertNotIn("next_batch", channel.json_body)

-

-        # Check that all provided attributes are set

-        r = rooms[0]

-        self.assertEqual(room_id, r["room_id"])

-        self.assertEqual(test_room_name, r["name"])

-        self.assertEqual(test_alias, r["canonical_alias"])

-

-    def test_room_list_sort_order(self):

-        """Test room list sort ordering. alphabetical name versus number of members,

-        reversing the order, etc.

-        """

-

-        def _set_canonical_alias(room_id: str, test_alias: str, admin_user_tok: str):

-            # Create a new alias to this room

-            url = "/_matrix/client/r0/directory/room/%s" % (

-                urllib.parse.quote(test_alias),

-            )

-            request, channel = self.make_request(

-                "PUT",

-                url.encode("ascii"),

-                {"room_id": room_id},

-                access_token=admin_user_tok,

-            )

-            self.render(request)

-            self.assertEqual(

-                200, int(channel.result["code"]), msg=channel.result["body"]

-            )

-

-            # Set this new alias as the canonical alias for this room

-            self.helper.send_state(

-                room_id,

-                "m.room.aliases",

-                {"aliases": [test_alias]},

-                tok=admin_user_tok,

-                state_key="test",

-            )

-            self.helper.send_state(

-                room_id,

-                "m.room.canonical_alias",

-                {"alias": test_alias},

-                tok=admin_user_tok,

-            )

-

-        def _order_test(

-            order_type: str, expected_room_list: List[str], reverse: bool = False,

-        ):

-            """Request the list of rooms in a certain order. Assert that order is what

-            we expect

-

-            Args:

-                order_type: The type of ordering to give the server

-                expected_room_list: The list of room_ids in the order we expect to get

-                    back from the server

-            """

-            # Request the list of rooms in the given order

-            url = "/_synapse/admin/v1/rooms?order_by=%s" % (order_type,)

-            if reverse:

-                url += "&dir=b"

-            request, channel = self.make_request(

-                "GET", url.encode("ascii"), access_token=self.admin_user_tok,

-            )

-            self.render(request)

-            self.assertEqual(200, channel.code, msg=channel.json_body)

-

-            # Check that rooms were returned

-            self.assertTrue("rooms" in channel.json_body)

-            rooms = channel.json_body["rooms"]

-

-            # Check for the correct total_rooms value

-            self.assertEqual(channel.json_body["total_rooms"], 3)

-

-            # Check that the offset is correct

-            # We're not paginating, so should be 0

-            self.assertEqual(channel.json_body["offset"], 0)

-

-            # Check that there is no `prev_batch`

-            self.assertNotIn("prev_batch", channel.json_body)

-

-            # Check that there is no `next_batch`

-            self.assertNotIn("next_batch", channel.json_body)

-

-            # Check that rooms were returned in alphabetical order

-            returned_order = [r["room_id"] for r in rooms]

-            self.assertListEqual(expected_room_list, returned_order)  # order is checked

-

-        # Create 3 test rooms

-        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)

-        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)

-        room_id_3 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)

-

-        # Set room names in alphabetical order. room 1 -> A, 2 -> B, 3 -> C

-        self.helper.send_state(

-            room_id_1, "m.room.name", {"name": "A"}, tok=self.admin_user_tok,

-        )

-        self.helper.send_state(

-            room_id_2, "m.room.name", {"name": "B"}, tok=self.admin_user_tok,

-        )

-        self.helper.send_state(

-            room_id_3, "m.room.name", {"name": "C"}, tok=self.admin_user_tok,

-        )

-

-        # Set room canonical room aliases

-        _set_canonical_alias(room_id_1, "#A_alias:test", self.admin_user_tok)

-        _set_canonical_alias(room_id_2, "#B_alias:test", self.admin_user_tok)

-        _set_canonical_alias(room_id_3, "#C_alias:test", self.admin_user_tok)

-

-        # Set room member size in the reverse order. room 1 -> 1 member, 2 -> 2, 3 -> 3

-        user_1 = self.register_user("bob1", "pass")

-        user_1_tok = self.login("bob1", "pass")

-        self.helper.join(room_id_2, user_1, tok=user_1_tok)

-

-        user_2 = self.register_user("bob2", "pass")

-        user_2_tok = self.login("bob2", "pass")

-        self.helper.join(room_id_3, user_2, tok=user_2_tok)

-

-        user_3 = self.register_user("bob3", "pass")

-        user_3_tok = self.login("bob3", "pass")

-        self.helper.join(room_id_3, user_3, tok=user_3_tok)

-

-        # Test different sort orders, with forward and reverse directions

-        _order_test("name", [room_id_1, room_id_2, room_id_3])

-        _order_test("name", [room_id_3, room_id_2, room_id_1], reverse=True)

-

-        _order_test("canonical_alias", [room_id_1, room_id_2, room_id_3])

-        _order_test("canonical_alias", [room_id_3, room_id_2, room_id_1], reverse=True)

-

-        _order_test("joined_members", [room_id_3, room_id_2, room_id_1])

-        _order_test("joined_members", [room_id_1, room_id_2, room_id_3], reverse=True)

-

-        _order_test("joined_local_members", [room_id_3, room_id_2, room_id_1])

-        _order_test(

-            "joined_local_members", [room_id_1, room_id_2, room_id_3], reverse=True

-        )

-

-        _order_test("version", [room_id_1, room_id_2, room_id_3])

-        _order_test("version", [room_id_1, room_id_2, room_id_3], reverse=True)

-

-        _order_test("creator", [room_id_1, room_id_2, room_id_3])

-        _order_test("creator", [room_id_1, room_id_2, room_id_3], reverse=True)

-

-        _order_test("encryption", [room_id_1, room_id_2, room_id_3])

-        _order_test("encryption", [room_id_1, room_id_2, room_id_3], reverse=True)

-

-        _order_test("federatable", [room_id_1, room_id_2, room_id_3])

-        _order_test("federatable", [room_id_1, room_id_2, room_id_3], reverse=True)

-

-        _order_test("public", [room_id_1, room_id_2, room_id_3])

-        # Different sort order of SQlite and PostreSQL

-        # _order_test("public", [room_id_3, room_id_2, room_id_1], reverse=True)

-

-        _order_test("join_rules", [room_id_1, room_id_2, room_id_3])

-        _order_test("join_rules", [room_id_1, room_id_2, room_id_3], reverse=True)

-

-        _order_test("guest_access", [room_id_1, room_id_2, room_id_3])

-        _order_test("guest_access", [room_id_1, room_id_2, room_id_3], reverse=True)

-

-        _order_test("history_visibility", [room_id_1, room_id_2, room_id_3])

-        _order_test(

-            "history_visibility", [room_id_1, room_id_2, room_id_3], reverse=True

-        )

-

-        _order_test("state_events", [room_id_3, room_id_2, room_id_1])

-        _order_test("state_events", [room_id_1, room_id_2, room_id_3], reverse=True)

-

-    def test_search_term(self):

-        """Test that searching for a room works correctly"""

-        # Create two test rooms

-        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)

-        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)

-

-        room_name_1 = "something"

-        room_name_2 = "else"

-

-        # Set the name for each room

-        self.helper.send_state(

-            room_id_1, "m.room.name", {"name": room_name_1}, tok=self.admin_user_tok,

-        )

-        self.helper.send_state(

-            room_id_2, "m.room.name", {"name": room_name_2}, tok=self.admin_user_tok,

-        )

-

-        def _search_test(

-            expected_room_id: Optional[str],

-            search_term: str,

-            expected_http_code: int = 200,

-        ):

-            """Search for a room and check that the returned room's id is a match

-

-            Args:

-                expected_room_id: The room_id expected to be returned by the API. Set

-                    to None to expect zero results for the search

-                search_term: The term to search for room names with

-                expected_http_code: The expected http code for the request

-            """

-            url = "/_synapse/admin/v1/rooms?search_term=%s" % (search_term,)

-            request, channel = self.make_request(

-                "GET", url.encode("ascii"), access_token=self.admin_user_tok,

-            )

-            self.render(request)

-            self.assertEqual(expected_http_code, channel.code, msg=channel.json_body)

-

-            if expected_http_code != 200:

-                return

-

-            # Check that rooms were returned

-            self.assertTrue("rooms" in channel.json_body)

-            rooms = channel.json_body["rooms"]

-

-            # Check that the expected number of rooms were returned

-            expected_room_count = 1 if expected_room_id else 0

-            self.assertEqual(len(rooms), expected_room_count)

-            self.assertEqual(channel.json_body["total_rooms"], expected_room_count)

-

-            # Check that the offset is correct

-            # We're not paginating, so should be 0

-            self.assertEqual(channel.json_body["offset"], 0)

-

-            # Check that there is no `prev_batch`

-            self.assertNotIn("prev_batch", channel.json_body)

-

-            # Check that there is no `next_batch`

-            self.assertNotIn("next_batch", channel.json_body)

-

-            if expected_room_id:

-                # Check that the first returned room id is correct

-                r = rooms[0]

-                self.assertEqual(expected_room_id, r["room_id"])

-

-        # Perform search tests

-        _search_test(room_id_1, "something")

-        _search_test(room_id_1, "thing")

-

-        _search_test(room_id_2, "else")

-        _search_test(room_id_2, "se")

-

-        _search_test(None, "foo")

-        _search_test(None, "bar")

-        _search_test(None, "", expected_http_code=400)

-

-    def test_single_room(self):

-        """Test that a single room can be requested correctly"""

-        # Create two test rooms

-        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)

-        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)

-

-        room_name_1 = "something"

-        room_name_2 = "else"

-

-        # Set the name for each room

-        self.helper.send_state(

-            room_id_1, "m.room.name", {"name": room_name_1}, tok=self.admin_user_tok,

-        )

-        self.helper.send_state(

-            room_id_2, "m.room.name", {"name": room_name_2}, tok=self.admin_user_tok,

-        )

-

-        url = "/_synapse/admin/v1/rooms/%s" % (room_id_1,)

-        request, channel = self.make_request(

-            "GET", url.encode("ascii"), access_token=self.admin_user_tok,

-        )

-        self.render(request)

-        self.assertEqual(200, channel.code, msg=channel.json_body)

-

-        self.assertIn("room_id", channel.json_body)

-        self.assertIn("name", channel.json_body)

-        self.assertIn("canonical_alias", channel.json_body)

-        self.assertIn("joined_members", channel.json_body)

-        self.assertIn("joined_local_members", channel.json_body)

-        self.assertIn("version", channel.json_body)

-        self.assertIn("creator", channel.json_body)

-        self.assertIn("encryption", channel.json_body)

-        self.assertIn("federatable", channel.json_body)

-        self.assertIn("public", channel.json_body)

-        self.assertIn("join_rules", channel.json_body)

-        self.assertIn("guest_access", channel.json_body)

-        self.assertIn("history_visibility", channel.json_body)

-        self.assertIn("state_events", channel.json_body)

-

-        self.assertEqual(room_id_1, channel.json_body["room_id"])

-

-    def test_room_members(self):

-        """Test that room members can be requested correctly"""

-        # Create two test rooms

-        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)

-        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)

-

-        # Have another user join the room

-        user_1 = self.register_user("foo", "pass")

-        user_tok_1 = self.login("foo", "pass")

-        self.helper.join(room_id_1, user_1, tok=user_tok_1)

-

-        # Have another user join the room

-        user_2 = self.register_user("bar", "pass")

-        user_tok_2 = self.login("bar", "pass")

-        self.helper.join(room_id_1, user_2, tok=user_tok_2)

-        self.helper.join(room_id_2, user_2, tok=user_tok_2)

-

-        # Have another user join the room

-        user_3 = self.register_user("foobar", "pass")

-        user_tok_3 = self.login("foobar", "pass")

-        self.helper.join(room_id_2, user_3, tok=user_tok_3)

-

-        url = "/_synapse/admin/v1/rooms/%s/members" % (room_id_1,)

-        request, channel = self.make_request(

-            "GET", url.encode("ascii"), access_token=self.admin_user_tok,

-        )

-        self.render(request)

-        self.assertEqual(200, channel.code, msg=channel.json_body)

-

-        self.assertCountEqual(

-            ["@admin:test", "@foo:test", "@bar:test"], channel.json_body["members"]

-        )

-        self.assertEqual(channel.json_body["total"], 3)

-

-        url = "/_synapse/admin/v1/rooms/%s/members" % (room_id_2,)

-        request, channel = self.make_request(

-            "GET", url.encode("ascii"), access_token=self.admin_user_tok,

-        )

-        self.render(request)

-        self.assertEqual(200, channel.code, msg=channel.json_body)

-

-        self.assertCountEqual(

-            ["@admin:test", "@bar:test", "@foobar:test"], channel.json_body["members"]

-        )

-        self.assertEqual(channel.json_body["total"], 3)

-

-

-class JoinAliasRoomTestCase(unittest.HomeserverTestCase):

-

-    servlets = [

-        synapse.rest.admin.register_servlets,

-        room.register_servlets,

-        login.register_servlets,

-    ]

-

-    def prepare(self, reactor, clock, homeserver):

-        self.admin_user = self.register_user("admin", "pass", admin=True)

-        self.admin_user_tok = self.login("admin", "pass")

-

-        self.creator = self.register_user("creator", "test")

-        self.creator_tok = self.login("creator", "test")

-

-        self.second_user_id = self.register_user("second", "test")

-        self.second_tok = self.login("second", "test")

-

-        self.public_room_id = self.helper.create_room_as(

-            self.creator, tok=self.creator_tok, is_public=True

-        )

-        self.url = "/_synapse/admin/v1/join/{}".format(self.public_room_id)

-

-    def test_requester_is_no_admin(self):

-        """

-        If the user is not a server admin, an error 403 is returned.

-        """

-        body = json.dumps({"user_id": self.second_user_id})

-

-        request, channel = self.make_request(

-            "POST",

-            self.url,

-            content=body.encode(encoding="utf_8"),

-            access_token=self.second_tok,

-        )

-        self.render(request)

-

-        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])

-

-    def test_invalid_parameter(self):

-        """

-        If a parameter is missing, return an error

-        """

-        body = json.dumps({"unknown_parameter": "@unknown:test"})

-

-        request, channel = self.make_request(

-            "POST",

-            self.url,

-            content=body.encode(encoding="utf_8"),

-            access_token=self.admin_user_tok,

-        )

-        self.render(request)

-

-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])

-

-    def test_local_user_does_not_exist(self):

-        """

-        Tests that a lookup for a user that does not exist returns a 404

-        """

-        body = json.dumps({"user_id": "@unknown:test"})

-

-        request, channel = self.make_request(

-            "POST",

-            self.url,

-            content=body.encode(encoding="utf_8"),

-            access_token=self.admin_user_tok,

-        )

-        self.render(request)

-

-        self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])

-

-    def test_remote_user(self):

-        """

-        Check that only local user can join rooms.

-        """

-        body = json.dumps({"user_id": "@not:exist.bla"})

-

-        request, channel = self.make_request(

-            "POST",

-            self.url,

-            content=body.encode(encoding="utf_8"),

-            access_token=self.admin_user_tok,

-        )

-        self.render(request)

-

-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(

-            "This endpoint can only be used with local users",

-            channel.json_body["error"],

-        )

-

-    def test_room_does_not_exist(self):

-        """

-        Check that unknown rooms/server return error 404.

-        """

-        body = json.dumps({"user_id": self.second_user_id})

-        url = "/_synapse/admin/v1/join/!unknown:test"

-

-        request, channel = self.make_request(

-            "POST",

-            url,

-            content=body.encode(encoding="utf_8"),

-            access_token=self.admin_user_tok,

-        )

-        self.render(request)

-

-        self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual("No known servers", channel.json_body["error"])

-

-    def test_room_is_not_valid(self):

-        """

-        Check that invalid room names, return an error 400.

-        """

-        body = json.dumps({"user_id": self.second_user_id})

-        url = "/_synapse/admin/v1/join/invalidroom"

-

-        request, channel = self.make_request(

-            "POST",

-            url,

-            content=body.encode(encoding="utf_8"),

-            access_token=self.admin_user_tok,

-        )

-        self.render(request)

-

-        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(

-            "invalidroom was not legal room ID or room alias",

-            channel.json_body["error"],

-        )

-

-    def test_join_public_room(self):

-        """

-        Test joining a local user to a public room with "JoinRules.PUBLIC"

-        """

-        body = json.dumps({"user_id": self.second_user_id})

-

-        request, channel = self.make_request(

-            "POST",

-            self.url,

-            content=body.encode(encoding="utf_8"),

-            access_token=self.admin_user_tok,

-        )

-        self.render(request)

-

-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(self.public_room_id, channel.json_body["room_id"])

-

-        # Validate if user is a member of the room

-

-        request, channel = self.make_request(

-            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok,

-        )

-        self.render(request)

-        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(self.public_room_id, channel.json_body["joined_rooms"][0])

-

-    def test_join_private_room_if_not_member(self):

-        """

-        Test joining a local user to a private room with "JoinRules.INVITE"

-        when server admin is not member of this room.

-        """

-        private_room_id = self.helper.create_room_as(

-            self.creator, tok=self.creator_tok, is_public=False

-        )

-        url = "/_synapse/admin/v1/join/{}".format(private_room_id)

-        body = json.dumps({"user_id": self.second_user_id})

-

-        request, channel = self.make_request(

-            "POST",

-            url,

-            content=body.encode(encoding="utf_8"),

-            access_token=self.admin_user_tok,

-        )

-        self.render(request)

-

-        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])

-

-    def test_join_private_room_if_member(self):

-        """

-        Test joining a local user to a private room with "JoinRules.INVITE",

-        when server admin is member of this room.

-        """

-        private_room_id = self.helper.create_room_as(

-            self.creator, tok=self.creator_tok, is_public=False

-        )

-        self.helper.invite(

-            room=private_room_id,

-            src=self.creator,

-            targ=self.admin_user,

-            tok=self.creator_tok,

-        )

-        self.helper.join(

-            room=private_room_id, user=self.admin_user, tok=self.admin_user_tok

-        )

-

-        # Validate if server admin is a member of the room

-

-        request, channel = self.make_request(

-            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.admin_user_tok,

-        )

-        self.render(request)

-        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])

-

-        # Join user to room.

-

-        url = "/_synapse/admin/v1/join/{}".format(private_room_id)

-        body = json.dumps({"user_id": self.second_user_id})

-

-        request, channel = self.make_request(

-            "POST",

-            url,

-            content=body.encode(encoding="utf_8"),

-            access_token=self.admin_user_tok,

-        )

-        self.render(request)

-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(private_room_id, channel.json_body["room_id"])

-

-        # Validate if user is a member of the room

-

-        request, channel = self.make_request(

-            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok,

-        )

-        self.render(request)

-        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])

-

-    def test_join_private_room_if_owner(self):

-        """

-        Test joining a local user to a private room with "JoinRules.INVITE",

-        when server admin is owner of this room.

-        """

-        private_room_id = self.helper.create_room_as(

-            self.admin_user, tok=self.admin_user_tok, is_public=False

-        )

-        url = "/_synapse/admin/v1/join/{}".format(private_room_id)

-        body = json.dumps({"user_id": self.second_user_id})

-

-        request, channel = self.make_request(

-            "POST",

-            url,

-            content=body.encode(encoding="utf_8"),

-            access_token=self.admin_user_tok,

-        )

-        self.render(request)

-

-        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(private_room_id, channel.json_body["room_id"])

-

-        # Validate if user is a member of the room

-

-        request, channel = self.make_request(

-            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok,

-        )

-        self.render(request)

-        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])

-        self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])

+# -*- coding: utf-8 -*-
+# Copyright 2020 Dirk Klimpel
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import urllib.parse
+from typing import List, Optional
+
+from mock import Mock
+
+import synapse.rest.admin
+from synapse.api.errors import Codes
+from synapse.rest.client.v1 import directory, events, login, room
+
+from tests import unittest
+
+"""Tests admin REST events for /rooms paths."""
+
+
+class ShutdownRoomTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+        events.register_servlets,
+        room.register_servlets,
+        room.register_deprecated_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.event_creation_handler = hs.get_event_creation_handler()
+        hs.config.user_consent_version = "1"
+
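+        # Stub out the consent URI builder so that event creation does not need
+        # a real consent resource to be configured on the test homeserver.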
+        consent_uri_builder = Mock()
+        consent_uri_builder.build_user_consent_uri.return_value = "http://example.com"
+        self.event_creation_handler._consent_uri_builder = consent_uri_builder
+
+        self.store = hs.get_datastore()
+
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.other_user = self.register_user("user", "pass")
+        self.other_user_token = self.login("user", "pass")
+
+        # Mark the admin user as having consented
+        self.get_success(self.store.user_set_consent_version(self.admin_user, "1"))
+
+    def test_shutdown_room_consent(self):
+        """Test that we can shutdown rooms with local users who have not
+        yet accepted the privacy policy. This used to fail when we tried to
+        force part the user from the old room.
+        """
+        self.event_creation_handler._block_events_without_consent_error = None
+
+        room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_token)
+
+        # Assert one user in room
+        users_in_room = self.get_success(self.store.get_users_in_room(room_id))
+        self.assertEqual([self.other_user], users_in_room)
+
+        # Enable require consent to send events
+        self.event_creation_handler._block_events_without_consent_error = "Error"
+
+        # Assert that the user is getting consent error
+        self.helper.send(
+            room_id, body="foo", tok=self.other_user_token, expect_code=403
+        )
+
+        # Check that the admin can still shut down the room
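+        # (Relative paths are prefixed with /_matrix/client/r0/ by the test
+        # helper, so this exercises the admin endpoint under the old client
+        # prefix registered above.)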
+        url = "admin/shutdown_room/" + room_id
+        request, channel = self.make_request(
+            "POST",
+            url.encode("ascii"),
+            json.dumps({"new_room_user_id": self.admin_user}),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Assert there is now no longer anyone in the room
+        users_in_room = self.get_success(self.store.get_users_in_room(room_id))
+        self.assertEqual([], users_in_room)
+
+    def test_shutdown_room_block_peek(self):
+        """Test that a world_readable room can no longer be peeked into after
+        it has been shut down.
+        """
+
+        self.event_creation_handler._block_events_without_consent_error = None
+
+        room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_token)
+
+        # Enable world readable
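+        # (world_readable history visibility is what permits peeking by
+        # non-members.)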
+        url = "rooms/%s/state/m.room.history_visibility" % (room_id,)
+        request, channel = self.make_request(
+            "PUT",
+            url.encode("ascii"),
+            json.dumps({"history_visibility": "world_readable"}),
+            access_token=self.other_user_token,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Check that the admin can still shut down the room
+        url = "admin/shutdown_room/" + room_id
+        request, channel = self.make_request(
+            "POST",
+            url.encode("ascii"),
+            json.dumps({"new_room_user_id": self.admin_user}),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Assert we can no longer peek into the room
+        self._assert_peek(room_id, expect_code=403)
+
+    def _assert_peek(self, room_id, expect_code):
+        """Assert that the admin user can (or cannot) peek into the room.
+        """
+
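+        # Peeking is attempted via both the initialSync and events APIs; both
+        # should return the expected code.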
+        url = "rooms/%s/initialSync" % (room_id,)
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok
+        )
+        self.render(request)
+        self.assertEqual(
+            expect_code, int(channel.result["code"]), msg=channel.result["body"]
+        )
+
+        url = "events?timeout=0&room_id=" + room_id
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok
+        )
+        self.render(request)
+        self.assertEqual(
+            expect_code, int(channel.result["code"]), msg=channel.result["body"]
+        )
+
+
+class DeleteRoomTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        events.register_servlets,
+        room.register_servlets,
+        room.register_deprecated_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.event_creation_handler = hs.get_event_creation_handler()
+        hs.config.user_consent_version = "1"
+
+        consent_uri_builder = Mock()
+        consent_uri_builder.build_user_consent_uri.return_value = "http://example.com"
+        self.event_creation_handler._consent_uri_builder = consent_uri_builder
+
+        self.store = hs.get_datastore()
+
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.other_user = self.register_user("user", "pass")
+        self.other_user_tok = self.login("user", "pass")
+
+        # Mark the admin user as having consented
+        self.get_success(self.store.user_set_consent_version(self.admin_user, "1"))
+
+        self.room_id = self.helper.create_room_as(
+            self.other_user, tok=self.other_user_tok
+        )
+        self.url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id
+
+    def test_requester_is_no_admin(self):
+        """
+        If the user is not a server admin, a 403 error is returned.
+        """
+
+        request, channel = self.make_request(
+            "POST", self.url, json.dumps({}), access_token=self.other_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+    def test_room_does_not_exist(self):
+        """
+        Check that unknown rooms/servers return a 404 error.
+        """
+        url = "/_synapse/admin/v1/rooms/!unknown:test/delete"
+
+        request, channel = self.make_request(
+            "POST", url, json.dumps({}), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+
+    def test_room_is_not_valid(self):
+        """
+        Check that invalid room names return a 400 error.
+        """
+        url = "/_synapse/admin/v1/rooms/invalidroom/delete"
+
+        request, channel = self.make_request(
+            "POST", url, json.dumps({}), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(
+            "invalidroom is not a legal room ID", channel.json_body["error"],
+        )
+
+    def test_new_room_user_does_not_exist(self):
+        """
+        Tests that the new room's user ID must belong to the local server, but does not have to exist.
+        """
+        body = json.dumps({"new_room_user_id": "@unknown:test"})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
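+        # The response should describe the outcome: the replacement room, who
+        # was kicked, who could not be kicked, and the room's local aliases.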
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertIn("new_room_id", channel.json_body)
+        self.assertIn("kicked_users", channel.json_body)
+        self.assertIn("failed_to_kick_users", channel.json_body)
+        self.assertIn("local_aliases", channel.json_body)
+
+    def test_new_room_user_is_not_local(self):
+        """
+        Check that the new room can only be created on behalf of a local user.
+        """
+        body = json.dumps({"new_room_user_id": "@not:exist.bla"})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(
+            "User must be our own: @not:exist.bla", channel.json_body["error"],
+        )
+
+    def test_block_is_not_bool(self):
+        """
+        If the `block` parameter is not a boolean, an error is returned.
+        """
+        body = json.dumps({"block": "NotBool"})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
+
+    def test_purge_is_not_bool(self):
+        """
+        If the `purge` parameter is not a boolean, an error is returned.
+        """
+        body = json.dumps({"purge": "NotBool"})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
+
+    def test_purge_room_and_block(self):
+        """Test to purge a room and block it.
+        Members will not be moved to a new room and will not receive a message.
+        """
+        # Test that room is not purged
+        with self.assertRaises(AssertionError):
+            self._is_purged(self.room_id)
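+        # (_is_purged asserts that every table is empty of room rows, so it is
+        # expected to raise while the room still exists.)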
+
+        # Test that room is not blocked
+        self._is_blocked(self.room_id, expect=False)
+
+        # Assert one user in room
+        self._is_member(room_id=self.room_id, user_id=self.other_user)
+
+        body = json.dumps({"block": True, "purge": True})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url.encode("ascii"),
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
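+        # No new_room_user_id was supplied, so members are simply kicked and no
+        # replacement room is created.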
+        self.assertEqual(None, channel.json_body["new_room_id"])
+        self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
+        self.assertIn("failed_to_kick_users", channel.json_body)
+        self.assertIn("local_aliases", channel.json_body)
+
+        self._is_purged(self.room_id)
+        self._is_blocked(self.room_id, expect=True)
+        self._has_no_members(self.room_id)
+
+    def test_purge_room_and_not_block(self):
+        """Test to purge a room and do not block it.
+        Members will not be moved to a new room and will not receive a message.
+        """
+        # Test that room is not purged
+        with self.assertRaises(AssertionError):
+            self._is_purged(self.room_id)
+
+        # Test that room is not blocked
+        self._is_blocked(self.room_id, expect=False)
+
+        # Assert one user in room
+        self._is_member(room_id=self.room_id, user_id=self.other_user)
+
+        body = json.dumps({"block": False, "purge": True})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url.encode("ascii"),
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(None, channel.json_body["new_room_id"])
+        self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
+        self.assertIn("failed_to_kick_users", channel.json_body)
+        self.assertIn("local_aliases", channel.json_body)
+
+        self._is_purged(self.room_id)
+        self._is_blocked(self.room_id, expect=False)
+        self._has_no_members(self.room_id)
+
+    def test_block_room_and_not_purge(self):
+        """Test to block a room without purging it.
+        Members will not be moved to a new room and will not receive a message.
+        The room will not be purged.
+        """
+        # Test that room is not purged
+        with self.assertRaises(AssertionError):
+            self._is_purged(self.room_id)
+
+        # Test that room is not blocked
+        self._is_blocked(self.room_id, expect=False)
+
+        # Assert one user in room
+        self._is_member(room_id=self.room_id, user_id=self.other_user)
+
+        body = json.dumps({"block": False, "purge": False})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url.encode("ascii"),
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(None, channel.json_body["new_room_id"])
+        self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
+        self.assertIn("failed_to_kick_users", channel.json_body)
+        self.assertIn("local_aliases", channel.json_body)
+
+        with self.assertRaises(AssertionError):
+            self._is_purged(self.room_id)
+        self._is_blocked(self.room_id, expect=False)
+        self._has_no_members(self.room_id)
+
+    def test_shutdown_room_consent(self):
+        """Test that we can shutdown rooms with local users who have not
+        yet accepted the privacy policy. This used to fail when we tried to
+        force part the user from the old room.
+        Members will be moved to a new room and will receive a message.
+        """
+        self.event_creation_handler._block_events_without_consent_error = None
+
+        # Assert one user in room
+        users_in_room = self.get_success(self.store.get_users_in_room(self.room_id))
+        self.assertEqual([self.other_user], users_in_room)
+
+        # Enable require consent to send events
+        self.event_creation_handler._block_events_without_consent_error = "Error"
+
+        # Assert that the user is getting consent error
+        self.helper.send(
+            self.room_id, body="foo", tok=self.other_user_tok, expect_code=403
+        )
+
+        # Test that room is not purged
+        with self.assertRaises(AssertionError):
+            self._is_purged(self.room_id)
+
+        # Assert one user in room
+        self._is_member(room_id=self.room_id, user_id=self.other_user)
+
+        # Check that the admin can still shut down the room
+        url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id
+        request, channel = self.make_request(
+            "POST",
+            url.encode("ascii"),
+            json.dumps({"new_room_user_id": self.admin_user}),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
+        self.assertIn("new_room_id", channel.json_body)
+        self.assertIn("failed_to_kick_users", channel.json_body)
+        self.assertIn("local_aliases", channel.json_body)
+
+        # Test that member has moved to new room
+        self._is_member(
+            room_id=channel.json_body["new_room_id"], user_id=self.other_user
+        )
+
+        self._is_purged(self.room_id)
+        self._has_no_members(self.room_id)
+
+    def test_shutdown_room_block_peek(self):
+        """Test that a world_readable room can no longer be peeked into after
+        it has been shut down.
+        Members will be moved to a new room and will receive a message.
+        """
+        self.event_creation_handler._block_events_without_consent_error = None
+
+        # Enable world readable
+        url = "rooms/%s/state/m.room.history_visibility" % (self.room_id,)
+        request, channel = self.make_request(
+            "PUT",
+            url.encode("ascii"),
+            json.dumps({"history_visibility": "world_readable"}),
+            access_token=self.other_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Test that room is not purged
+        with self.assertRaises(AssertionError):
+            self._is_purged(self.room_id)
+
+        # Assert one user in room
+        self._is_member(room_id=self.room_id, user_id=self.other_user)
+
+        # Check that the admin can still shut down the room
+        url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id
+        request, channel = self.make_request(
+            "POST",
+            url.encode("ascii"),
+            json.dumps({"new_room_user_id": self.admin_user}),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
+        self.assertIn("new_room_id", channel.json_body)
+        self.assertIn("failed_to_kick_users", channel.json_body)
+        self.assertIn("local_aliases", channel.json_body)
+
+        # Test that member has moved to new room
+        self._is_member(
+            room_id=channel.json_body["new_room_id"], user_id=self.other_user
+        )
+
+        self._is_purged(self.room_id)
+        self._has_no_members(self.room_id)
+
+        # Assert we can no longer peek into the room
+        self._assert_peek(self.room_id, expect_code=403)
+
+    def _is_blocked(self, room_id, expect=True):
+        """Assert that the room is blocked or not
+        """
+        d = self.store.is_room_blocked(room_id)
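+        # is_room_blocked resolves to None rather than False when the room has
+        # no block entry, hence assertIsNone below.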
+        if expect:
+            self.assertTrue(self.get_success(d))
+        else:
+            self.assertIsNone(self.get_success(d))
+
+    def _has_no_members(self, room_id):
+        """Assert there is now no longer anyone in the room
+        """
+        users_in_room = self.get_success(self.store.get_users_in_room(room_id))
+        self.assertEqual([], users_in_room)
+
+    def _is_member(self, room_id, user_id):
+        """Test that user is member of the room
+        """
+        users_in_room = self.get_success(self.store.get_users_in_room(room_id))
+        self.assertIn(user_id, users_in_room)
+
+    def _is_purged(self, room_id):
+        """Test that the following tables have been purged of all rows related to the room.
+        """
+        for table in (
+            "current_state_events",
+            "event_backward_extremities",
+            "event_forward_extremities",
+            "event_json",
+            "event_push_actions",
+            "event_search",
+            "events",
+            "group_rooms",
+            "public_room_list_stream",
+            "receipts_graph",
+            "receipts_linearized",
+            "room_aliases",
+            "room_depth",
+            "room_memberships",
+            "room_stats_state",
+            "room_stats_current",
+            "room_stats_historical",
+            "room_stats_earliest_token",
+            "rooms",
+            "stream_ordering_to_exterm",
+            "users_in_public_rooms",
+            "users_who_share_private_rooms",
+            "appservice_room_list",
+            "e2e_room_keys",
+            "event_push_summary",
+            "pusher_throttle",
+            "group_summary_rooms",
+            "local_invites",
+            "room_account_data",
+            "room_tags",
+            # "state_groups",  # Current impl leaves orphaned state groups around.
+            "state_groups_state",
+        ):
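+            # Count the rows in each table that still reference the room; after
+            # a purge there should be none left.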
+            count = self.get_success(
+                self.store.db.simple_select_one_onecol(
+                    table=table,
+                    keyvalues={"room_id": room_id},
+                    retcol="COUNT(*)",
+                    desc="test_purge_room",
+                )
+            )
+
+            self.assertEqual(count, 0, msg="Rows not purged in {}".format(table))
+
+    def _assert_peek(self, room_id, expect_code):
+        """Assert that the admin user can (or cannot) peek into the room.
+        """
+
+        url = "rooms/%s/initialSync" % (room_id,)
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok
+        )
+        self.render(request)
+        self.assertEqual(
+            expect_code, int(channel.result["code"]), msg=channel.result["body"]
+        )
+
+        url = "events?timeout=0&room_id=" + room_id
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok
+        )
+        self.render(request)
+        self.assertEqual(
+            expect_code, int(channel.result["code"]), msg=channel.result["body"]
+        )
+
+
+class PurgeRoomTestCase(unittest.HomeserverTestCase):
+    """Test /purge_room admin API.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+    def test_purge_room(self):
+        room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        # All users have to have left the room.
+        self.helper.leave(room_id, user=self.admin_user, tok=self.admin_user_tok)
+
+        url = "/_synapse/admin/v1/purge_room"
+        request, channel = self.make_request(
+            "POST",
+            url.encode("ascii"),
+            {"room_id": room_id},
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Test that the following tables have been purged of all rows related to the room.
+        for table in (
+            "current_state_events",
+            "event_backward_extremities",
+            "event_forward_extremities",
+            "event_json",
+            "event_push_actions",
+            "event_search",
+            "events",
+            "group_rooms",
+            "public_room_list_stream",
+            "receipts_graph",
+            "receipts_linearized",
+            "room_aliases",
+            "room_depth",
+            "room_memberships",
+            "room_stats_state",
+            "room_stats_current",
+            "room_stats_historical",
+            "room_stats_earliest_token",
+            "rooms",
+            "stream_ordering_to_exterm",
+            "users_in_public_rooms",
+            "users_who_share_private_rooms",
+            "appservice_room_list",
+            "e2e_room_keys",
+            "event_push_summary",
+            "pusher_throttle",
+            "group_summary_rooms",
+            "room_account_data",
+            "room_tags",
+            # "state_groups",  # Current impl leaves orphaned state groups around.
+            "state_groups_state",
+        ):
+            count = self.get_success(
+                self.store.db.simple_select_one_onecol(
+                    table=table,
+                    keyvalues={"room_id": room_id},
+                    retcol="COUNT(*)",
+                    desc="test_purge_room",
+                )
+            )
+
+            self.assertEqual(count, 0, msg="Rows not purged in {}".format(table))
+
+
+class RoomTestCase(unittest.HomeserverTestCase):
+    """Test /room admin API.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        directory.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+
+        # Create user
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+    def test_list_rooms(self):
+        """Test that we can list rooms"""
+        # Create 3 test rooms
+        total_rooms = 3
+        room_ids = []
+        for x in range(total_rooms):
+            room_id = self.helper.create_room_as(
+                self.admin_user, tok=self.admin_user_tok
+            )
+            room_ids.append(room_id)
+
+        # Request the list of rooms
+        url = "/_synapse/admin/v1/rooms"
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        # Check request completed successfully
+        self.assertEqual(200, int(channel.code), msg=channel.json_body)
+
+        # Check that response json body contains a "rooms" key
+        self.assertTrue(
+            "rooms" in channel.json_body,
+            msg="Response body does not contain a 'rooms' key",
+        )
+
+        # Check that 3 rooms were returned
+        self.assertEqual(3, len(channel.json_body["rooms"]), msg=channel.json_body)
+
+        # Check their room_ids match
+        returned_room_ids = [room["room_id"] for room in channel.json_body["rooms"]]
+        self.assertEqual(room_ids, returned_room_ids)
+
+        # Check that all fields are available
+        for r in channel.json_body["rooms"]:
+            self.assertIn("name", r)
+            self.assertIn("canonical_alias", r)
+            self.assertIn("joined_members", r)
+            self.assertIn("joined_local_members", r)
+            self.assertIn("version", r)
+            self.assertIn("creator", r)
+            self.assertIn("encryption", r)
+            self.assertIn("federatable", r)
+            self.assertIn("public", r)
+            self.assertIn("join_rules", r)
+            self.assertIn("guest_access", r)
+            self.assertIn("history_visibility", r)
+            self.assertIn("state_events", r)
+
+        # Check that the correct number of total rooms was returned
+        self.assertEqual(channel.json_body["total_rooms"], total_rooms)
+
+        # Check that the offset is correct
+        # Should be 0 as we aren't paginating
+        self.assertEqual(channel.json_body["offset"], 0)
+
+        # Check that the prev_batch parameter is not present
+        self.assertNotIn("prev_batch", channel.json_body)
+
+        # We shouldn't receive a next token here as there's no further rooms to show
+        self.assertNotIn("next_batch", channel.json_body)
+
+    def test_list_rooms_pagination(self):
+        """Test that we can get a full list of rooms through pagination"""
+        # Create 5 test rooms
+        total_rooms = 5
+        room_ids = []
+        for x in range(total_rooms):
+            room_id = self.helper.create_room_as(
+                self.admin_user, tok=self.admin_user_tok
+            )
+            room_ids.append(room_id)
+
+        # Set the room names so that we get a consistent returned ordering
+        for idx, room_id in enumerate(room_ids):
+            self.helper.send_state(
+                room_id, "m.room.name", {"name": str(idx)}, tok=self.admin_user_tok,
+            )
+
+        # Request the list of rooms
+        returned_room_ids = []
+        start = 0
+        limit = 2
+
+        run_count = 0
+        should_repeat = True
+        while should_repeat:
+            run_count += 1
+
+            url = "/_synapse/admin/v1/rooms?from=%d&limit=%d&order_by=%s" % (
+                start,
+                limit,
+                "name",
+            )
+            request, channel = self.make_request(
+                "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+            )
+            self.render(request)
+            self.assertEqual(
+                200, int(channel.result["code"]), msg=channel.result["body"]
+            )
+
+            self.assertTrue("rooms" in channel.json_body)
+            for r in channel.json_body["rooms"]:
+                returned_room_ids.append(r["room_id"])
+
+            # Check that the correct number of total rooms was returned
+            self.assertEqual(channel.json_body["total_rooms"], total_rooms)
+
+            # Check that the offset is correct
+            # We fetch 2 rooms per page, so the offset should be 2 * (run_count - 1)
+            self.assertEqual(channel.json_body["offset"], 2 * (run_count - 1))
+
+            if run_count > 1:
+                # Check the value of prev_batch is correct
+                self.assertEqual(channel.json_body["prev_batch"], 2 * (run_count - 2))
+
+            if "next_batch" not in channel.json_body:
+                # We have reached the end of the list
+                should_repeat = False
+            else:
+                # Make another query with an updated start value
+                start = channel.json_body["next_batch"]
+
+        # We should've queried the endpoint 3 times
+        self.assertEqual(
+            run_count,
+            3,
+            msg="Should've queried 3 times for 5 rooms with limit 2 per query",
+        )
+
+        # Check that we received all of the room ids
+        self.assertEqual(room_ids, returned_room_ids)
+
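+        # Check that a request at the final offset, without an explicit ordering, still succeeds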
+        url = "/_synapse/admin/v1/rooms?from=%d&limit=%d" % (start, limit)
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+    def test_correct_room_attributes(self):
+        """Test the correct attributes for a room are returned"""
+        # Create a test room
+        room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        test_alias = "#test:test"
+        test_room_name = "something"
+
+        # Have another user join the room
+        user_2 = self.register_user("user4", "pass")
+        user_tok_2 = self.login("user4", "pass")
+        self.helper.join(room_id, user_2, tok=user_tok_2)
+
+        # Create a new alias to this room
+        url = "/_matrix/client/r0/directory/room/%s" % (urllib.parse.quote(test_alias),)
+        request, channel = self.make_request(
+            "PUT",
+            url.encode("ascii"),
+            {"room_id": room_id},
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Set this new alias as the canonical alias for this room
+        self.helper.send_state(
+            room_id,
+            "m.room.aliases",
+            {"aliases": [test_alias]},
+            tok=self.admin_user_tok,
+            state_key="test",
+        )
+        self.helper.send_state(
+            room_id,
+            "m.room.canonical_alias",
+            {"alias": test_alias},
+            tok=self.admin_user_tok,
+        )
+
+        # Set a name for the room
+        self.helper.send_state(
+            room_id, "m.room.name", {"name": test_room_name}, tok=self.admin_user_tok,
+        )
+
+        # Request the list of rooms
+        url = "/_synapse/admin/v1/rooms"
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Check that rooms were returned
+        self.assertTrue("rooms" in channel.json_body)
+        rooms = channel.json_body["rooms"]
+
+        # Check that only one room was returned
+        self.assertEqual(len(rooms), 1)
+
+        # And that the value of the total_rooms key was correct
+        self.assertEqual(channel.json_body["total_rooms"], 1)
+
+        # Check that the offset is correct
+        # We're not paginating, so should be 0
+        self.assertEqual(channel.json_body["offset"], 0)
+
+        # Check that there is no `prev_batch`
+        self.assertNotIn("prev_batch", channel.json_body)
+
+        # Check that there is no `next_batch`
+        self.assertNotIn("next_batch", channel.json_body)
+
+        # Check that all provided attributes are set
+        r = rooms[0]
+        self.assertEqual(room_id, r["room_id"])
+        self.assertEqual(test_room_name, r["name"])
+        self.assertEqual(test_alias, r["canonical_alias"])
+
+    def test_room_list_sort_order(self):
+        """Test room list sort ordering. alphabetical name versus number of members,
+        reversing the order, etc.
+        """
+
+        def _set_canonical_alias(room_id: str, test_alias: str, admin_user_tok: str):
+            # Create a new alias to this room
+            url = "/_matrix/client/r0/directory/room/%s" % (
+                urllib.parse.quote(test_alias),
+            )
+            request, channel = self.make_request(
+                "PUT",
+                url.encode("ascii"),
+                {"room_id": room_id},
+                access_token=admin_user_tok,
+            )
+            self.render(request)
+            self.assertEqual(
+                200, int(channel.result["code"]), msg=channel.result["body"]
+            )
+
+            # Set this new alias as the canonical alias for this room
+            self.helper.send_state(
+                room_id,
+                "m.room.aliases",
+                {"aliases": [test_alias]},
+                tok=admin_user_tok,
+                state_key="test",
+            )
+            self.helper.send_state(
+                room_id,
+                "m.room.canonical_alias",
+                {"alias": test_alias},
+                tok=admin_user_tok,
+            )
+
+        def _order_test(
+            order_type: str, expected_room_list: List[str], reverse: bool = False,
+        ):
+            """Request the list of rooms in a certain order. Assert that order is what
+            we expect
+
+            Args:
+                order_type: The type of ordering to give the server
+                expected_room_list: The list of room_ids in the order we expect to get
+                    back from the server
+            """
+            # Request the list of rooms in the given order
+            url = "/_synapse/admin/v1/rooms?order_by=%s" % (order_type,)
+            if reverse:
+                url += "&dir=b"
+            request, channel = self.make_request(
+                "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+            )
+            self.render(request)
+            self.assertEqual(200, channel.code, msg=channel.json_body)
+
+            # Check that rooms were returned
+            self.assertTrue("rooms" in channel.json_body)
+            rooms = channel.json_body["rooms"]
+
+            # Check for the correct total_rooms value
+            self.assertEqual(channel.json_body["total_rooms"], 3)
+
+            # Check that the offset is correct
+            # We're not paginating, so should be 0
+            self.assertEqual(channel.json_body["offset"], 0)
+
+            # Check that there is no `prev_batch`
+            self.assertNotIn("prev_batch", channel.json_body)
+
+            # Check that there is no `next_batch`
+            self.assertNotIn("next_batch", channel.json_body)
+
+            # Check that the rooms were returned in the expected order
+            returned_order = [r["room_id"] for r in rooms]
+            self.assertListEqual(expected_room_list, returned_order)  # order is checked
+
+        # Create 3 test rooms
+        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+        room_id_3 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        # Set room names in alphabetical order: room 1 -> A, room 2 -> B, room 3 -> C
+        self.helper.send_state(
+            room_id_1, "m.room.name", {"name": "A"}, tok=self.admin_user_tok,
+        )
+        self.helper.send_state(
+            room_id_2, "m.room.name", {"name": "B"}, tok=self.admin_user_tok,
+        )
+        self.helper.send_state(
+            room_id_3, "m.room.name", {"name": "C"}, tok=self.admin_user_tok,
+        )
+
+        # Set the rooms' canonical aliases
+        _set_canonical_alias(room_id_1, "#A_alias:test", self.admin_user_tok)
+        _set_canonical_alias(room_id_2, "#B_alias:test", self.admin_user_tok)
+        _set_canonical_alias(room_id_3, "#C_alias:test", self.admin_user_tok)
+
+        # Set room member counts in reverse order: room 1 -> 1 member, room 2 -> 2, room 3 -> 3
+        user_1 = self.register_user("bob1", "pass")
+        user_1_tok = self.login("bob1", "pass")
+        self.helper.join(room_id_2, user_1, tok=user_1_tok)
+
+        user_2 = self.register_user("bob2", "pass")
+        user_2_tok = self.login("bob2", "pass")
+        self.helper.join(room_id_3, user_2, tok=user_2_tok)
+
+        user_3 = self.register_user("bob3", "pass")
+        user_3_tok = self.login("bob3", "pass")
+        self.helper.join(room_id_3, user_3, tok=user_3_tok)
+
+        # Test different sort orders, with forward and reverse directions
+        _order_test("name", [room_id_1, room_id_2, room_id_3])
+        _order_test("name", [room_id_3, room_id_2, room_id_1], reverse=True)
+
+        _order_test("canonical_alias", [room_id_1, room_id_2, room_id_3])
+        _order_test("canonical_alias", [room_id_3, room_id_2, room_id_1], reverse=True)
+
+        _order_test("joined_members", [room_id_3, room_id_2, room_id_1])
+        _order_test("joined_members", [room_id_1, room_id_2, room_id_3], reverse=True)
+
+        _order_test("joined_local_members", [room_id_3, room_id_2, room_id_1])
+        _order_test(
+            "joined_local_members", [room_id_1, room_id_2, room_id_3], reverse=True
+        )
+
+        _order_test("version", [room_id_1, room_id_2, room_id_3])
+        _order_test("version", [room_id_1, room_id_2, room_id_3], reverse=True)
+
+        _order_test("creator", [room_id_1, room_id_2, room_id_3])
+        _order_test("creator", [room_id_1, room_id_2, room_id_3], reverse=True)
+
+        _order_test("encryption", [room_id_1, room_id_2, room_id_3])
+        _order_test("encryption", [room_id_1, room_id_2, room_id_3], reverse=True)
+
+        _order_test("federatable", [room_id_1, room_id_2, room_id_3])
+        _order_test("federatable", [room_id_1, room_id_2, room_id_3], reverse=True)
+
+        _order_test("public", [room_id_1, room_id_2, room_id_3])
+        # Different sort order between SQLite and PostgreSQL
+        # _order_test("public", [room_id_3, room_id_2, room_id_1], reverse=True)
+
+        _order_test("join_rules", [room_id_1, room_id_2, room_id_3])
+        _order_test("join_rules", [room_id_1, room_id_2, room_id_3], reverse=True)
+
+        _order_test("guest_access", [room_id_1, room_id_2, room_id_3])
+        _order_test("guest_access", [room_id_1, room_id_2, room_id_3], reverse=True)
+
+        _order_test("history_visibility", [room_id_1, room_id_2, room_id_3])
+        _order_test(
+            "history_visibility", [room_id_1, room_id_2, room_id_3], reverse=True
+        )
+
+        _order_test("state_events", [room_id_3, room_id_2, room_id_1])
+        _order_test("state_events", [room_id_1, room_id_2, room_id_3], reverse=True)
+
+    def test_search_term(self):
+        """Test that searching for a room works correctly"""
+        # Create two test rooms
+        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        room_name_1 = "something"
+        room_name_2 = "else"
+
+        # Set the name for each room
+        self.helper.send_state(
+            room_id_1, "m.room.name", {"name": room_name_1}, tok=self.admin_user_tok,
+        )
+        self.helper.send_state(
+            room_id_2, "m.room.name", {"name": room_name_2}, tok=self.admin_user_tok,
+        )
+
+        def _search_test(
+            expected_room_id: Optional[str],
+            search_term: str,
+            expected_http_code: int = 200,
+        ):
+            """Search for a room and check that the returned room's id is a match
+
+            Args:
+                expected_room_id: The room_id expected to be returned by the API. Set
+                    to None to expect zero results for the search
+                search_term: The term to search for room names with
+                expected_http_code: The expected http code for the request
+            """
+            url = "/_synapse/admin/v1/rooms?search_term=%s" % (search_term,)
+            request, channel = self.make_request(
+                "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+            )
+            self.render(request)
+            self.assertEqual(expected_http_code, channel.code, msg=channel.json_body)
+
+            if expected_http_code != 200:
+                return
+
+            # Check that rooms were returned
+            self.assertTrue("rooms" in channel.json_body)
+            rooms = channel.json_body["rooms"]
+
+            # Check that the expected number of rooms were returned
+            expected_room_count = 1 if expected_room_id else 0
+            self.assertEqual(len(rooms), expected_room_count)
+            self.assertEqual(channel.json_body["total_rooms"], expected_room_count)
+
+            # Check that the offset is correct
+            # We're not paginating, so should be 0
+            self.assertEqual(channel.json_body["offset"], 0)
+
+            # Check that there is no `prev_batch`
+            self.assertNotIn("prev_batch", channel.json_body)
+
+            # Check that there is no `next_batch`
+            self.assertNotIn("next_batch", channel.json_body)
+
+            if expected_room_id:
+                # Check that the first returned room id is correct
+                r = rooms[0]
+                self.assertEqual(expected_room_id, r["room_id"])
+
+        # Perform search tests
+        _search_test(room_id_1, "something")
+        _search_test(room_id_1, "thing")
+
+        _search_test(room_id_2, "else")
+        _search_test(room_id_2, "se")
+
+        _search_test(None, "foo")
+        _search_test(None, "bar")
+        _search_test(None, "", expected_http_code=400)
+
+    def test_single_room(self):
+        """Test that a single room can be requested correctly"""
+        # Create two test rooms
+        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        room_name_1 = "something"
+        room_name_2 = "else"
+
+        # Set the name for each room
+        self.helper.send_state(
+            room_id_1, "m.room.name", {"name": room_name_1}, tok=self.admin_user_tok,
+        )
+        self.helper.send_state(
+            room_id_2, "m.room.name", {"name": room_name_2}, tok=self.admin_user_tok,
+        )
+
+        url = "/_synapse/admin/v1/rooms/%s" % (room_id_1,)
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+
+        self.assertIn("room_id", channel.json_body)
+        self.assertIn("name", channel.json_body)
+        self.assertIn("canonical_alias", channel.json_body)
+        self.assertIn("joined_members", channel.json_body)
+        self.assertIn("joined_local_members", channel.json_body)
+        self.assertIn("version", channel.json_body)
+        self.assertIn("creator", channel.json_body)
+        self.assertIn("encryption", channel.json_body)
+        self.assertIn("federatable", channel.json_body)
+        self.assertIn("public", channel.json_body)
+        self.assertIn("join_rules", channel.json_body)
+        self.assertIn("guest_access", channel.json_body)
+        self.assertIn("history_visibility", channel.json_body)
+        self.assertIn("state_events", channel.json_body)
+
+        self.assertEqual(room_id_1, channel.json_body["room_id"])
+
+    def test_room_members(self):
+        """Test that room members can be requested correctly"""
+        # Create two test rooms
+        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        # Have another user join the first room
+        user_1 = self.register_user("foo", "pass")
+        user_tok_1 = self.login("foo", "pass")
+        self.helper.join(room_id_1, user_1, tok=user_tok_1)
+
+        # Have another user join both rooms
+        user_2 = self.register_user("bar", "pass")
+        user_tok_2 = self.login("bar", "pass")
+        self.helper.join(room_id_1, user_2, tok=user_tok_2)
+        self.helper.join(room_id_2, user_2, tok=user_tok_2)
+
+        # Have another user join the second room
+        user_3 = self.register_user("foobar", "pass")
+        user_tok_3 = self.login("foobar", "pass")
+        self.helper.join(room_id_2, user_3, tok=user_tok_3)
+
+        url = "/_synapse/admin/v1/rooms/%s/members" % (room_id_1,)
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+
+        self.assertCountEqual(
+            ["@admin:test", "@foo:test", "@bar:test"], channel.json_body["members"]
+        )
+        self.assertEqual(channel.json_body["total"], 3)
+
+        url = "/_synapse/admin/v1/rooms/%s/members" % (room_id_2,)
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+
+        self.assertCountEqual(
+            ["@admin:test", "@bar:test", "@foobar:test"], channel.json_body["members"]
+        )
+        self.assertEqual(channel.json_body["total"], 3)
+
+
+class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
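+    """Test the /_synapse/admin/v1/join/<room_id_or_alias> admin API.
+    """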
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, homeserver):
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.creator = self.register_user("creator", "test")
+        self.creator_tok = self.login("creator", "test")
+
+        self.second_user_id = self.register_user("second", "test")
+        self.second_tok = self.login("second", "test")
+
+        self.public_room_id = self.helper.create_room_as(
+            self.creator, tok=self.creator_tok, is_public=True
+        )
+        self.url = "/_synapse/admin/v1/join/{}".format(self.public_room_id)
+
+    def test_requester_is_no_admin(self):
+        """
+        If the user is not a server admin, an error 403 is returned.
+        """
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.second_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+    def test_invalid_parameter(self):
+        """
+        If a required parameter is missing, an error 400 is returned.
+        """
+        body = json.dumps({"unknown_parameter": "@unknown:test"})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
+
+    def test_local_user_does_not_exist(self):
+        """
+        Tests that a lookup for a user that does not exist returns a 404
+        """
+        body = json.dumps({"user_id": "@unknown:test"})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+
+    def test_remote_user(self):
+        """
+        Check that only local users can be joined to rooms.
+        """
+        body = json.dumps({"user_id": "@not:exist.bla"})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(
+            "This endpoint can only be used with local users",
+            channel.json_body["error"],
+        )
+
+    def test_room_does_not_exist(self):
+        """
+        Check that unknown rooms/servers return a 404 error.
+        """
+        body = json.dumps({"user_id": self.second_user_id})
+        url = "/_synapse/admin/v1/join/!unknown:test"
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("No known servers", channel.json_body["error"])
+
+    def test_room_is_not_valid(self):
+        """
+        Check that invalid room names return a 400 error.
+        """
+        body = json.dumps({"user_id": self.second_user_id})
+        url = "/_synapse/admin/v1/join/invalidroom"
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(
+            "invalidroom was not legal room ID or room alias",
+            channel.json_body["error"],
+        )
+
+    def test_join_public_room(self):
+        """
+        Test joining a local user to a public room with "JoinRules.PUBLIC"
+        """
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            self.url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(self.public_room_id, channel.json_body["room_id"])
+
+        # Validate that the user is a member of the room
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(self.public_room_id, channel.json_body["joined_rooms"][0])
+
+    def test_join_private_room_if_not_member(self):
+        """
+        Test joining a local user to a private room with "JoinRules.INVITE"
+        when the server admin is not a member of this room.
+        """
+        private_room_id = self.helper.create_room_as(
+            self.creator, tok=self.creator_tok, is_public=False
+        )
+        url = "/_synapse/admin/v1/join/{}".format(private_room_id)
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+    def test_join_private_room_if_member(self):
+        """
+        Test joining a local user to a private room with "JoinRules.INVITE",
+        when the server admin is a member of this room.
+        """
+        private_room_id = self.helper.create_room_as(
+            self.creator, tok=self.creator_tok, is_public=False
+        )
+        self.helper.invite(
+            room=private_room_id,
+            src=self.creator,
+            targ=self.admin_user,
+            tok=self.creator_tok,
+        )
+        self.helper.join(
+            room=private_room_id, user=self.admin_user, tok=self.admin_user_tok
+        )
+
+        # Validate that the server admin is a member of the room
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
+
+        # Join the user to the room.
+
+        url = "/_synapse/admin/v1/join/{}".format(private_room_id)
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["room_id"])
+
+        # Validate that the user is a member of the room
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
+
+    def test_join_private_room_if_owner(self):
+        """
+        Test joining a local user to a private room with "JoinRules.INVITE",
+        when the server admin is the owner of this room.
+        """
+        private_room_id = self.helper.create_room_as(
+            self.admin_user, tok=self.admin_user_tok, is_public=False
+        )
+        url = "/_synapse/admin/v1/join/{}".format(private_room_id)
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["room_id"])
+
+        # Validate that the user is a member of the room
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index 22d734e763..7f8252330a 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -143,6 +143,26 @@ class RestHelper(object):
 
         return channel.json_body
 
+    def redact(self, room_id, event_id, txn_id=None, tok=None, expect_code=200):
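+        """Issue a PUT to redact the given event and assert the expected response code.
+        """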
+        if txn_id is None:
+            txn_id = "m%s" % (str(time.time()))
+
+        path = "/_matrix/client/r0/rooms/%s/redact/%s/%s" % (room_id, event_id, txn_id)
+        if tok:
+            path = path + "?access_token=%s" % tok
+
+        request, channel = make_request(
+            self.hs.get_reactor(), "PUT", path, json.dumps({}).encode("utf8")
+        )
+        render(request, self.resource, self.hs.get_reactor())
+
+        assert int(channel.result["code"]) == expect_code, (
+            "Expected: %d, got: %d, resp: %r"
+            % (expect_code, int(channel.result["code"]), channel.result["body"])
+        )
+
+        return channel.json_body
+
     def _read_write_state(
         self,
         room_id: str,
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
index fa3a3ec1bd..a31e44c97e 100644
--- a/tests/rest/client/v2_alpha/test_sync.py
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -16,9 +16,9 @@
 import json
 
 import synapse.rest.admin
-from synapse.api.constants import EventContentFields, EventTypes
+from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
 from synapse.rest.client.v1 import login, room
-from synapse.rest.client.v2_alpha import sync
+from synapse.rest.client.v2_alpha import read_marker, sync
 
 from tests import unittest
 from tests.server import TimedOutException
@@ -324,3 +324,156 @@ class SyncTypingTests(unittest.HomeserverTestCase):
             "GET", sync_url % (access_token, next_batch)
         )
         self.assertRaises(TimedOutException, self.render, request)
+
+
+class UnreadMessagesTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        read_marker.register_servlets,
+        room.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
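+        # Pattern for incremental /sync requests; the since-token is advanced by
+        # _check_unread_count after each sync.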
+        self.url = "/sync?since=%s"
+        self.next_batch = "s0"
+
+        # Register the first user (used to check the unread counts).
+        self.user_id = self.register_user("kermit", "monkey")
+        self.tok = self.login("kermit", "monkey")
+
+        # Create the room we'll check unread counts for.
+        self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
+
+        # Register the second user (used to send events to the room).
+        self.user2 = self.register_user("kermit2", "monkey")
+        self.tok2 = self.login("kermit2", "monkey")
+
+        # Change the power levels of the room so that the second user can send state
+        # events.
+        self.helper.send_state(
+            self.room_id,
+            EventTypes.PowerLevels,
+            {
+                "users": {self.user_id: 100, self.user2: 100},
+                "users_default": 0,
+                "events": {
+                    "m.room.name": 50,
+                    "m.room.power_levels": 100,
+                    "m.room.history_visibility": 100,
+                    "m.room.canonical_alias": 50,
+                    "m.room.avatar": 50,
+                    "m.room.tombstone": 100,
+                    "m.room.server_acl": 100,
+                    "m.room.encryption": 100,
+                },
+                "events_default": 0,
+                "state_default": 50,
+                "ban": 50,
+                "kick": 50,
+                "redact": 50,
+                "invite": 0,
+            },
+            tok=self.tok,
+        )
+
+    def test_unread_counts(self):
+        """Tests that /sync returns the right value for the unread count (MSC2654)."""
+
+        # Check that our own messages don't increase the unread count.
+        self.helper.send(self.room_id, "hello", tok=self.tok)
+        self._check_unread_count(0)
+
+        # Join the new user and check that this doesn't increase the unread count.
+        self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2)
+        self._check_unread_count(0)
+
+        # Check that the new user sending a message increases our unread count.
+        res = self.helper.send(self.room_id, "hello", tok=self.tok2)
+        self._check_unread_count(1)
+
+        # Send a read receipt to tell the server we've read the latest event.
+        body = json.dumps({"m.read": res["event_id"]}).encode("utf8")
+        request, channel = self.make_request(
+            "POST",
+            "/rooms/%s/read_markers" % self.room_id,
+            body,
+            access_token=self.tok,
+        )
+        self.render(request)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Check that the unread counter is back to 0.
+        self._check_unread_count(0)
+
+        # Check that room name changes increase the unread counter.
+        self.helper.send_state(
+            self.room_id, "m.room.name", {"name": "my super room"}, tok=self.tok2,
+        )
+        self._check_unread_count(1)
+
+        # Check that room topic changes increase the unread counter.
+        self.helper.send_state(
+            self.room_id, "m.room.topic", {"topic": "welcome!!!"}, tok=self.tok2,
+        )
+        self._check_unread_count(2)
+
+        # Check that encrypted messages increase the unread counter.
+        self.helper.send_event(self.room_id, EventTypes.Encrypted, {}, tok=self.tok2)
+        self._check_unread_count(3)
+
+        # Check that custom events with a body increase the unread counter.
+        self.helper.send_event(
+            self.room_id, "org.matrix.custom_type", {"body": "hello"}, tok=self.tok2,
+        )
+        self._check_unread_count(4)
+
+        # Check that edits don't increase the unread counter.
+        self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={
+                "body": "hello",
+                "msgtype": "m.text",
+                "m.relates_to": {"rel_type": RelationTypes.REPLACE},
+            },
+            tok=self.tok2,
+        )
+        self._check_unread_count(4)
+
+        # Check that notices don't increase the unread counter.
+        self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={"body": "hello", "msgtype": "m.notice"},
+            tok=self.tok2,
+        )
+        self._check_unread_count(4)
+
+        # Check that tombstone events increase the unread counter.
+        self.helper.send_state(
+            self.room_id,
+            EventTypes.Tombstone,
+            {"replacement_room": "!someroom:test"},
+            tok=self.tok2,
+        )
+        self._check_unread_count(5)
+
+    def _check_unread_count(self, expected_count: int):
+        """Syncs and compares the unread count with the expected value."""
+
+        request, channel = self.make_request(
+            "GET", self.url % self.next_batch, access_token=self.tok,
+        )
+        self.render(request)
+
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        room_entry = channel.json_body["rooms"]["join"][self.room_id]
+        self.assertEqual(
+            room_entry["org.matrix.msc2654.unread_count"], expected_count, room_entry,
+        )
+
+        # Store the next batch for the next request.
+        self.next_batch = channel.json_body["next_batch"]
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index 99eb477149..6850c666be 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -53,7 +53,7 @@ class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase):
         Tell the mock http client to expect an outgoing GET request for the given key
         """
 
-        def get_json(destination, path, ignore_backoff=False, **kwargs):
+        async def get_json(destination, path, ignore_backoff=False, **kwargs):
             self.assertTrue(ignore_backoff)
             self.assertEqual(destination, server_name)
             key_id = "%s:%s" % (signing_key.alg, signing_key.version)
@@ -177,7 +177,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
 
         # wire up outbound POST /key/v2/query requests from hs2 so that they
         # will be forwarded to hs1
-        def post_json(destination, path, data):
+        async def post_json(destination, path, data):
             self.assertEqual(destination, self.hs.hostname)
             self.assertEqual(
                 path, "/_matrix/key/v2/query",
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index 66fa5978b2..f4f3e56777 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/rest/media/v1/test_media_storage.py
@@ -26,6 +26,7 @@ import attr
 from parameterized import parameterized_class
 from PIL import Image as Image
 
+from twisted.internet import defer
 from twisted.internet.defer import Deferred
 
 from synapse.logging.context import make_deferred_yieldable
@@ -77,7 +78,9 @@ class MediaStorageTests(unittest.HomeserverTestCase):
 
         # This uses a real blocking threadpool so we have to wait for it to be
         # actually done :/
-        x = self.media_storage.ensure_media_is_in_local_cache(file_info)
+        x = defer.ensureDeferred(
+            self.media_storage.ensure_media_is_in_local_cache(file_info)
+        )
 
         # Hotloop until the threadpool does its job...
         self.wait_on_thread(x)
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index 2826211f32..74765a582b 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -12,8 +12,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import json
 import os
+import re
+
+from mock import patch
 
 import attr
 
@@ -131,7 +134,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.reactor.nameResolver = Resolver()
 
     def test_cache_returns_correct_type(self):
-        self.lookups["matrix.org"] = [(IPv4Address, "8.8.8.8")]
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
 
         request, channel = self.make_request(
             "GET", "url_preview?url=http://matrix.org", shorthand=False
@@ -187,7 +190,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         )
 
     def test_non_ascii_preview_httpequiv(self):
-        self.lookups["matrix.org"] = [(IPv4Address, "8.8.8.8")]
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
 
         end_content = (
             b"<html><head>"
@@ -221,7 +224,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430")
 
     def test_non_ascii_preview_content_type(self):
-        self.lookups["matrix.org"] = [(IPv4Address, "8.8.8.8")]
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
 
         end_content = (
             b"<html><head>"
@@ -254,7 +257,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430")
 
     def test_overlong_title(self):
-        self.lookups["matrix.org"] = [(IPv4Address, "8.8.8.8")]
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
 
         end_content = (
             b"<html><head>"
@@ -292,7 +295,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         """
         IP addresses can be previewed directly.
         """
-        self.lookups["example.com"] = [(IPv4Address, "8.8.8.8")]
+        self.lookups["example.com"] = [(IPv4Address, "10.1.2.3")]
 
         request, channel = self.make_request(
             "GET", "url_preview?url=http://example.com", shorthand=False
@@ -439,7 +442,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         # Hardcode the URL resolving to the IP we want.
         self.lookups["example.com"] = [
             (IPv4Address, "1.1.1.2"),
-            (IPv4Address, "8.8.8.8"),
+            (IPv4Address, "10.1.2.3"),
         ]
 
         request, channel = self.make_request(
@@ -518,7 +521,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         """
         Accept-Language header is sent to the remote server
         """
-        self.lookups["example.com"] = [(IPv4Address, "8.8.8.8")]
+        self.lookups["example.com"] = [(IPv4Address, "10.1.2.3")]
 
         # Build and make a request to the server
         request, channel = self.make_request(
@@ -562,3 +565,126 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             ),
             server.data,
         )
+
+    def test_oembed_photo(self):
+        """Test an oEmbed endpoint which returns a 'photo' type which redirects the preview to a new URL."""
+        # Route the HTTP version of the URL to an HTTP oEmbed endpoint so that the tests work.
+        with patch.dict(
+            "synapse.rest.media.v1.preview_url_resource._oembed_patterns",
+            {
+                re.compile(
+                    r"http://twitter\.com/.+/status/.+"
+                ): "http://publish.twitter.com/oembed",
+            },
+            clear=True,
+        ):
+
+            self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+            self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+
+            result = {
+                "version": "1.0",
+                "type": "photo",
+                "url": "http://cdn.twitter.com/matrixdotorg",
+            }
+            oembed_content = json.dumps(result).encode("utf-8")
+
+            end_content = (
+                b"<html><head>"
+                b"<title>Some Title</title>"
+                b'<meta property="og:description" content="hi" />'
+                b"</head></html>"
+            )
+
+            request, channel = self.make_request(
+                "GET",
+                "url_preview?url=http://twitter.com/matrixdotorg/status/12345",
+                shorthand=False,
+            )
+            request.render(self.preview_url)
+            self.pump()
+
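+            # Serve the oEmbed JSON document on the first outbound connection.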
+            client = self.reactor.tcpClients[0][2].buildProtocol(None)
+            server = AccumulatingProtocol()
+            server.makeConnection(FakeTransport(client, self.reactor))
+            client.makeConnection(FakeTransport(server, self.reactor))
+            client.dataReceived(
+                (
+                    b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                    b'Content-Type: application/json; charset="utf8"\r\n\r\n'
+                )
+                % (len(oembed_content),)
+                + oembed_content
+            )
+
+            self.pump()
+
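+            # The "photo" response points at another URL; serve the HTML for that
+            # second outbound connection.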
+            client = self.reactor.tcpClients[1][2].buildProtocol(None)
+            server = AccumulatingProtocol()
+            server.makeConnection(FakeTransport(client, self.reactor))
+            client.makeConnection(FakeTransport(server, self.reactor))
+            client.dataReceived(
+                (
+                    b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                    b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+                )
+                % (len(end_content),)
+                + end_content
+            )
+
+            self.pump()
+
+            self.assertEqual(channel.code, 200)
+            self.assertEqual(
+                channel.json_body, {"og:title": "Some Title", "og:description": "hi"}
+            )
+
+    def test_oembed_rich(self):
+        """Test an oEmbed endpoint which returns HTML content via the 'rich' type."""
+        # Route the HTTP version of the URL to an HTTP oEmbed endpoint so that the tests work.
+        with patch.dict(
+            "synapse.rest.media.v1.preview_url_resource._oembed_patterns",
+            {
+                re.compile(
+                    r"http://twitter\.com/.+/status/.+"
+                ): "http://publish.twitter.com/oembed",
+            },
+            clear=True,
+        ):
+
+            self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+
+            result = {
+                "version": "1.0",
+                "type": "rich",
+                "html": "<div>Content Preview</div>",
+            }
+            end_content = json.dumps(result).encode("utf-8")
+
+            request, channel = self.make_request(
+                "GET",
+                "url_preview?url=http://twitter.com/matrixdotorg/status/12345",
+                shorthand=False,
+            )
+            request.render(self.preview_url)
+            self.pump()
+
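+            # Serve the oEmbed JSON; the "rich" HTML becomes the preview description.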
+            client = self.reactor.tcpClients[0][2].buildProtocol(None)
+            server = AccumulatingProtocol()
+            server.makeConnection(FakeTransport(client, self.reactor))
+            client.makeConnection(FakeTransport(server, self.reactor))
+            client.dataReceived(
+                (
+                    b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                    b'Content-Type: application/json; charset="utf8"\r\n\r\n'
+                )
+                % (len(end_content),)
+                + end_content
+            )
+
+            self.pump()
+            self.assertEqual(channel.code, 200)
+            self.assertEqual(
+                channel.json_body,
+                {"og:title": None, "og:description": "Content Preview"},
+            )
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 38f9b423ef..f2955a9c69 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import itertools
+from typing import List
 
 import attr
 
@@ -432,7 +433,7 @@ class StateTestCase(unittest.TestCase):
                     state_res_store=TestStateResolutionStore(event_map),
                 )
 
-                state_before = self.successResultOf(state_d)
+                state_before = self.successResultOf(defer.ensureDeferred(state_d))
 
             state_after = dict(state_before)
             if fake_event.state_key is not None:
@@ -581,7 +582,7 @@ class SimpleParamStateTestCase(unittest.TestCase):
             state_res_store=TestStateResolutionStore(self.event_map),
         )
 
-        state = self.successResultOf(state_d)
+        state = self.successResultOf(defer.ensureDeferred(state_d))
 
         self.assert_dict(self.expected_combined_state, state)
 
@@ -608,9 +609,11 @@ class TestStateResolutionStore(object):
             Deferred[dict[str, FrozenEvent]]: Dict from event_id to event.
         """
 
-        return {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map}
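+        # Wrap the result in a Deferred to match the now-asynchronous storage interface.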
+        return defer.succeed(
+            {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map}
+        )
 
-    def _get_auth_chain(self, event_ids):
+    def _get_auth_chain(self, event_ids: List[str]) -> List[str]:
         """Gets the full auth chain for a set of events (including rejected
         events).
 
@@ -622,10 +625,10 @@ class TestStateResolutionStore(object):
                presence of rejected events
 
         Args:
-            event_ids (list): The event IDs of the events to fetch the auth
+            event_ids: The event IDs of the events to fetch the auth
                 chain for. Must be state events.
         Returns:
-            Deferred[list[str]]: List of event IDs of the auth chain.
+            List of event IDs of the auth chain.
         """
 
         # Simple DFS for auth chain
@@ -648,4 +651,4 @@ class TestStateResolutionStore(object):
         chains = [frozenset(self._get_auth_chain(a)) for a in auth_sets]
 
         common = set(chains[0]).intersection(*chains[1:])
-        return set(chains[0]).union(*chains[1:]) - common
+        return defer.succeed(set(chains[0]).union(*chains[1:]) - common)
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index b45bc9c115..2b1580feeb 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -39,14 +39,18 @@ class EventPushActionsStoreTestCase(tests.unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_unread_push_actions_for_user_in_range_for_http(self):
-        yield self.store.get_unread_push_actions_for_user_in_range_for_http(
-            USER_ID, 0, 1000, 20
+        yield defer.ensureDeferred(
+            self.store.get_unread_push_actions_for_user_in_range_for_http(
+                USER_ID, 0, 1000, 20
+            )
         )
 
     @defer.inlineCallbacks
     def test_get_unread_push_actions_for_user_in_range_for_email(self):
-        yield self.store.get_unread_push_actions_for_user_in_range_for_email(
-            USER_ID, 0, 1000, 20
+        yield defer.ensureDeferred(
+            self.store.get_unread_push_actions_for_user_in_range_for_email(
+                USER_ID, 0, 1000, 20
+            )
         )
 
     @defer.inlineCallbacks
@@ -72,8 +76,10 @@ class EventPushActionsStoreTestCase(tests.unittest.TestCase):
             event.internal_metadata.stream_ordering = stream
             event.depth = stream
 
-            yield self.store.add_push_actions_to_staging(
-                event.event_id, {user_id: action}
+            yield defer.ensureDeferred(
+                self.store.add_push_actions_to_staging(
+                    event.event_id, {user_id: action}
+                )
             )
             yield self.store.db.runInteraction(
                 "",
diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py
index b9fafaa1a6..a6012c973d 100644
--- a/tests/storage/test_purge.py
+++ b/tests/storage/test_purge.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from twisted.internet import defer
+
 from synapse.rest.client.v1 import room
 
 from tests.unittest import HomeserverTestCase
@@ -49,7 +51,9 @@ class PurgeTests(HomeserverTestCase):
         event = self.successResultOf(event)
 
         # Purge everything before this topological token
-        purge = storage.purge_events.purge_history(self.room_id, event, True)
+        purge = defer.ensureDeferred(
+            storage.purge_events.purge_history(self.room_id, event, True)
+        )
         self.pump()
         self.assertEqual(self.successResultOf(purge), None)
 
@@ -88,7 +92,7 @@ class PurgeTests(HomeserverTestCase):
         )
 
         # Purge everything before this topological token
-        purge = storage.purge_history(self.room_id, event, True)
+        purge = defer.ensureDeferred(storage.purge_history(self.room_id, event, True))
         self.pump()
         f = self.failureResultOf(purge)
         self.assertIn("greater than forward", f.value.args[0])
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index db3667dc43..0f0e1cd09b 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -237,7 +237,9 @@ class RedactionTestCase(unittest.HomeserverTestCase):
 
             @defer.inlineCallbacks
             def build(self, prev_event_ids):
-                built_event = yield self._base_builder.build(prev_event_ids)
+                built_event = yield defer.ensureDeferred(
+                    self._base_builder.build(prev_event_ids)
+                )
 
                 built_event._event_id = self._event_id
                 built_event._dict["event_id"] = self._event_id
diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py
index b1dceb2918..d07b985a8e 100644
--- a/tests/storage/test_room.py
+++ b/tests/storage/test_room.py
@@ -37,11 +37,13 @@ class RoomStoreTestCase(unittest.TestCase):
         self.alias = RoomAlias.from_string("#a-room-name:test")
         self.u_creator = UserID.from_string("@creator:test")
 
-        yield self.store.store_room(
-            self.room.to_string(),
-            room_creator_user_id=self.u_creator.to_string(),
-            is_public=True,
-            room_version=RoomVersions.V1,
+        yield defer.ensureDeferred(
+            self.store.store_room(
+                self.room.to_string(),
+                room_creator_user_id=self.u_creator.to_string(),
+                is_public=True,
+                room_version=RoomVersions.V1,
+            )
         )
 
     @defer.inlineCallbacks
@@ -88,17 +90,21 @@ class RoomEventsStoreTestCase(unittest.TestCase):
 
         self.room = RoomID.from_string("!abcde:test")
 
-        yield self.store.store_room(
-            self.room.to_string(),
-            room_creator_user_id="@creator:text",
-            is_public=True,
-            room_version=RoomVersions.V1,
+        yield defer.ensureDeferred(
+            self.store.store_room(
+                self.room.to_string(),
+                room_creator_user_id="@creator:text",
+                is_public=True,
+                room_version=RoomVersions.V1,
+            )
         )
 
     @defer.inlineCallbacks
     def inject_room_event(self, **kwargs):
-        yield self.storage.persistence.persist_event(
-            self.event_factory.create_event(room_id=self.room.to_string(), **kwargs)
+        yield defer.ensureDeferred(
+            self.storage.persistence.persist_event(
+                self.event_factory.create_event(room_id=self.room.to_string(), **kwargs)
+            )
         )
 
     @defer.inlineCallbacks
@@ -109,7 +115,9 @@ class RoomEventsStoreTestCase(unittest.TestCase):
             etype=EventTypes.Name, name=name, content={"name": name}, depth=1
         )
 
-        state = yield self.store.get_current_state(room_id=self.room.to_string())
+        state = yield defer.ensureDeferred(
+            self.store.get_current_state(room_id=self.room.to_string())
+        )
 
         self.assertEquals(1, len(state))
         self.assertObjectHasAttributes(
@@ -125,7 +133,9 @@ class RoomEventsStoreTestCase(unittest.TestCase):
             etype=EventTypes.Topic, topic=topic, content={"topic": topic}, depth=1
         )
 
-        state = yield self.store.get_current_state(room_id=self.room.to_string())
+        state = yield defer.ensureDeferred(
+            self.store.get_current_state(room_id=self.room.to_string())
+        )
 
         self.assertEquals(1, len(state))
         self.assertObjectHasAttributes(
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 5dd46005e6..f282921538 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -118,18 +118,22 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
 
     def test_get_joined_users_from_context(self):
         room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
-        bob_event = event_injection.inject_member_event(
-            self.hs, room, self.u_bob, Membership.JOIN
+        bob_event = self.get_success(
+            event_injection.inject_member_event(
+                self.hs, room, self.u_bob, Membership.JOIN
+            )
         )
 
         # first, create a regular event
-        event, context = event_injection.create_event(
-            self.hs,
-            room_id=room,
-            sender=self.u_alice,
-            prev_event_ids=[bob_event.event_id],
-            type="m.test.1",
-            content={},
+        event, context = self.get_success(
+            event_injection.create_event(
+                self.hs,
+                room_id=room,
+                sender=self.u_alice,
+                prev_event_ids=[bob_event.event_id],
+                type="m.test.1",
+                content={},
+            )
         )
 
         users = self.get_success(
@@ -140,22 +144,26 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
         # Regression test for #7376: create a state event whose key matches bob's
         # user_id, but which is *not* a membership event, and persist that; then check
         # that `get_joined_users_from_context` returns the correct users for the next event.
-        non_member_event = event_injection.inject_event(
-            self.hs,
-            room_id=room,
-            sender=self.u_bob,
-            prev_event_ids=[bob_event.event_id],
-            type="m.test.2",
-            state_key=self.u_bob,
-            content={},
+        non_member_event = self.get_success(
+            event_injection.inject_event(
+                self.hs,
+                room_id=room,
+                sender=self.u_bob,
+                prev_event_ids=[bob_event.event_id],
+                type="m.test.2",
+                state_key=self.u_bob,
+                content={},
+            )
         )
-        event, context = event_injection.create_event(
-            self.hs,
-            room_id=room,
-            sender=self.u_alice,
-            prev_event_ids=[non_member_event.event_id],
-            type="m.test.3",
-            content={},
+        event, context = self.get_success(
+            event_injection.create_event(
+                self.hs,
+                room_id=room,
+                sender=self.u_alice,
+                prev_event_ids=[non_member_event.event_id],
+                type="m.test.3",
+                content={},
+            )
         )
         users = self.get_success(
             self.store.get_joined_users_from_context(event, context)
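
HomeserverTestCase-based tests such as test_roommember.py use self.get_success() instead of wrapping manually; it performs the ensureDeferred-pump-unwrap dance in one call. Roughly (a simplified sketch; the real helper lives in tests/unittest.py):

    from twisted.internet import defer


    def get_success(testcase, awaitable):
        # ensureDeferred() accepts both coroutines and ready-made
        # Deferreds, so call sites need not care which kind the helper
        # under test returns.
        d = defer.ensureDeferred(awaitable)
        testcase.pump()  # run the fake reactor so the Deferred can fire
        return testcase.successResultOf(d)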
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
index 0b88308ff4..8bd12fa847 100644
--- a/tests/storage/test_state.py
+++ b/tests/storage/test_state.py
@@ -44,11 +44,13 @@ class StateStoreTestCase(tests.unittest.TestCase):
 
         self.room = RoomID.from_string("!abc123:test")
 
-        yield self.store.store_room(
-            self.room.to_string(),
-            room_creator_user_id="@creator:text",
-            is_public=True,
-            room_version=RoomVersions.V1,
+        yield defer.ensureDeferred(
+            self.store.store_room(
+                self.room.to_string(),
+                room_creator_user_id="@creator:text",
+                is_public=True,
+                room_version=RoomVersions.V1,
+            )
         )
 
     @defer.inlineCallbacks
@@ -64,11 +66,13 @@ class StateStoreTestCase(tests.unittest.TestCase):
             },
         )
 
-        event, context = yield self.event_creation_handler.create_new_client_event(
-            builder
+        event, context = yield defer.ensureDeferred(
+            self.event_creation_handler.create_new_client_event(builder)
         )
 
-        yield self.storage.persistence.persist_event(event, context)
+        yield defer.ensureDeferred(
+            self.storage.persistence.persist_event(event, context)
+        )
 
         return event
 
@@ -87,8 +91,8 @@ class StateStoreTestCase(tests.unittest.TestCase):
             self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
         )
 
-        state_group_map = yield self.storage.state.get_state_groups_ids(
-            self.room, [e2.event_id]
+        state_group_map = yield defer.ensureDeferred(
+            self.storage.state.get_state_groups_ids(self.room, [e2.event_id])
         )
         self.assertEqual(len(state_group_map), 1)
         state_map = list(state_group_map.values())[0]
@@ -106,8 +110,8 @@ class StateStoreTestCase(tests.unittest.TestCase):
             self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
         )
 
-        state_group_map = yield self.storage.state.get_state_groups(
-            self.room, [e2.event_id]
+        state_group_map = yield defer.ensureDeferred(
+            self.storage.state.get_state_groups(self.room, [e2.event_id])
         )
         self.assertEqual(len(state_group_map), 1)
         state_list = list(state_group_map.values())[0]
@@ -148,7 +152,9 @@ class StateStoreTestCase(tests.unittest.TestCase):
         )
 
         # check we get the full state as of the final event
-        state = yield self.storage.state.get_state_for_event(e5.event_id)
+        state = yield defer.ensureDeferred(
+            self.storage.state.get_state_for_event(e5.event_id)
+        )
 
         self.assertIsNotNone(e4)
 
@@ -164,22 +170,28 @@ class StateStoreTestCase(tests.unittest.TestCase):
         )
 
         # check we can filter to the m.room.name event (with a '' state key)
-        state = yield self.storage.state.get_state_for_event(
-            e5.event_id, StateFilter.from_types([(EventTypes.Name, "")])
+        state = yield defer.ensureDeferred(
+            self.storage.state.get_state_for_event(
+                e5.event_id, StateFilter.from_types([(EventTypes.Name, "")])
+            )
         )
 
         self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
 
         # check we can filter to the m.room.name event (with a wildcard None state key)
-        state = yield self.storage.state.get_state_for_event(
-            e5.event_id, StateFilter.from_types([(EventTypes.Name, None)])
+        state = yield defer.ensureDeferred(
+            self.storage.state.get_state_for_event(
+                e5.event_id, StateFilter.from_types([(EventTypes.Name, None)])
+            )
         )
 
         self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
 
         # check we can grab the m.room.member events (with a wildcard None state key)
-        state = yield self.storage.state.get_state_for_event(
-            e5.event_id, StateFilter.from_types([(EventTypes.Member, None)])
+        state = yield defer.ensureDeferred(
+            self.storage.state.get_state_for_event(
+                e5.event_id, StateFilter.from_types([(EventTypes.Member, None)])
+            )
         )
 
         self.assertStateMapEqual(
@@ -188,12 +200,14 @@ class StateStoreTestCase(tests.unittest.TestCase):
 
         # check we can grab a specific room member without filtering out the
         # other event types
-        state = yield self.storage.state.get_state_for_event(
-            e5.event_id,
-            state_filter=StateFilter(
-                types={EventTypes.Member: {self.u_alice.to_string()}},
-                include_others=True,
-            ),
+        state = yield defer.ensureDeferred(
+            self.storage.state.get_state_for_event(
+                e5.event_id,
+                state_filter=StateFilter(
+                    types={EventTypes.Member: {self.u_alice.to_string()}},
+                    include_others=True,
+                ),
+            )
         )
 
         self.assertStateMapEqual(
@@ -206,11 +220,13 @@ class StateStoreTestCase(tests.unittest.TestCase):
         )
 
         # check that we can grab everything except members
-        state = yield self.storage.state.get_state_for_event(
-            e5.event_id,
-            state_filter=StateFilter(
-                types={EventTypes.Member: set()}, include_others=True
-            ),
+        state = yield defer.ensureDeferred(
+            self.storage.state.get_state_for_event(
+                e5.event_id,
+                state_filter=StateFilter(
+                    types={EventTypes.Member: set()}, include_others=True
+                ),
+            )
         )
 
         self.assertStateMapEqual(
@@ -222,8 +238,8 @@ class StateStoreTestCase(tests.unittest.TestCase):
         #######################################################
 
         room_id = self.room.to_string()
-        group_ids = yield self.storage.state.get_state_groups_ids(
-            room_id, [e5.event_id]
+        group_ids = yield defer.ensureDeferred(
+            self.storage.state.get_state_groups_ids(room_id, [e5.event_id])
         )
         group = list(group_ids.keys())[0]
 
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 87a16d7d7a..c2f12c2741 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -95,7 +95,7 @@ class MessageAcceptTests(unittest.HomeserverTestCase):
         prev_events that said event references.
         """
 
-        def post_json(destination, path, data, headers=None, timeout=0):
+        async def post_json(destination, path, data, headers=None, timeout=0):
             # If it asks us for new missing events, give them NOTHING
             if path.startswith("/_matrix/federation/v1/get_missing_events/"):
                 return {"events": []}
diff --git a/tests/test_server.py b/tests/test_server.py
index 030f58cbdc..073b2362cc 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -12,26 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
 import re
-from io import StringIO
 
 from twisted.internet.defer import Deferred
-from twisted.python.failure import Failure
-from twisted.test.proto_helpers import AccumulatingProtocol
 from twisted.web.resource import Resource
-from twisted.web.server import NOT_DONE_YET
 
 from synapse.api.errors import Codes, RedirectException, SynapseError
 from synapse.config.server import parse_listener_def
 from synapse.http.server import DirectServeHtmlResource, JsonResource, OptionsResource
-from synapse.http.site import SynapseSite, logger
+from synapse.http.site import SynapseSite
 from synapse.logging.context import make_deferred_yieldable
 from synapse.util import Clock
 
 from tests import unittest
 from tests.server import (
-    FakeTransport,
     ThreadedMemoryReactorClock,
     make_request,
     render,
@@ -199,10 +193,10 @@ class OptionsResourceTests(unittest.TestCase):
         return channel
 
     def test_unknown_options_request(self):
-        """An OPTIONS requests to an unknown URL still returns 200 OK."""
+        """An OPTIONS requests to an unknown URL still returns 204 No Content."""
         channel = self._make_request(b"OPTIONS", b"/foo/")
-        self.assertEqual(channel.result["code"], b"200")
-        self.assertEqual(channel.result["body"], b"{}")
+        self.assertEqual(channel.result["code"], b"204")
+        self.assertNotIn("body", channel.result)
 
         # Ensure the correct CORS headers have been added
         self.assertTrue(
@@ -219,10 +213,10 @@ class OptionsResourceTests(unittest.TestCase):
         )
 
     def test_known_options_request(self):
-        """An OPTIONS requests to an known URL still returns 200 OK."""
+        """An OPTIONS requests to an known URL still returns 204 No Content."""
         channel = self._make_request(b"OPTIONS", b"/res/")
-        self.assertEqual(channel.result["code"], b"200")
-        self.assertEqual(channel.result["body"], b"{}")
+        self.assertEqual(channel.result["code"], b"204")
+        self.assertNotIn("body", channel.result)
 
         # Ensure the correct CORS headers have been added
         self.assertTrue(
@@ -318,54 +312,3 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
         self.assertEqual(location_headers, [b"/no/over/there"])
         cookies_headers = [v for k, v in headers if k == b"Set-Cookie"]
         self.assertEqual(cookies_headers, [b"session=yespls"])
-
-
-class SiteTestCase(unittest.HomeserverTestCase):
-    def test_lose_connection(self):
-        """
-        We log the URI correctly redacted when we lose the connection.
-        """
-
-        class HangingResource(Resource):
-            """
-            A Resource that strategically hangs, as if it were processing an
-            answer.
-            """
-
-            def render(self, request):
-                return NOT_DONE_YET
-
-        # Set up a logging handler that we can inspect afterwards
-        output = StringIO()
-        handler = logging.StreamHandler(output)
-        logger.addHandler(handler)
-        old_level = logger.level
-        logger.setLevel(10)
-        self.addCleanup(logger.setLevel, old_level)
-        self.addCleanup(logger.removeHandler, handler)
-
-        # Make a resource and a Site, the resource will hang and allow us to
-        # time out the request while it's 'processing'
-        base_resource = Resource()
-        base_resource.putChild(b"", HangingResource())
-        site = SynapseSite(
-            "test", "site_tag", self.hs.config.listeners[0], base_resource, "1.0"
-        )
-
-        server = site.buildProtocol(None)
-        client = AccumulatingProtocol()
-        client.makeConnection(FakeTransport(server, self.reactor))
-        server.makeConnection(FakeTransport(client, self.reactor))
-
-        # Send a request with an access token that will get redacted
-        server.dataReceived(b"GET /?access_token=bar HTTP/1.0\r\n\r\n")
-        self.pump()
-
-        # Lose the connection
-        e = Failure(Exception("Failed123"))
-        server.connectionLost(e)
-        handler.flush()
-
-        # Our access token is redacted and the failure reason is logged.
-        self.assertIn("/?access_token=<redacted>", output.getvalue())
-        self.assertIn("Failed123", output.getvalue())
diff --git a/tests/test_state.py b/tests/test_state.py
index 66f22f6813..b5c3667d2a 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -97,17 +97,19 @@ class StateGroupStore(object):
 
         self._group_to_state[state_group] = dict(current_state_ids)
 
-        return state_group
+        return defer.succeed(state_group)
 
     def get_events(self, event_ids, **kwargs):
-        return {
-            e_id: self._event_id_to_event[e_id]
-            for e_id in event_ids
-            if e_id in self._event_id_to_event
-        }
+        return defer.succeed(
+            {
+                e_id: self._event_id_to_event[e_id]
+                for e_id in event_ids
+                if e_id in self._event_id_to_event
+            }
+        )
 
     def get_state_group_delta(self, name):
-        return None, None
+        return defer.succeed((None, None))
 
     def register_events(self, events):
         for e in events:
@@ -120,7 +122,7 @@ class StateGroupStore(object):
         self._event_to_state_group[event_id] = state_group
 
     def get_room_version_id(self, room_id):
-        return RoomVersions.V1.identifier
+        return defer.succeed(RoomVersions.V1.identifier)
 
 
 class DictObj(dict):
@@ -202,14 +204,16 @@ class StateTestCase(unittest.TestCase):
         context_store = {}  # type: dict[str, EventContext]
 
         for event in graph.walk():
-            context = yield self.state.compute_event_context(event)
+            context = yield defer.ensureDeferred(
+                self.state.compute_event_context(event)
+            )
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
         ctx_c = context_store["C"]
         ctx_d = context_store["D"]
 
-        prev_state_ids = yield ctx_d.get_prev_state_ids()
+        prev_state_ids = yield defer.ensureDeferred(ctx_d.get_prev_state_ids())
         self.assertEqual(2, len(prev_state_ids))
 
         self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event)
@@ -244,7 +248,9 @@ class StateTestCase(unittest.TestCase):
         context_store = {}
 
         for event in graph.walk():
-            context = yield self.state.compute_event_context(event)
+            context = yield defer.ensureDeferred(
+                self.state.compute_event_context(event)
+            )
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
@@ -253,7 +259,7 @@ class StateTestCase(unittest.TestCase):
         ctx_c = context_store["C"]
         ctx_d = context_store["D"]
 
-        prev_state_ids = yield ctx_d.get_prev_state_ids()
+        prev_state_ids = yield defer.ensureDeferred(ctx_d.get_prev_state_ids())
         self.assertSetEqual({"START", "A", "C"}, set(prev_state_ids.values()))
 
         self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event)
@@ -300,7 +306,9 @@ class StateTestCase(unittest.TestCase):
         context_store = {}
 
         for event in graph.walk():
-            context = yield self.state.compute_event_context(event)
+            context = yield defer.ensureDeferred(
+                self.state.compute_event_context(event)
+            )
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
@@ -310,7 +318,7 @@ class StateTestCase(unittest.TestCase):
         ctx_c = context_store["C"]
         ctx_e = context_store["E"]
 
-        prev_state_ids = yield ctx_e.get_prev_state_ids()
+        prev_state_ids = yield defer.ensureDeferred(ctx_e.get_prev_state_ids())
         self.assertSetEqual({"START", "A", "B", "C"}, set(prev_state_ids.values()))
         self.assertEqual(ctx_c.state_group, ctx_e.state_group_before_event)
         self.assertEqual(ctx_e.state_group_before_event, ctx_e.state_group)
@@ -373,7 +381,9 @@ class StateTestCase(unittest.TestCase):
         context_store = {}
 
         for event in graph.walk():
-            context = yield self.state.compute_event_context(event)
+            context = yield defer.ensureDeferred(
+                self.state.compute_event_context(event)
+            )
             self.store.register_event_context(event, context)
             context_store[event.event_id] = context
 
@@ -383,7 +393,7 @@ class StateTestCase(unittest.TestCase):
         ctx_b = context_store["B"]
         ctx_d = context_store["D"]
 
-        prev_state_ids = yield ctx_d.get_prev_state_ids()
+        prev_state_ids = yield defer.ensureDeferred(ctx_d.get_prev_state_ids())
         self.assertSetEqual({"A1", "A2", "A3", "A5", "B"}, set(prev_state_ids.values()))
 
         self.assertEqual(ctx_b.state_group, ctx_d.state_group_before_event)
@@ -411,12 +421,14 @@ class StateTestCase(unittest.TestCase):
             create_event(type="test2", state_key=""),
         ]
 
-        context = yield self.state.compute_event_context(event, old_state=old_state)
+        context = yield defer.ensureDeferred(
+            self.state.compute_event_context(event, old_state=old_state)
+        )
 
-        prev_state_ids = yield context.get_prev_state_ids()
+        prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids())
         self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values())
 
-        current_state_ids = yield context.get_current_state_ids()
+        current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
         self.assertCountEqual(
             (e.event_id for e in old_state), current_state_ids.values()
         )
@@ -434,12 +446,14 @@ class StateTestCase(unittest.TestCase):
             create_event(type="test2", state_key=""),
         ]
 
-        context = yield self.state.compute_event_context(event, old_state=old_state)
+        context = yield defer.ensureDeferred(
+            self.state.compute_event_context(event, old_state=old_state)
+        )
 
-        prev_state_ids = yield context.get_prev_state_ids()
+        prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids())
         self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values())
 
-        current_state_ids = yield context.get_current_state_ids()
+        current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
         self.assertCountEqual(
             (e.event_id for e in old_state + [event]), current_state_ids.values()
         )
@@ -462,7 +476,7 @@ class StateTestCase(unittest.TestCase):
             create_event(type="test2", state_key=""),
         ]
 
-        group_name = self.store.store_state_group(
+        group_name = yield self.store.store_state_group(
             prev_event_id,
             event.room_id,
             None,
@@ -471,9 +485,9 @@ class StateTestCase(unittest.TestCase):
         )
         self.store.register_event_id_state_group(prev_event_id, group_name)
 
-        context = yield self.state.compute_event_context(event)
+        context = yield defer.ensureDeferred(self.state.compute_event_context(event))
 
-        current_state_ids = yield context.get_current_state_ids()
+        current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
 
         self.assertEqual(
             {e.event_id for e in old_state}, set(current_state_ids.values())
@@ -494,7 +508,7 @@ class StateTestCase(unittest.TestCase):
             create_event(type="test2", state_key=""),
         ]
 
-        group_name = self.store.store_state_group(
+        group_name = yield self.store.store_state_group(
             prev_event_id,
             event.room_id,
             None,
@@ -503,9 +517,9 @@ class StateTestCase(unittest.TestCase):
         )
         self.store.register_event_id_state_group(prev_event_id, group_name)
 
-        context = yield self.state.compute_event_context(event)
+        context = yield defer.ensureDeferred(self.state.compute_event_context(event))
 
-        prev_state_ids = yield context.get_prev_state_ids()
+        prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids())
 
         self.assertEqual({e.event_id for e in old_state}, set(prev_state_ids.values()))
 
@@ -544,7 +558,7 @@ class StateTestCase(unittest.TestCase):
             event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        current_state_ids = yield context.get_current_state_ids()
+        current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
 
         self.assertEqual(len(current_state_ids), 6)
 
@@ -586,7 +600,7 @@ class StateTestCase(unittest.TestCase):
             event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        current_state_ids = yield context.get_current_state_ids()
+        current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
 
         self.assertEqual(len(current_state_ids), 6)
 
@@ -641,7 +655,7 @@ class StateTestCase(unittest.TestCase):
             event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        current_state_ids = yield context.get_current_state_ids()
+        current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
 
         self.assertEqual(old_state_2[3].event_id, current_state_ids[("test1", "1")])
 
@@ -669,14 +683,15 @@ class StateTestCase(unittest.TestCase):
             event, prev_event_id1, old_state_1, prev_event_id2, old_state_2
         )
 
-        current_state_ids = yield context.get_current_state_ids()
+        current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
 
         self.assertEqual(old_state_1[3].event_id, current_state_ids[("test1", "1")])
 
+    @defer.inlineCallbacks
     def _get_context(
         self, event, prev_event_id_1, old_state_1, prev_event_id_2, old_state_2
     ):
-        sg1 = self.store.store_state_group(
+        sg1 = yield self.store.store_state_group(
             prev_event_id_1,
             event.room_id,
             None,
@@ -685,7 +700,7 @@ class StateTestCase(unittest.TestCase):
         )
         self.store.register_event_id_state_group(prev_event_id_1, sg1)
 
-        sg2 = self.store.store_state_group(
+        sg2 = yield self.store.store_state_group(
             prev_event_id_2,
             event.room_id,
             None,
@@ -694,4 +709,5 @@ class StateTestCase(unittest.TestCase):
         )
         self.store.register_event_id_state_group(prev_event_id_2, sg2)
 
-        return self.state.compute_event_context(event)
+        result = yield defer.ensureDeferred(self.state.compute_event_context(event))
+        return result
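
StateGroupStore in test_state.py is a hand-rolled stub whose methods are now awaited by the code under test, so each one wraps its plain return value in defer.succeed(): an already-fired Deferred is awaitable, whereas a bare dict or tuple is not. In miniature (generic names, not patch code):

    from twisted.internet import defer


    class StubStore:
        def get_state_group_delta(self, name):
            # Callers do `await store.get_state_group_delta(...)`, so the
            # stub must hand back something awaitable.
            return defer.succeed((None, None))


    async def caller(store):
        prev_group, delta_ids = await store.get_state_group_delta("group")
        return prev_group, delta_ids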
diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py
index 7b345b03bb..508aeba078 100644
--- a/tests/test_utils/__init__.py
+++ b/tests/test_utils/__init__.py
@@ -17,7 +17,7 @@
 """
 Utilities for running the unit tests
 """
-from typing import Awaitable, TypeVar
+from typing import Any, Awaitable, TypeVar
 
 TV = TypeVar("TV")
 
@@ -36,3 +36,8 @@ def get_awaitable_result(awaitable: Awaitable[TV]) -> TV:
 
     # if next didn't raise, the awaitable hasn't completed.
     raise Exception("awaitable has not yet completed")
+
+
+async def make_awaitable(result: Any):
+    """Create an awaitable that just returns a result."""
+    return result
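
make_awaitable pairs naturally with unittest.mock.Mock when stubbing now-async store methods. One caveat: a coroutine can only be awaited once, so a mock that will be called repeatedly should build a fresh awaitable per call (hypothetical mock target shown):

    from unittest.mock import Mock


    async def make_awaitable(result):
        """Create an awaitable that just returns a result."""
        return result


    store = Mock()
    # side_effect builds a new coroutine on every call; return_value would
    # hand out the same coroutine twice, and the second await would raise
    # RuntimeError.
    store.get_room_version_id = Mock(side_effect=lambda room_id: make_awaitable("1"))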
diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py
index 43297b530c..8522c6fc09 100644
--- a/tests/test_utils/event_injection.py
+++ b/tests/test_utils/event_injection.py
@@ -22,14 +22,12 @@ from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.types import Collection
 
-from tests.test_utils import get_awaitable_result
-
 """
 Utility functions for poking events into the storage of the server under test.
 """
 
 
-def inject_member_event(
+async def inject_member_event(
     hs: synapse.server.HomeServer,
     room_id: str,
     sender: str,
@@ -46,7 +44,7 @@ def inject_member_event(
     if extra_content:
         content.update(extra_content)
 
-    return inject_event(
+    return await inject_event(
         hs,
         room_id=room_id,
         type=EventTypes.Member,
@@ -57,7 +55,7 @@ def inject_member_event(
     )
 
 
-def inject_event(
+async def inject_event(
     hs: synapse.server.HomeServer,
     room_version: Optional[str] = None,
     prev_event_ids: Optional[Collection[str]] = None,
@@ -72,37 +70,27 @@ def inject_event(
         prev_event_ids: prev_events for the event. If not specified, will be looked up
         kwargs: fields for the event to be created
     """
-    test_reactor = hs.get_reactor()
-
-    event, context = create_event(hs, room_version, prev_event_ids, **kwargs)
+    event, context = await create_event(hs, room_version, prev_event_ids, **kwargs)
 
-    d = hs.get_storage().persistence.persist_event(event, context)
-    test_reactor.advance(0)
-    get_awaitable_result(d)
+    await hs.get_storage().persistence.persist_event(event, context)
 
     return event
 
 
-def create_event(
+async def create_event(
     hs: synapse.server.HomeServer,
     room_version: Optional[str] = None,
     prev_event_ids: Optional[Collection[str]] = None,
     **kwargs
 ) -> Tuple[EventBase, EventContext]:
-    test_reactor = hs.get_reactor()
-
     if room_version is None:
-        d = hs.get_datastore().get_room_version_id(kwargs["room_id"])
-        test_reactor.advance(0)
-        room_version = get_awaitable_result(d)
+        room_version = await hs.get_datastore().get_room_version_id(kwargs["room_id"])
 
     builder = hs.get_event_builder_factory().for_room_version(
         KNOWN_ROOM_VERSIONS[room_version], kwargs
     )
-    d = hs.get_event_creation_handler().create_new_client_event(
+    event, context = await hs.get_event_creation_handler().create_new_client_event(
         builder, prev_event_ids=prev_event_ids
     )
-    test_reactor.advance(0)
-    event, context = get_awaitable_result(d)
 
     return event, context
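
With inject_event and create_event now native coroutines, the reactor-advancing workaround (test_reactor.advance(0) plus get_awaitable_result) disappears: callers simply await, or wrap with get_success() from synchronous tests. The shape of the conversion, shown on a toy helper (hypothetical store.fetch_event):

    from twisted.internet import defer


    @defer.inlineCallbacks
    def old_style(store):
        event = yield store.fetch_event()  # generator-based Deferred style
        return event


    async def new_style(store):
        # Awaiting a Deferred works in Twisted, so async helpers can call
        # Deferred-returning APIs directly.
        return await store.fetch_event()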
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index f7381b2885..531a9b9118 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -40,7 +40,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
         self.store = self.hs.get_datastore()
         self.storage = self.hs.get_storage()
 
-        yield create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM")
+        yield defer.ensureDeferred(create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM"))
 
     @defer.inlineCallbacks
     def test_filtering(self):
@@ -53,7 +53,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
         #
 
         # before we do that, we persist some other events to act as state.
-        self.inject_visibility("@admin:hs", "joined")
+        yield self.inject_visibility("@admin:hs", "joined")
         for i in range(0, 10):
             yield self.inject_room_member("@resident%i:hs" % i)
 
@@ -64,8 +64,8 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
             evt = yield self.inject_room_member(user, extra_content={"a": "b"})
             events_to_filter.append(evt)
 
-        filtered = yield filter_events_for_server(
-            self.storage, "test_server", events_to_filter
+        filtered = yield defer.ensureDeferred(
+            filter_events_for_server(self.storage, "test_server", events_to_filter)
         )
 
         # the result should be 5 redacted events, and 5 unredacted events.
@@ -102,8 +102,8 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
         yield self.hs.get_datastore().mark_user_erased("@erased:local_hs")
 
         # ... and the filtering happens.
-        filtered = yield filter_events_for_server(
-            self.storage, "test_server", events_to_filter
+        filtered = yield defer.ensureDeferred(
+            filter_events_for_server(self.storage, "test_server", events_to_filter)
         )
 
         for i in range(0, len(events_to_filter)):
@@ -137,10 +137,12 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
             },
         )
 
-        event, context = yield self.event_creation_handler.create_new_client_event(
-            builder
+        event, context = yield defer.ensureDeferred(
+            self.event_creation_handler.create_new_client_event(builder)
+        )
+        yield defer.ensureDeferred(
+            self.storage.persistence.persist_event(event, context)
         )
-        yield self.storage.persistence.persist_event(event, context)
         return event
 
     @defer.inlineCallbacks
@@ -158,11 +160,13 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
             },
         )
 
-        event, context = yield self.event_creation_handler.create_new_client_event(
-            builder
+        event, context = yield defer.ensureDeferred(
+            self.event_creation_handler.create_new_client_event(builder)
         )
 
-        yield self.storage.persistence.persist_event(event, context)
+        yield defer.ensureDeferred(
+            self.storage.persistence.persist_event(event, context)
+        )
         return event
 
     @defer.inlineCallbacks
@@ -179,11 +183,13 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
             },
         )
 
-        event, context = yield self.event_creation_handler.create_new_client_event(
-            builder
+        event, context = yield defer.ensureDeferred(
+            self.event_creation_handler.create_new_client_event(builder)
         )
 
-        yield self.storage.persistence.persist_event(event, context)
+        yield defer.ensureDeferred(
+            self.storage.persistence.persist_event(event, context)
+        )
         return event
 
     @defer.inlineCallbacks
@@ -265,8 +271,8 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
         storage.main = test_store
         storage.state = test_store
 
-        filtered = yield filter_events_for_server(
-            test_store, "test_server", events_to_filter
+        filtered = yield defer.ensureDeferred(
+            filter_events_for_server(test_store, "test_server", events_to_filter)
         )
         logger.info("Filtering took %f seconds", time.time() - start)
 
diff --git a/tests/unittest.py b/tests/unittest.py
index 3175a3fa02..68d2586efd 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -603,7 +603,9 @@ class HomeserverTestCase(TestCase):
             user: MXID of the user to inject the membership for.
             membership: The membership type.
         """
-        event_injection.inject_member_event(self.hs, room, user, membership)
+        self.get_success(
+            event_injection.inject_member_event(self.hs, room, user, membership)
+        )
 
 
 class FederatingHomeserverTestCase(HomeserverTestCase):
diff --git a/tests/utils.py b/tests/utils.py
index 4d17355a5c..b33b6860d4 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -638,14 +638,8 @@ class DeferredMockCallable(object):
             )
 
 
-@defer.inlineCallbacks
-def create_room(hs, room_id, creator_id):
+async def create_room(hs, room_id: str, creator_id: str):
     """Creates and persist a creation event for the given room
-
-    Args:
-        hs
-        room_id (str)
-        creator_id (str)
     """
 
     persistence_store = hs.get_storage().persistence
@@ -653,7 +647,7 @@ def create_room(hs, room_id, creator_id):
     event_builder_factory = hs.get_event_builder_factory()
     event_creation_handler = hs.get_event_creation_handler()
 
-    yield store.store_room(
+    await store.store_room(
         room_id=room_id,
         room_creator_user_id=creator_id,
         is_public=False,
@@ -671,6 +665,6 @@ def create_room(hs, room_id, creator_id):
         },
     )
 
-    event, context = yield event_creation_handler.create_new_client_event(builder)
+    event, context = await event_creation_handler.create_new_client_event(builder)
 
-    yield persistence_store.persist_event(event, context)
+    await persistence_store.persist_event(event, context)
diff --git a/tox.ini b/tox.ini
index 834d68aea5..a394f6eadc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -185,6 +185,7 @@ commands = mypy \
             synapse/handlers/cas_handler.py \
             synapse/handlers/directory.py \
             synapse/handlers/federation.py \
+            synapse/handlers/identity.py \
             synapse/handlers/oidc_handler.py \
             synapse/handlers/presence.py \
             synapse/handlers/room_member.py \
@@ -205,6 +206,7 @@ commands = mypy \
             synapse/storage/data_stores/main/ui_auth.py \
             synapse/storage/database.py \
             synapse/storage/engines \
+            synapse/storage/state.py \
             synapse/storage/util \
             synapse/streams \
             synapse/util/caches/stream_change_cache.py \