author     Hubert Chathi <hubert@uhoreg.ca>  2020-09-03 16:38:20 -0400
committer  Hubert Chathi <hubert@uhoreg.ca>  2020-09-03 16:38:20 -0400
commit     afd9aa673a2aa018de19d50d6b44e0fcfc85230f (patch)
tree       10327f31b0e0022af88e9a05b3a9f19c454d408f
parent     add support for fallback keys (diff)
parent     1.19.1 (diff)
download   synapse-afd9aa673a2aa018de19d50d6b44e0fcfc85230f.tar.xz
Merge remote-tracking branch 'origin/master' into uhoreg/dehydration_release
-rw-r--r--  .circleci/config.yml  6
-rw-r--r--  CHANGES.md  90
-rw-r--r--  changelog.d/7314.misc  1
-rw-r--r--  changelog.d/7736.feature  1
-rw-r--r--  changelog.d/7899.doc  1
-rw-r--r--  changelog.d/7902.feature  1
-rw-r--r--  changelog.d/7936.misc  1
-rw-r--r--  changelog.d/7947.misc  1
-rw-r--r--  changelog.d/7948.misc  1
-rw-r--r--  changelog.d/7949.misc  1
-rw-r--r--  changelog.d/7951.misc  1
-rw-r--r--  changelog.d/7952.misc  1
-rw-r--r--  changelog.d/7963.misc  1
-rw-r--r--  changelog.d/7964.feature  1
-rw-r--r--  changelog.d/7965.misc  1
-rw-r--r--  changelog.d/7970.misc  1
-rw-r--r--  changelog.d/7971.misc  1
-rw-r--r--  changelog.d/7973.misc  1
-rw-r--r--  changelog.d/7975.misc  1
-rw-r--r--  changelog.d/7976.misc  1
-rw-r--r--  changelog.d/7977.bugfix  1
-rw-r--r--  changelog.d/7978.bugfix  1
-rw-r--r--  changelog.d/7979.misc  1
-rw-r--r--  changelog.d/7980.bugfix  1
-rw-r--r--  changelog.d/7981.misc  1
-rw-r--r--  changelog.d/7987.misc  1
-rw-r--r--  changelog.d/7989.misc  1
-rw-r--r--  changelog.d/7990.doc  1
-rw-r--r--  changelog.d/7992.doc  1
-rw-r--r--  changelog.d/7996.bugfix  1
-rw-r--r--  changelog.d/7998.doc  1
-rw-r--r--  changelog.d/7999.bugfix  1
-rw-r--r--  changelog.d/8001.misc  1
-rw-r--r--  changelog.d/8003.misc  1
-rw-r--r--  changelog.d/8008.feature  1
-rw-r--r--  changelog.d/8011.bugfix  1
-rw-r--r--  changelog.d/8012.bugfix  1
-rw-r--r--  changelog.d/8014.misc  1
-rw-r--r--  changelog.d/8016.misc  1
-rw-r--r--  changelog.d/8024.misc  1
-rw-r--r--  changelog.d/8027.misc  1
-rw-r--r--  changelog.d/8033.misc  1
-rw-r--r--  debian/changelog  12
-rw-r--r--  docker/conf/log.config  6
-rw-r--r--  docs/admin_api/shutdown_room.md  13
-rw-r--r--  docs/reverse_proxy.md  7
-rw-r--r--  docs/sample_config.yaml  11
-rw-r--r--  docs/sample_log_config.yaml  46
-rw-r--r--  docs/systemd-with-workers/workers/federation_reader.yaml  2
-rw-r--r--  docs/workers.md  54
-rw-r--r--  mypy.ini  3
-rwxr-xr-x  scripts/synapse_port_db  2
-rw-r--r--  synapse/__init__.py  2
-rw-r--r--  synapse/api/auth.py  123
-rw-r--r--  synapse/api/auth_blocking.py  13
-rw-r--r--  synapse/api/errors.py  4
-rw-r--r--  synapse/api/filtering.py  7
-rw-r--r--  synapse/api/ratelimiting.py  37
-rw-r--r--  synapse/app/generic_worker.py  14
-rw-r--r--  synapse/app/homeserver.py  5
-rw-r--r--  synapse/config/_util.py  49
-rw-r--r--  synapse/config/logger.py  63
-rw-r--r--  synapse/config/saml2_config.py  50
-rw-r--r--  synapse/config/server.py  15
-rw-r--r--  synapse/crypto/context_factory.py  8
-rw-r--r--  synapse/events/builder.py  60
-rw-r--r--  synapse/federation/sender/transaction_manager.py  7
-rw-r--r--  synapse/handlers/appservice.py  2
-rw-r--r--  synapse/handlers/auth.py  19
-rw-r--r--  synapse/handlers/events.py  4
-rw-r--r--  synapse/handlers/federation.py  2
-rw-r--r--  synapse/handlers/message.py  33
-rw-r--r--  synapse/handlers/oidc_handler.py  8
-rw-r--r--  synapse/handlers/room_member.py  62
-rw-r--r--  synapse/handlers/saml_handler.py  42
-rw-r--r--  synapse/handlers/sync.py  6
-rw-r--r--  synapse/http/client.py  2
-rw-r--r--  synapse/http/federation/matrix_federation_agent.py  2
-rw-r--r--  synapse/http/matrixfederationclient.py  94
-rw-r--r--  synapse/http/server.py  5
-rw-r--r--  synapse/http/site.py  20
-rw-r--r--  synapse/metrics/background_process_metrics.py  34
-rw-r--r--  synapse/module_api/__init__.py  8
-rw-r--r--  synapse/notifier.py  135
-rw-r--r--  synapse/push/baserules.py  216
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py  2
-rw-r--r--  synapse/push/push_tools.py  17
-rw-r--r--  synapse/replication/slave/storage/client_ips.py  2
-rw-r--r--  synapse/replication/tcp/commands.py  5
-rw-r--r--  synapse/res/templates/saml_error.html  17
-rw-r--r--  synapse/rest/client/v1/directory.py  2
-rw-r--r--  synapse/rest/client/v1/push_rule.py  11
-rw-r--r--  synapse/rest/client/v2_alpha/account.py  86
-rw-r--r--  synapse/rest/client/v2_alpha/register.py  112
-rw-r--r--  synapse/rest/client/v2_alpha/sync.py  1
-rw-r--r--  synapse/rest/consent/consent_resource.py  4
-rw-r--r--  synapse/rest/health.py  31
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py  7
-rw-r--r--  synapse/secrets.py  8
-rw-r--r--  synapse/server.py  457
-rw-r--r--  synapse/server.pyi  155
-rw-r--r--  synapse/storage/_base.py  1
-rw-r--r--  synapse/storage/databases/__init__.py  28
-rw-r--r--  synapse/storage/databases/main/account_data.py  84
-rw-r--r--  synapse/storage/databases/main/appservice.py  34
-rw-r--r--  synapse/storage/databases/main/cache.py  1
-rw-r--r--  synapse/storage/databases/main/censor_events.py  11
-rw-r--r--  synapse/storage/databases/main/client_ips.py  59
-rw-r--r--  synapse/storage/databases/main/deviceinbox.py  103
-rw-r--r--  synapse/storage/databases/main/devices.py  335
-rw-r--r--  synapse/storage/databases/main/directory.py  51
-rw-r--r--  synapse/storage/databases/main/e2e_room_keys.py  41
-rw-r--r--  synapse/storage/databases/main/end_to_end_keys.py  78
-rw-r--r--  synapse/storage/databases/main/event_federation.py  38
-rw-r--r--  synapse/storage/databases/main/event_push_actions.py  5
-rw-r--r--  synapse/storage/databases/main/events.py  48
-rw-r--r--  synapse/storage/databases/main/events_worker.py  86
-rw-r--r--  synapse/storage/databases/main/filtering.py  8
-rw-r--r--  synapse/storage/databases/main/group_server.py  103
-rw-r--r--  synapse/storage/databases/main/metrics.py  20
-rw-r--r--  synapse/storage/databases/main/monthly_active_users.py  31
-rw-r--r--  synapse/storage/databases/main/presence.py  7
-rw-r--r--  synapse/storage/databases/main/profile.py  21
-rw-r--r--  synapse/storage/databases/main/push_rule.py  100
-rw-r--r--  synapse/storage/databases/main/receipts.py  9
-rw-r--r--  synapse/storage/databases/main/registration.py  233
-rw-r--r--  synapse/storage/databases/main/relations.py  19
-rw-r--r--  synapse/storage/databases/main/roommember.py  263
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/12unread_messages.sql  18
-rw-r--r--  synapse/storage/databases/main/search.py  69
-rw-r--r--  synapse/storage/databases/main/signatures.py  7
-rw-r--r--  synapse/storage/databases/main/tags.py  103
-rw-r--r--  synapse/storage/databases/main/transactions.py  7
-rw-r--r--  synapse/storage/databases/main/user_directory.py  124
-rw-r--r--  synapse/storage/databases/state/bg_updates.py  18
-rw-r--r--  synapse/types.py  23
-rw-r--r--  synapse/util/__init__.py  4
-rw-r--r--  synapse/util/caches/descriptors.py  2
-rw-r--r--  synapse/util/frozenutils.py  7
-rw-r--r--  synapse/util/metrics.py  46
-rw-r--r--  synapse/util/retryutils.py  16
-rw-r--r--  tests/api/test_auth.py  69
-rw-r--r--  tests/api/test_filtering.py  36
-rw-r--r--  tests/api/test_ratelimiting.py  73
-rw-r--r--  tests/handlers/test_appservice.py  2
-rw-r--r--  tests/handlers/test_register.py  11
-rw-r--r--  tests/handlers/test_typing.py  9
-rw-r--r--  tests/rest/admin/test_user.py  10
-rw-r--r--  tests/rest/client/test_retention.py  4
-rw-r--r--  tests/rest/client/v1/test_profile.py  4
-rw-r--r--  tests/rest/client/v1/test_rooms.py  93
-rw-r--r--  tests/rest/client/v1/test_typing.py  6
-rw-r--r--  tests/rest/client/v1/utils.py  30
-rw-r--r--  tests/rest/client/v2_alpha/test_register.py  2
-rw-r--r--  tests/rest/client/v2_alpha/test_sync.py  157
-rw-r--r--  tests/rest/test_health.py  34
-rw-r--r--  tests/server_notices/test_resource_limits_server_notices.py  7
-rw-r--r--  tests/storage/test_appservice.py  24
-rw-r--r--  tests/storage/test_devices.py  44
-rw-r--r--  tests/storage/test_directory.py  32
-rw-r--r--  tests/storage/test_end_to_end_keys.py  28
-rw-r--r--  tests/storage/test_monthly_active_users.py  25
-rw-r--r--  tests/storage/test_redaction.py  4
-rw-r--r--  tests/storage/test_registration.py  18
-rw-r--r--  tests/storage/test_user_directory.py  4
-rw-r--r--  tests/test_federation.py  18
-rw-r--r--  tests/unittest.py  24
-rw-r--r--  tox.ini  6
169 files changed, 2924 insertions, 2515 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 98c217dd1d..5bd2ab2b76 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -4,18 +4,16 @@ jobs:
     machine: true
     steps:
       - checkout
-      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 .
+      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} .
       - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
       - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
-      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
   dockerhubuploadlatest:
     machine: true
     steps:
       - checkout
-      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 .
+      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest .
       - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
       - run: docker push matrixdotorg/synapse:latest
-      - run: docker push matrixdotorg/synapse:latest-py3
 
 workflows:
   version: 2
diff --git a/CHANGES.md b/CHANGES.md
index 6c986808eb..d859baa9ff 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,93 @@
+Synapse 1.19.1 (2020-08-27)
+===========================
+
+No significant changes.
+
+
+Synapse 1.19.1rc1 (2020-08-25)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.19.0 where appservices with ratelimiting disabled would still be ratelimited when joining rooms. ([\#8139](https://github.com/matrix-org/synapse/issues/8139))
+- Fix a bug introduced in v1.19.0 that would cause e.g. profile updates to fail due to incorrect application of rate limits on join requests. ([\#8153](https://github.com/matrix-org/synapse/issues/8153))
+
+
+Synapse 1.19.0 (2020-08-17)
+===========================
+
+No significant changes since 1.19.0rc1.
+
+Removal warning
+---------------
+
+As outlined in the [previous release](https://github.com/matrix-org/synapse/releases/tag/v1.18.0), we are no longer publishing Docker images with the `-py3` tag suffix. On top of that, we have also removed the `latest-py3` tag. Please see [the announcement in the upgrade notes for 1.18.0](https://github.com/matrix-org/synapse/blob/develop/UPGRADE.rst#upgrading-to-v1180).
+
+
+Synapse 1.19.0rc1 (2020-08-13)
+==============================
+
+Features
+--------
+
+- Add option to allow server admins to join rooms which fail complexity checks. Contributed by @lugino-emeritus. ([\#7902](https://github.com/matrix-org/synapse/issues/7902))
+- Add an option to purge room or not with delete room admin endpoint (`POST /_synapse/admin/v1/rooms/<room_id>/delete`). Contributed by @dklimpel. ([\#7964](https://github.com/matrix-org/synapse/issues/7964))
+- Add rate limiting to users joining rooms. ([\#8008](https://github.com/matrix-org/synapse/issues/8008))
+- Add a `/health` endpoint to every configured HTTP listener that can be used as a health check endpoint by load balancers. ([\#8048](https://github.com/matrix-org/synapse/issues/8048))
+- Allow login to be blocked based on the values of SAML attributes. ([\#8052](https://github.com/matrix-org/synapse/issues/8052))
+- Allow guest access to the `GET /_matrix/client/r0/rooms/{room_id}/members` endpoint, according to MSC2689. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#7314](https://github.com/matrix-org/synapse/issues/7314))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse v1.7.2 which caused inaccurate membership counts in the room directory. ([\#7977](https://github.com/matrix-org/synapse/issues/7977))
+- Fix a long standing bug: 'Duplicate key value violates unique constraint "event_relations_id"' when message retention is configured. ([\#7978](https://github.com/matrix-org/synapse/issues/7978))
+- Fix "no create event in auth events" when trying to reject invitation after inviter leaves. Bug introduced in Synapse v1.10.0. ([\#7980](https://github.com/matrix-org/synapse/issues/7980))
+- Fix various comments and minor discrepancies in server notices code. ([\#7996](https://github.com/matrix-org/synapse/issues/7996))
+- Fix a long standing bug where HTTP HEAD requests resulted in a 400 error. ([\#7999](https://github.com/matrix-org/synapse/issues/7999))
+- Fix a long-standing bug which caused two copies of some log lines to be written when synctl was used along with a MemoryHandler logger. ([\#8011](https://github.com/matrix-org/synapse/issues/8011), [\#8012](https://github.com/matrix-org/synapse/issues/8012))
+
+
+Updates to the Docker image
+---------------------------
+
+- We no longer publish Docker images with the `-py3` tag suffix, as [announced in the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/UPGRADE.rst#upgrading-to-v1180). ([\#8056](https://github.com/matrix-org/synapse/issues/8056))
+
+
+Improved Documentation
+----------------------
+
+- Document how to set up a client .well-known file and fix several pieces of outdated documentation. ([\#7899](https://github.com/matrix-org/synapse/issues/7899))
+- Improve workers docs. ([\#7990](https://github.com/matrix-org/synapse/issues/7990), [\#8000](https://github.com/matrix-org/synapse/issues/8000))
+- Fix typo in `docs/workers.md`. ([\#7992](https://github.com/matrix-org/synapse/issues/7992))
+- Add documentation for how to undo a room shutdown. ([\#7998](https://github.com/matrix-org/synapse/issues/7998), [\#8010](https://github.com/matrix-org/synapse/issues/8010))
+
+
+Internal Changes
+----------------
+
+- Reduce the amount of whitespace in JSON stored and sent in responses. Contributed by David Vo. ([\#7372](https://github.com/matrix-org/synapse/issues/7372))
+- Switch to the JSON implementation from the standard library and bump the minimum version of the canonicaljson library to 1.2.0. ([\#7936](https://github.com/matrix-org/synapse/issues/7936), [\#7979](https://github.com/matrix-org/synapse/issues/7979))
+- Convert various parts of the codebase to async/await. ([\#7947](https://github.com/matrix-org/synapse/issues/7947), [\#7948](https://github.com/matrix-org/synapse/issues/7948), [\#7949](https://github.com/matrix-org/synapse/issues/7949), [\#7951](https://github.com/matrix-org/synapse/issues/7951), [\#7963](https://github.com/matrix-org/synapse/issues/7963), [\#7973](https://github.com/matrix-org/synapse/issues/7973), [\#7975](https://github.com/matrix-org/synapse/issues/7975), [\#7976](https://github.com/matrix-org/synapse/issues/7976), [\#7981](https://github.com/matrix-org/synapse/issues/7981), [\#7987](https://github.com/matrix-org/synapse/issues/7987), [\#7989](https://github.com/matrix-org/synapse/issues/7989), [\#8003](https://github.com/matrix-org/synapse/issues/8003), [\#8014](https://github.com/matrix-org/synapse/issues/8014), [\#8016](https://github.com/matrix-org/synapse/issues/8016), [\#8027](https://github.com/matrix-org/synapse/issues/8027), [\#8031](https://github.com/matrix-org/synapse/issues/8031), [\#8032](https://github.com/matrix-org/synapse/issues/8032), [\#8035](https://github.com/matrix-org/synapse/issues/8035), [\#8042](https://github.com/matrix-org/synapse/issues/8042), [\#8044](https://github.com/matrix-org/synapse/issues/8044), [\#8045](https://github.com/matrix-org/synapse/issues/8045), [\#8061](https://github.com/matrix-org/synapse/issues/8061), [\#8062](https://github.com/matrix-org/synapse/issues/8062), [\#8063](https://github.com/matrix-org/synapse/issues/8063), [\#8066](https://github.com/matrix-org/synapse/issues/8066), [\#8069](https://github.com/matrix-org/synapse/issues/8069), [\#8070](https://github.com/matrix-org/synapse/issues/8070))
+- Move some database-related log lines from the default logger to the database/transaction loggers. ([\#7952](https://github.com/matrix-org/synapse/issues/7952))
+- Add a script to detect source code files using non-unix line terminators. ([\#7965](https://github.com/matrix-org/synapse/issues/7965), [\#7970](https://github.com/matrix-org/synapse/issues/7970))
+- Log the SAML session ID during creation. ([\#7971](https://github.com/matrix-org/synapse/issues/7971))
+- Implement new experimental push rules for some users. ([\#7997](https://github.com/matrix-org/synapse/issues/7997))
+- Remove redundant and unreliable signature check for v1 Identity Service lookup responses. ([\#8001](https://github.com/matrix-org/synapse/issues/8001))
+- Improve the performance of the register endpoint. ([\#8009](https://github.com/matrix-org/synapse/issues/8009))
+- Reduce less useful output in the newsfragment CI step. Add a link to the changelog section of the contributing guide on error. ([\#8024](https://github.com/matrix-org/synapse/issues/8024))
+- Rename storage layer objects to be more sensible. ([\#8033](https://github.com/matrix-org/synapse/issues/8033))
+- Change the default log config to reduce disk I/O and storage for new servers. ([\#8040](https://github.com/matrix-org/synapse/issues/8040))
+- Add an assertion on `prev_events` in `create_new_client_event`. ([\#8041](https://github.com/matrix-org/synapse/issues/8041))
+- Add a comment to `ServerContextFactory` about the use of `SSLv23_METHOD`. ([\#8043](https://github.com/matrix-org/synapse/issues/8043))
+- Log `OPTIONS` requests at `DEBUG` rather than `INFO` level to reduce amount logged at `INFO`. ([\#8049](https://github.com/matrix-org/synapse/issues/8049))
+- Reduce amount of outbound request logging at `INFO` level. ([\#8050](https://github.com/matrix-org/synapse/issues/8050))
+- It is no longer necessary to explicitly define `filters` in the logging configuration. (Continuing to do so is redundant but harmless.) ([\#8051](https://github.com/matrix-org/synapse/issues/8051))
+- Add and improve type hints. ([\#8058](https://github.com/matrix-org/synapse/issues/8058), [\#8064](https://github.com/matrix-org/synapse/issues/8064), [\#8060](https://github.com/matrix-org/synapse/issues/8060), [\#8067](https://github.com/matrix-org/synapse/issues/8067))
+
+
 Synapse 1.18.0 (2020-07-30)
 ===========================
 
diff --git a/changelog.d/7314.misc b/changelog.d/7314.misc
deleted file mode 100644
index 30720100c2..0000000000
--- a/changelog.d/7314.misc
+++ /dev/null
@@ -1 +0,0 @@
-Allow guest access to the `GET /_matrix/client/r0/rooms/{room_id}/members` endpoint, according to MSC2689. Contributed by Awesome Technologies Innovationslabor GmbH.
diff --git a/changelog.d/7736.feature b/changelog.d/7736.feature
deleted file mode 100644
index feb02be234..0000000000
--- a/changelog.d/7736.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add unread messages count to sync responses, as specified in [MSC2654](https://github.com/matrix-org/matrix-doc/pull/2654).
diff --git a/changelog.d/7899.doc b/changelog.d/7899.doc
deleted file mode 100644
index 847c2cb62c..0000000000
--- a/changelog.d/7899.doc
+++ /dev/null
@@ -1 +0,0 @@
-Document how to set up a Client Well-Known file and fix several pieces of outdated documentation.
diff --git a/changelog.d/7902.feature b/changelog.d/7902.feature
deleted file mode 100644
index 4feae8cc29..0000000000
--- a/changelog.d/7902.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add option to allow server admins to join rooms which fail complexity checks. Contributed by @lugino-emeritus.
diff --git a/changelog.d/7936.misc b/changelog.d/7936.misc
deleted file mode 100644
index 4304bbdd25..0000000000
--- a/changelog.d/7936.misc
+++ /dev/null
@@ -1 +0,0 @@
-Switch to the JSON implementation from the standard library and bump the minimum version of the canonicaljson library to 1.2.0.
diff --git a/changelog.d/7947.misc b/changelog.d/7947.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/7947.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7948.misc b/changelog.d/7948.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/7948.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7949.misc b/changelog.d/7949.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/7949.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7951.misc b/changelog.d/7951.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/7951.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7952.misc b/changelog.d/7952.misc
deleted file mode 100644
index 93c25cb386..0000000000
--- a/changelog.d/7952.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move some database-related log lines from the default logger to the database/transaction loggers.
\ No newline at end of file
diff --git a/changelog.d/7963.misc b/changelog.d/7963.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/7963.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7964.feature b/changelog.d/7964.feature
deleted file mode 100644
index ffe861650c..0000000000
--- a/changelog.d/7964.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add an option to purge room or not with delete room admin endpoint (`POST /_synapse/admin/v1/rooms/<room_id>/delete`). Contributed by @dklimpel.
\ No newline at end of file
diff --git a/changelog.d/7965.misc b/changelog.d/7965.misc
deleted file mode 100644
index ee9f1a7114..0000000000
--- a/changelog.d/7965.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a script to detect source code files using non-unix line terminators.
\ No newline at end of file
diff --git a/changelog.d/7970.misc b/changelog.d/7970.misc
deleted file mode 100644
index ee9f1a7114..0000000000
--- a/changelog.d/7970.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a script to detect source code files using non-unix line terminators.
\ No newline at end of file
diff --git a/changelog.d/7971.misc b/changelog.d/7971.misc
deleted file mode 100644
index 87a4eb1f4d..0000000000
--- a/changelog.d/7971.misc
+++ /dev/null
@@ -1 +0,0 @@
-Log the SAML session ID during creation.
diff --git a/changelog.d/7973.misc b/changelog.d/7973.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/7973.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7975.misc b/changelog.d/7975.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/7975.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7976.misc b/changelog.d/7976.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/7976.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7977.bugfix b/changelog.d/7977.bugfix
deleted file mode 100644
index c587f13055..0000000000
--- a/changelog.d/7977.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse v1.7.2 which caused inaccurate membership counts in the room directory.
diff --git a/changelog.d/7978.bugfix b/changelog.d/7978.bugfix
deleted file mode 100644
index 247b18db20..0000000000
--- a/changelog.d/7978.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long standing bug: 'Duplicate key value violates unique constraint "event_relations_id"' when message retention is configured.
diff --git a/changelog.d/7979.misc b/changelog.d/7979.misc
deleted file mode 100644
index 4304bbdd25..0000000000
--- a/changelog.d/7979.misc
+++ /dev/null
@@ -1 +0,0 @@
-Switch to the JSON implementation from the standard library and bump the minimum version of the canonicaljson library to 1.2.0.
diff --git a/changelog.d/7980.bugfix b/changelog.d/7980.bugfix
deleted file mode 100644
index fa351b4b77..0000000000
--- a/changelog.d/7980.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix "no create event in auth events" when trying to reject invitation after inviter leaves. Bug introduced in Synapse v1.10.0.
diff --git a/changelog.d/7981.misc b/changelog.d/7981.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/7981.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7987.misc b/changelog.d/7987.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/7987.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7989.misc b/changelog.d/7989.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/7989.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/7990.doc b/changelog.d/7990.doc
deleted file mode 100644
index 8d8fd926e9..0000000000
--- a/changelog.d/7990.doc
+++ /dev/null
@@ -1 +0,0 @@
-Improve workers docs.
diff --git a/changelog.d/7992.doc b/changelog.d/7992.doc
deleted file mode 100644
index 3368fb5912..0000000000
--- a/changelog.d/7992.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix typo in `docs/workers.md`.
diff --git a/changelog.d/7996.bugfix b/changelog.d/7996.bugfix
deleted file mode 100644
index 1e51f20558..0000000000
--- a/changelog.d/7996.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix various comments and minor discrepancies in server notices code.
diff --git a/changelog.d/7998.doc b/changelog.d/7998.doc
deleted file mode 100644
index fc8b3f0c3d..0000000000
--- a/changelog.d/7998.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add documentation for how to undo a room shutdown.
diff --git a/changelog.d/7999.bugfix b/changelog.d/7999.bugfix
deleted file mode 100644
index e0b8c4922f..0000000000
--- a/changelog.d/7999.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long standing bug where HTTP HEAD requests resulted in a 400 error.
diff --git a/changelog.d/8001.misc b/changelog.d/8001.misc
deleted file mode 100644
index 0be4b37d22..0000000000
--- a/changelog.d/8001.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove redundant and unreliable signature check for v1 Identity Service lookup responses.
diff --git a/changelog.d/8003.misc b/changelog.d/8003.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/8003.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/8008.feature b/changelog.d/8008.feature
deleted file mode 100644
index c6d381809a..0000000000
--- a/changelog.d/8008.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add rate limiting to users joining rooms.
diff --git a/changelog.d/8011.bugfix b/changelog.d/8011.bugfix
deleted file mode 100644
index c673040de9..0000000000
--- a/changelog.d/8011.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug which caused two copies of some log lines to be written when synctl was used along with a MemoryHandler logger.
diff --git a/changelog.d/8012.bugfix b/changelog.d/8012.bugfix
deleted file mode 100644
index c673040de9..0000000000
--- a/changelog.d/8012.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug which caused two copies of some log lines to be written when synctl was used along with a MemoryHandler logger.
diff --git a/changelog.d/8014.misc b/changelog.d/8014.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/8014.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/8016.misc b/changelog.d/8016.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/8016.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/8024.misc b/changelog.d/8024.misc
deleted file mode 100644
index 4bc739502b..0000000000
--- a/changelog.d/8024.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce less useful output in the newsfragment CI step. Add a link to the changelog section of the contributing guide on error.
\ No newline at end of file
diff --git a/changelog.d/8027.misc b/changelog.d/8027.misc
deleted file mode 100644
index dfe4c03171..0000000000
--- a/changelog.d/8027.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert various parts of the codebase to async/await.
diff --git a/changelog.d/8033.misc b/changelog.d/8033.misc
deleted file mode 100644
index 7a9782d14b..0000000000
--- a/changelog.d/8033.misc
+++ /dev/null
@@ -1 +0,0 @@
-Rename storage layer objects to be more sensible.
diff --git a/debian/changelog b/debian/changelog
index a0af2b78a8..6676706dea 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,12 +1,18 @@
-matrix-synapse-py3 (1.xx.0) stable; urgency=medium
+matrix-synapse-py3 (1.19.1) stable; urgency=medium
+
+  * New synapse release 1.19.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 27 Aug 2020 10:50:19 +0100
+
+matrix-synapse-py3 (1.19.0) stable; urgency=medium
 
   [ Synapse Packaging team ]
-  * New synapse release 1.xx.0.
+  * New synapse release 1.19.0.
 
   [ Aaron Raimist ]
   * Fix outdated documentation for SYNAPSE_CACHE_FACTOR
 
- -- Synapse Packaging team <packages@matrix.org>  XXXXX
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 17 Aug 2020 14:06:42 +0100
 
 matrix-synapse-py3 (1.18.0) stable; urgency=medium
 
diff --git a/docker/conf/log.config b/docker/conf/log.config
index ed418a57cd..491bbcc87a 100644
--- a/docker/conf/log.config
+++ b/docker/conf/log.config
@@ -4,16 +4,10 @@ formatters:
   precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
 
-filters:
-  context:
-    (): synapse.logging.context.LoggingContextFilter
-    request: ""
-
 handlers:
   console:
     class: logging.StreamHandler
     formatter: precise
-    filters: [context]
 
 loggers:
     synapse.storage.SQL:
diff --git a/docs/admin_api/shutdown_room.md b/docs/admin_api/shutdown_room.md
index 2ff552bcb3..9b1cb1c184 100644
--- a/docs/admin_api/shutdown_room.md
+++ b/docs/admin_api/shutdown_room.md
@@ -79,13 +79,20 @@ Response:
 the structure can and does change without notice.
 
 First, it's important to understand that a room shutdown is very destructive. Undoing a shutdown is not as simple as pretending it
-never happened - work has to be done to move forward instead of resetting the past.
+never happened - work has to be done to move forward instead of resetting the past. In fact, in some cases it might not be possible
+to recover at all:
 
-1. For safety reasons, it is recommended to shut down Synapse prior to continuing.
+* If the room was invite-only, your users will need to be re-invited.
+* If the room no longer has any members at all, it'll be impossible to rejoin.
+* The first user to rejoin will have to do so via an alias on a different server.
+
+With all that being said, if you still want to try and recover the room:
+
+1. For safety reasons, shut down Synapse.
 2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
   * As a precaution, it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;`.
    * The room ID is the same one supplied to the shutdown room API, not the Content Violation room.
-3. Restart Synapse (required).
+3. Restart Synapse.
 
 You will have to manually handle, if you so choose, the following:
 
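The transactional `DELETE` recommended above can also be scripted. A minimal sketch, assuming a PostgreSQL database and the psycopg2 driver; the connection string and room ID are placeholders:

```python
# Hedged sketch: transactional unblock of a shut-down room. Assumes
# PostgreSQL and psycopg2; connection details and room ID are placeholders.
import psycopg2

conn = psycopg2.connect("dbname=synapse user=synapse_user")
try:
    with conn:  # BEGIN; commits on success, rolls back on any exception
        with conn.cursor() as cur:
            cur.execute(
                "DELETE FROM blocked_rooms WHERE room_id = %s",
                ("!example:example.org",),
            )
            # Verify exactly one row was deleted before the COMMIT happens.
            if cur.rowcount != 1:
                raise RuntimeError("expected 1 row, deleted %d" % cur.rowcount)
finally:
    conn.close()
```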
diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md
index 7bfb96eff6..fd48ba0874 100644
--- a/docs/reverse_proxy.md
+++ b/docs/reverse_proxy.md
@@ -139,3 +139,10 @@ client IP addresses are recorded correctly.
 Having done so, you can then use `https://matrix.example.com` (instead
 of `https://matrix.example.com:8448`) as the "Custom server" when
 connecting to Synapse from a client.
+
+
+## Health check endpoint
+
+Synapse exposes a health check endpoint for use by reverse proxies.
+Each configured HTTP listener has a `/health` endpoint which always returns
+200 OK (and doesn't get logged).
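
For illustration, the endpoint can be probed with nothing more than the Python standard library; the host and port below assume the default client listener on localhost:8008:

```python
# Minimal health-probe sketch using only the standard library; the host and
# port are assumptions (default client listener on localhost:8008).
from urllib.request import urlopen

with urlopen("http://localhost:8008/health", timeout=5) as resp:
    assert resp.status == 200
    print(resp.read())  # short plain-text body
```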
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index fe85978a1f..9235b89fb1 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1577,6 +1577,17 @@ saml2_config:
   #
   #grandfathered_mxid_source_attribute: upn
 
+  # It is possible to configure Synapse to only allow logins if SAML attributes
+  # match particular values. The requirements can be listed under
+  # `attribute_requirements` as shown below. All of the listed attributes must
+  # match for the login to be permitted.
+  #
+  #attribute_requirements:
+  #  - attribute: userGroup
+  #    value: "staff"
+  #  - attribute: department
+  #    value: "sales"
+
   # Directory in which Synapse will try to find the template files below.
   # If not set, default templates from within the Synapse package will be used.
   #
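
To make the matching rule concrete, here is an illustrative sketch of the "all listed attributes must match" semantics; this is not Synapse's internal code, and the helper name and data shapes are hypothetical:

```python
# Hypothetical helper showing the intended semantics: every requirement must
# be satisfied by the SAML assertion for login to proceed.
def login_permitted(saml_attributes: dict, requirements: list) -> bool:
    # SAML attributes typically map a name to a list of values.
    return all(
        req["value"] in saml_attributes.get(req["attribute"], [])
        for req in requirements
    )

requirements = [
    {"attribute": "userGroup", "value": "staff"},
    {"attribute": "department", "value": "sales"},
]

print(login_permitted({"userGroup": ["staff"], "department": ["sales"]}, requirements))  # True
print(login_permitted({"userGroup": ["staff"]}, requirements))  # False
```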
diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml
index 1a2739455e..55a48a9ed6 100644
--- a/docs/sample_log_config.yaml
+++ b/docs/sample_log_config.yaml
@@ -11,24 +11,33 @@ formatters:
     precise:
         format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
 
-filters:
-    context:
-        (): synapse.logging.context.LoggingContextFilter
-        request: ""
-
 handlers:
     file:
-        class: logging.handlers.RotatingFileHandler
+        class: logging.handlers.TimedRotatingFileHandler
         formatter: precise
         filename: /var/log/matrix-synapse/homeserver.log
-        maxBytes: 104857600
-        backupCount: 10
-        filters: [context]
+        when: midnight
+        backupCount: 3  # Does not include the current log file.
         encoding: utf8
+
+    # Default to buffering writes to log file for efficiency. This means that
+    # there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
+    # logs will still be flushed immediately.
+    buffer:
+        class: logging.handlers.MemoryHandler
+        target: file
+        # The capacity is the number of log lines that are buffered before
+        # being written to disk. Increasing this will lead to better
+        # performance, at the expense of it taking longer for log lines to
+        # be written to disk.
+        capacity: 10
+        flushLevel: 30  # Flush for WARNING logs as well
+
+    # A handler that writes logs to stderr. Unused by default, but can be used
+    # instead of "buffer" and "file" in the logger handlers.
     console:
         class: logging.StreamHandler
         formatter: precise
-        filters: [context]
 
 loggers:
     synapse.storage.SQL:
@@ -36,8 +45,23 @@ loggers:
         # information such as access tokens.
         level: INFO
 
+    twisted:
+        # We send the twisted logging directly to the file handler,
+        # to work around https://github.com/matrix-org/synapse/issues/3471
+        # when using the "buffer" logger. Use "console" to log to stderr instead.
+        handlers: [file]
+        propagate: false
+
 root:
     level: INFO
-    handlers: [file, console]
+
+    # Write logs to the `buffer` handler, which will buffer them together in memory,
+    # then write them to a file.
+    #
+    # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
+    # also need to update the configuration for the `twisted` logger above, in
+    # this case.)
+    #
+    handlers: [buffer]
 
 disable_existing_loggers: false
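
The buffering behaviour configured above is plain `logging.handlers.MemoryHandler` from the standard library: records are held until the capacity is reached or a record at or above `flushLevel` arrives. A self-contained sketch (the log file name is a placeholder):

```python
# Stdlib sketch of the buffer -> file handler chain set up in the YAML above.
import logging
from logging.handlers import MemoryHandler, TimedRotatingFileHandler

file_handler = TimedRotatingFileHandler(
    "homeserver.log", when="midnight", backupCount=3, encoding="utf8"
)
# flushLevel=30 (WARNING): any WARNING/ERROR record flushes the whole buffer.
buffer_handler = MemoryHandler(
    capacity=10, flushLevel=logging.WARNING, target=file_handler
)

logger = logging.getLogger("demo")
logger.setLevel(logging.INFO)
logger.addHandler(buffer_handler)

logger.info("held in memory until the buffer fills or a WARNING arrives")
logger.warning("this record triggers an immediate flush to disk")
```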
diff --git a/docs/systemd-with-workers/workers/federation_reader.yaml b/docs/systemd-with-workers/workers/federation_reader.yaml
index 5b65c7040d..13e69e62c9 100644
--- a/docs/systemd-with-workers/workers/federation_reader.yaml
+++ b/docs/systemd-with-workers/workers/federation_reader.yaml
@@ -1,7 +1,7 @@
 worker_app: synapse.app.federation_reader
+worker_name: federation_reader1
 
 worker_replication_host: 127.0.0.1
-worker_replication_port: 9092
 worker_replication_http_port: 9093
 
 worker_listeners:
diff --git a/docs/workers.md b/docs/workers.md
index 80b65a0cec..bfec745897 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -23,7 +23,7 @@ The processes communicate with each other via a Synapse-specific protocol called
 feeds streams of newly written data between processes so they can be kept in
 sync with the database state.
 
-When configured to do so, Synapse uses a 
+When configured to do so, Synapse uses a
 [Redis pub/sub channel](https://redis.io/topics/pubsub) to send the replication
 stream between all configured Synapse processes. Additionally, processes may
 make HTTP requests to each other, primarily for operations which need to wait
@@ -66,23 +66,31 @@ https://hub.docker.com/r/matrixdotorg/synapse/.
 
 To make effective use of the workers, you will need to configure an HTTP
 reverse-proxy such as nginx or haproxy, which will direct incoming requests to
-the correct worker, or to the main synapse instance. See 
+the correct worker, or to the main synapse instance. See
 [reverse_proxy.md](reverse_proxy.md) for information on setting up a reverse
 proxy.
 
-To enable workers you should create a configuration file for each worker
-process. Each worker configuration file inherits the configuration of the shared
-homeserver configuration file.  You can then override configuration specific to
-that worker, e.g. the HTTP listener that it provides (if any); logging
-configuration; etc.  You should minimise the number of overrides though to
-maintain a usable config.
+When using workers, each worker process has its own configuration file which
+contains settings specific to that worker, such as the HTTP listener that it
+provides (if any), logging configuration, etc.
 
+Normally, the worker processes are configured to read from a shared
+configuration file as well as the worker-specific configuration files. This
+makes it easier to keep common configuration settings synchronised across all
+the processes.
 
-### Shared Configuration
+The main process is somewhat special in this respect: it does not normally
+need its own configuration file and can take all of its configuration from the
+shared configuration file.
+
+
+### Shared configuration
+
+Normally, only a couple of changes are needed to make an existing configuration
+file suitable for use with workers. First, you need to enable an "HTTP replication
+listener" for the main process; and secondly, you need to enable redis-based
+replication. For example:
 
-Next you need to add both a HTTP replication listener, used for HTTP requests
-between processes, and redis config to the shared Synapse configuration file
-(`homeserver.yaml`). For example:
 
 ```yaml
 # extend the existing `listeners` section. This defines the ports that the
@@ -105,7 +113,7 @@ Under **no circumstances** should the replication listener be exposed to the
 public internet; it has no authentication and is unencrypted.
 
 
-### Worker Configuration
+### Worker configuration
 
 In the config file for each worker, you must specify the type of worker
 application (`worker_app`), and you should specify a unique name for the worker
@@ -145,6 +153,9 @@ plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.
 Obviously you should configure your reverse-proxy to route the relevant
 endpoints to the worker (`localhost:8083` in the above example).
 
+
+### Running Synapse with workers
+
 Finally, you need to start your worker processes. This can be done with either
 `synctl` or your distribution's preferred service manager such as `systemd`. We
 recommend the use of `systemd` where available: for information on setting up
@@ -407,6 +418,23 @@ all these to be folded into the `generic_worker` app and to use config to define
 which processes handle the various processing such as push notifications.
 
 
+## Migration from old config
+
+There are two main independent changes that have been made: introducing Redis
+support and merging apps into `synapse.app.generic_worker`. Both these changes
+are backwards compatible, so no changes to the config are required; however,
+server admins are encouraged to plan a migration to Redis, as the old-style
+direct TCP replication config is deprecated.
+
+To migrate to Redis, add the `redis` config as above, and optionally remove the
+TCP `replication` listener from the master and `worker_replication_port` from
+the worker config.
+
+To migrate apps to use `synapse.app.generic_worker`, simply update the
+`worker_app` option in the worker configs and wherever the workers are started
+(e.g. in systemd service files; this is not required for synctl).
+
+
 ## Architectural diagram
 
 The following shows an example setup using Redis and a reverse proxy:
diff --git a/mypy.ini b/mypy.ini
index a61009b197..c69cb5dc40 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -81,3 +81,6 @@ ignore_missing_imports = True
 
 [mypy-rust_python_jaeger_reporter.*]
 ignore_missing_imports = True
+
+[mypy-nacl.*]
+ignore_missing_imports = True
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index ae5e1810fc..a34bdf1830 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -67,7 +67,7 @@ logger = logging.getLogger("synapse_port_db")
 
 
 BOOLEAN_COLUMNS = {
-    "events": ["processed", "outlier", "contains_url", "count_as_unread"],
+    "events": ["processed", "outlier", "contains_url"],
     "rooms": ["is_public"],
     "event_edges": ["is_state"],
     "presence_list": ["accepted"],
diff --git a/synapse/__init__.py b/synapse/__init__.py
index f70381bc71..1282d19b3c 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -48,7 +48,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.18.0"
+__version__ = "1.19.1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 2178e623da..d8190f92ab 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -13,12 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import Optional
+from typing import List, Optional, Tuple
 
 import pymacaroons
 from netaddr import IPAddress
 
-from twisted.internet import defer
 from twisted.web.server import Request
 
 import synapse.types
@@ -80,13 +79,14 @@ class Auth(object):
         self._track_appservice_user_ips = hs.config.track_appservice_user_ips
         self._macaroon_secret_key = hs.config.macaroon_secret_key
 
-    @defer.inlineCallbacks
-    def check_from_context(self, room_version: str, event, context, do_sig_check=True):
-        prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids())
-        auth_events_ids = yield self.compute_auth_events(
+    async def check_from_context(
+        self, room_version: str, event, context, do_sig_check=True
+    ):
+        prev_state_ids = await context.get_prev_state_ids()
+        auth_events_ids = self.compute_auth_events(
             event, prev_state_ids, for_verification=True
         )
-        auth_events = yield self.store.get_events(auth_events_ids)
+        auth_events = await self.store.get_events(auth_events_ids)
         auth_events = {(e.type, e.state_key): e for e in auth_events.values()}
 
         room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
@@ -94,14 +94,13 @@ class Auth(object):
             room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
         )
 
-    @defer.inlineCallbacks
-    def check_user_in_room(
+    async def check_user_in_room(
         self,
         room_id: str,
         user_id: str,
         current_state: Optional[StateMap[EventBase]] = None,
         allow_departed_users: bool = False,
-    ):
+    ) -> EventBase:
         """Check if the user is in the room, or was at some point.
         Args:
             room_id: The room to check.
@@ -119,37 +118,35 @@ class Auth(object):
         Raises:
             AuthError if the user is/was not in the room.
         Returns:
-            Deferred[Optional[EventBase]]:
-                Membership event for the user if the user was in the
-                room. This will be the join event if they are currently joined to
-                the room. This will be the leave event if they have left the room.
+            Membership event for the user if the user was in the
+            room. This will be the join event if they are currently joined to
+            the room. This will be the leave event if they have left the room.
         """
         if current_state:
             member = current_state.get((EventTypes.Member, user_id), None)
         else:
-            member = yield defer.ensureDeferred(
-                self.state.get_current_state(
-                    room_id=room_id, event_type=EventTypes.Member, state_key=user_id
-                )
+            member = await self.state.get_current_state(
+                room_id=room_id, event_type=EventTypes.Member, state_key=user_id
             )
-        membership = member.membership if member else None
 
-        if membership == Membership.JOIN:
-            return member
+        if member:
+            membership = member.membership
 
-        # XXX this looks totally bogus. Why do we not allow users who have been banned,
-        # or those who were members previously and have been re-invited?
-        if allow_departed_users and membership == Membership.LEAVE:
-            forgot = yield self.store.did_forget(user_id, room_id)
-            if not forgot:
+            if membership == Membership.JOIN:
                 return member
 
+            # XXX this looks totally bogus. Why do we not allow users who have been banned,
+            # or those who were members previously and have been re-invited?
+            if allow_departed_users and membership == Membership.LEAVE:
+                forgot = await self.store.did_forget(user_id, room_id)
+                if not forgot:
+                    return member
+
         raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
 
-    @defer.inlineCallbacks
-    def check_host_in_room(self, room_id, host):
+    async def check_host_in_room(self, room_id, host):
         with Measure(self.clock, "check_host_in_room"):
-            latest_event_ids = yield self.store.is_host_joined(room_id, host)
+            latest_event_ids = await self.store.is_host_joined(room_id, host)
             return latest_event_ids
 
     def can_federate(self, event, auth_events):
@@ -160,14 +157,13 @@ class Auth(object):
     def get_public_keys(self, invite_event):
         return event_auth.get_public_keys(invite_event)
 
-    @defer.inlineCallbacks
-    def get_user_by_req(
+    async def get_user_by_req(
         self,
         request: Request,
         allow_guest: bool = False,
         rights: str = "access",
         allow_expired: bool = False,
-    ):
+    ) -> synapse.types.Requester:
         """ Get a registered user's ID.
 
         Args:
@@ -180,7 +176,7 @@ class Auth(object):
                 /login will deliver access tokens regardless of expiration.
 
         Returns:
-            defer.Deferred: resolves to a `synapse.types.Requester` object
+            Resolves to the requester
         Raises:
             InvalidClientCredentialsError if no user by that token exists or the token
                 is invalid.
@@ -194,14 +190,14 @@ class Auth(object):
 
             access_token = self.get_access_token_from_request(request)
 
-            user_id, app_service = yield self._get_appservice_user_id(request)
+            user_id, app_service = await self._get_appservice_user_id(request)
             if user_id:
                 request.authenticated_entity = user_id
                 opentracing.set_tag("authenticated_entity", user_id)
                 opentracing.set_tag("appservice_id", app_service.id)
 
                 if ip_addr and self._track_appservice_user_ips:
-                    yield self.store.insert_client_ip(
+                    await self.store.insert_client_ip(
                         user_id=user_id,
                         access_token=access_token,
                         ip=ip_addr,
@@ -211,7 +207,7 @@ class Auth(object):
 
                 return synapse.types.create_requester(user_id, app_service=app_service)
 
-            user_info = yield self.get_user_by_access_token(
+            user_info = await self.get_user_by_access_token(
                 access_token, rights, allow_expired=allow_expired
             )
             user = user_info["user"]
@@ -221,7 +217,7 @@ class Auth(object):
             # Deny the request if the user account has expired.
             if self._account_validity.enabled and not allow_expired:
                 user_id = user.to_string()
-                expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
+                expiration_ts = await self.store.get_expiration_ts_for_user(user_id)
                 if (
                     expiration_ts is not None
                     and self.clock.time_msec() >= expiration_ts
@@ -235,7 +231,7 @@ class Auth(object):
             device_id = user_info.get("device_id")
 
             if user and access_token and ip_addr:
-                yield self.store.insert_client_ip(
+                await self.store.insert_client_ip(
                     user_id=user.to_string(),
                     access_token=access_token,
                     ip=ip_addr,
@@ -261,8 +257,7 @@ class Auth(object):
         except KeyError:
             raise MissingClientTokenError()
 
-    @defer.inlineCallbacks
-    def _get_appservice_user_id(self, request):
+    async def _get_appservice_user_id(self, request):
         app_service = self.store.get_app_service_by_token(
             self.get_access_token_from_request(request)
         )
@@ -283,14 +278,13 @@ class Auth(object):
 
         if not app_service.is_interested_in_user(user_id):
             raise AuthError(403, "Application service cannot masquerade as this user.")
-        if not (yield self.store.get_user_by_id(user_id)):
+        if not (await self.store.get_user_by_id(user_id)):
             raise AuthError(403, "Application service has not registered this user")
         return user_id, app_service
 
-    @defer.inlineCallbacks
-    def get_user_by_access_token(
+    async def get_user_by_access_token(
         self, token: str, rights: str = "access", allow_expired: bool = False,
-    ):
+    ) -> dict:
         """ Validate access token and get user_id from it
 
         Args:
@@ -300,7 +294,7 @@ class Auth(object):
             allow_expired: If False, raises an InvalidClientTokenError
                 if the token is expired
         Returns:
-            Deferred[dict]: dict that includes:
+            dict that includes:
                `user` (UserID)
                `is_guest` (bool)
                `token_id` (int|None): access token id. May be None if guest
@@ -314,7 +308,7 @@ class Auth(object):
 
         if rights == "access":
             # first look in the database
-            r = yield self._look_up_user_by_access_token(token)
+            r = await self._look_up_user_by_access_token(token)
             if r:
                 valid_until_ms = r["valid_until_ms"]
                 if (
@@ -352,7 +346,7 @@ class Auth(object):
                 # It would of course be much easier to store guest access
                 # tokens in the database as well, but that would break existing
                 # guest tokens.
-                stored_user = yield self.store.get_user_by_id(user_id)
+                stored_user = await self.store.get_user_by_id(user_id)
                 if not stored_user:
                     raise InvalidClientTokenError("Unknown user_id %s" % user_id)
                 if not stored_user["is_guest"]:
@@ -482,9 +476,8 @@ class Auth(object):
         now = self.hs.get_clock().time_msec()
         return now < expiry
 
-    @defer.inlineCallbacks
-    def _look_up_user_by_access_token(self, token):
-        ret = yield self.store.get_user_by_access_token(token)
+    async def _look_up_user_by_access_token(self, token):
+        ret = await self.store.get_user_by_access_token(token)
         if not ret:
             return None
 
@@ -507,7 +500,7 @@ class Auth(object):
             logger.warning("Unrecognised appservice access token.")
             raise InvalidClientTokenError()
         request.authenticated_entity = service.sender
-        return defer.succeed(service)
+        return service
 
     async def is_server_admin(self, user: UserID) -> bool:
         """ Check if the given user is a local server admin.
@@ -522,7 +515,7 @@ class Auth(object):
 
     def compute_auth_events(
         self, event, current_state_ids: StateMap[str], for_verification: bool = False,
-    ):
+    ) -> List[str]:
         """Given an event and current state return the list of event IDs used
         to auth an event.
 
@@ -530,11 +523,11 @@ class Auth(object):
         should be added to the event's `auth_events`.
 
         Returns:
-            defer.Deferred(list[str]): List of event IDs.
+            List of event IDs.
         """
 
         if event.type == EventTypes.Create:
-            return defer.succeed([])
+            return []
 
         # Currently we ignore the `for_verification` flag even though there are
         # some situations where we can drop particular auth events when adding
@@ -553,7 +546,7 @@ class Auth(object):
             if auth_ev_id:
                 auth_ids.append(auth_ev_id)
 
-        return defer.succeed(auth_ids)
+        return auth_ids
 
     async def check_can_change_room_list(self, room_id: str, user: UserID):
         """Determine whether the user is allowed to edit the room's entry in the
@@ -636,10 +629,9 @@ class Auth(object):
 
             return query_params[0].decode("ascii")
 
-    @defer.inlineCallbacks
-    def check_user_in_room_or_world_readable(
+    async def check_user_in_room_or_world_readable(
         self, room_id: str, user_id: str, allow_departed_users: bool = False
-    ):
+    ) -> Tuple[str, Optional[str]]:
         """Checks that the user is or was in the room or the room is world
         readable. If it isn't then an exception is raised.
 
@@ -650,10 +642,9 @@ class Auth(object):
                 members but have now departed
 
         Returns:
-            Deferred[tuple[str, str|None]]: Resolves to the current membership of
-                the user in the room and the membership event ID of the user. If
-                the user is not in the room and never has been, then
-                `(Membership.JOIN, None)` is returned.
+            Resolves to the current membership of the user in the room and the
+            membership event ID of the user. If the user is not in the room and
+            never has been, then `(Membership.JOIN, None)` is returned.
         """
 
         try:
@@ -662,15 +653,13 @@ class Auth(object):
             #  * The user is a non-guest user, and was ever in the room
             #  * The user is a guest user, and has joined the room
             # else it will throw.
-            member_event = yield self.check_user_in_room(
+            member_event = await self.check_user_in_room(
                 room_id, user_id, allow_departed_users=allow_departed_users
             )
             return member_event.membership, member_event.event_id
         except AuthError:
-            visibility = yield defer.ensureDeferred(
-                self.state.get_current_state(
-                    room_id, EventTypes.RoomHistoryVisibility, ""
-                )
+            visibility = await self.state.get_current_state(
+                room_id, EventTypes.RoomHistoryVisibility, ""
             )
             if (
                 visibility
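
The recurring pattern in this file, and in most of this diff, is a mechanical conversion of `@defer.inlineCallbacks` generators into native coroutines. A minimal before/after sketch, with a stand-in `store` object:

```python
# Conversion pattern used throughout this diff; `store` is a stand-in.
from twisted.internet import defer

# Before: generator-based coroutine yielding Deferreds.
@defer.inlineCallbacks
def get_user_old(store, token):
    ret = yield store.get_user_by_access_token(token)
    return ret

# After: native coroutine. Twisted's coroutine support lets Deferreds be
# awaited directly, so defer.ensureDeferred() wrappers can also be dropped.
async def get_user_new(store, token):
    ret = await store.get_user_by_access_token(token)
    return ret
```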
diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py
index 5c499b6b4e..49093bf181 100644
--- a/synapse/api/auth_blocking.py
+++ b/synapse/api/auth_blocking.py
@@ -15,8 +15,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.api.constants import LimitBlockingTypes, UserTypes
 from synapse.api.errors import Codes, ResourceLimitError
 from synapse.config.server import is_threepid_reserved
@@ -36,8 +34,7 @@ class AuthBlocking(object):
         self._limit_usage_by_mau = hs.config.limit_usage_by_mau
         self._mau_limits_reserved_threepids = hs.config.mau_limits_reserved_threepids
 
-    @defer.inlineCallbacks
-    def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
+    async def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
         """Checks if the user should be rejected for some external reason,
         such as monthly active user limiting or global disable flag
 
@@ -60,7 +57,7 @@ class AuthBlocking(object):
         if user_id is not None:
             if user_id == self._server_notices_mxid:
                 return
-            if (yield self.store.is_support_user(user_id)):
+            if await self.store.is_support_user(user_id):
                 return
 
         if self._hs_disabled:
@@ -76,11 +73,11 @@ class AuthBlocking(object):
 
             # If the user is already part of the MAU cohort or a trial user
             if user_id:
-                timestamp = yield self.store.user_last_seen_monthly_active(user_id)
+                timestamp = await self.store.user_last_seen_monthly_active(user_id)
                 if timestamp:
                     return
 
-                is_trial = yield self.store.is_trial_user(user_id)
+                is_trial = await self.store.is_trial_user(user_id)
                 if is_trial:
                     return
             elif threepid:
@@ -93,7 +90,7 @@ class AuthBlocking(object):
                 # allow registration. Support users are excluded from MAU checks.
                 return
             # Else if there is no room in the MAU bucket, bail
-            current_mau = yield self.store.get_monthly_active_count()
+            current_mau = await self.store.get_monthly_active_count()
             if current_mau >= self._max_mau_value:
                 raise ResourceLimitError(
                     403,
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index b3bab1aa52..6e40630ab6 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -238,14 +238,16 @@ class InteractiveAuthIncompleteError(Exception):
     (This indicates we should return a 401 with 'result' as the body)
 
     Attributes:
+        session_id: The ID of the ongoing interactive auth session.
         result: the server response to the request, which should be
             passed back to the client
     """
 
-    def __init__(self, result: "JsonDict"):
+    def __init__(self, session_id: str, result: "JsonDict"):
         super(InteractiveAuthIncompleteError, self).__init__(
             "Interactive auth not yet complete"
         )
+        self.session_id = session_id
         self.result = result
 
 
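To illustrate the new two-argument form, a minimal sketch of catching the error in a servlet; the handler call and response helper here are assumptions for illustration, not part of this patch:

    try:
        params, session_id = await auth_handler.validate_user_via_ui_auth(
            requester, request, body, clientip, "modify account"
        )
    except InteractiveAuthIncompleteError as e:
        # e.session_id identifies the in-flight UI-auth session; e.result is
        # the JSON body that should be returned with the 401 response.
        logger.info("UI auth incomplete for session %s", e.session_id)
        respond_with_json(request, 401, e.result)
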
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index f988f62a1e..7393d6cb74 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -21,8 +21,6 @@ import jsonschema
 from canonicaljson import json
 from jsonschema import FormatChecker
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventContentFields
 from synapse.api.errors import SynapseError
 from synapse.storage.presence import UserPresenceState
@@ -137,9 +135,8 @@ class Filtering(object):
         super(Filtering, self).__init__()
         self.store = hs.get_datastore()
 
-    @defer.inlineCallbacks
-    def get_user_filter(self, user_localpart, filter_id):
-        result = yield self.store.get_user_filter(user_localpart, filter_id)
+    async def get_user_filter(self, user_localpart, filter_id):
+        result = await self.store.get_user_filter(user_localpart, filter_id)
         return FilterCollection(result)
 
     def add_user_filter(self, user_localpart, user_filter):
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index ec6b3a69a2..e62ae50ac2 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -17,6 +17,7 @@ from collections import OrderedDict
 from typing import Any, Optional, Tuple
 
 from synapse.api.errors import LimitExceededError
+from synapse.types import Requester
 from synapse.util import Clock
 
 
@@ -43,6 +44,42 @@ class Ratelimiter(object):
         #   * The rate_hz of this particular entry. This can vary per request
         self.actions = OrderedDict()  # type: OrderedDict[Any, Tuple[float, int, float]]
 
+    def can_requester_do_action(
+        self,
+        requester: Requester,
+        rate_hz: Optional[float] = None,
+        burst_count: Optional[int] = None,
+        update: bool = True,
+        _time_now_s: Optional[int] = None,
+    ) -> Tuple[bool, float]:
+        """Can the requester perform the action?
+
+        Args:
+            requester: The requester to key off when rate limiting. The user property
+                will be used.
+            rate_hz: The long term number of actions that can be performed in a second.
+                Overrides the value set during instantiation if set.
+            burst_count: How many actions that can be performed before being limited.
+                Overrides the value set during instantiation if set.
+            update: Whether to count this check as performing the action
+            _time_now_s: The current time. Optional, defaults to the current time according
+                to self.clock. Only used by tests.
+
+        Returns:
+            A tuple containing:
+                * A bool indicating if they can perform the action now
+                * The reactor timestamp for when the action can be performed next.
+                  -1 if rate_hz is less than or equal to zero
+        """
+        # Disable rate limiting of users belonging to any AS that is configured
+        # not to be rate limited in its registration file (rate_limited: true|false).
+        if requester.app_service and not requester.app_service.is_rate_limited():
+            return True, -1.0
+
+        return self.can_do_action(
+            requester.user.to_string(), rate_hz, burst_count, update, _time_now_s
+        )
+
     def can_do_action(
         self,
         key: Any,
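A short sketch of how a caller might drive the new requester-aware entry point (mirroring the room_member.py changes later in this diff); the `limiter`, `clock`, and rate values are illustrative:

    allowed, time_allowed = limiter.can_requester_do_action(
        requester, rate_hz=0.1, burst_count=3
    )
    if not allowed:
        # time_allowed is a reactor timestamp, so convert it to a
        # relative delay for the client
        raise LimitExceededError(
            retry_after_ms=int(1000 * (time_allowed - clock.time()))
        )
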
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 1a16d0b9f8..739b013d4c 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -123,8 +123,9 @@ from synapse.rest.client.v2_alpha.account_data import (
 from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
 from synapse.rest.client.versions import VersionsRestServlet
+from synapse.rest.health import HealthResource
 from synapse.rest.key.v2 import KeyApiV2Resource
-from synapse.server import HomeServer
+from synapse.server import HomeServer, cache_in_self
 from synapse.storage.databases.main.censor_events import CensorEventsStore
 from synapse.storage.databases.main.media_repository import MediaRepositoryStore
 from synapse.storage.databases.main.monthly_active_users import (
@@ -493,7 +494,10 @@ class GenericWorkerServer(HomeServer):
         site_tag = listener_config.http_options.tag
         if site_tag is None:
             site_tag = port
-        resources = {}
+
+        # We always include a health resource.
+        resources = {"/health": HealthResource()}
+
         for res in listener_config.http_options.resources:
             for name in res.names:
                 if name == "metrics":
@@ -631,10 +635,12 @@ class GenericWorkerServer(HomeServer):
     async def remove_pusher(self, app_id, push_key, user_id):
         self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
 
-    def build_replication_data_handler(self):
+    @cache_in_self
+    def get_replication_data_handler(self):
         return GenericWorkerReplicationHandler(self)
 
-    def build_presence_handler(self):
+    @cache_in_self
+    def get_presence_handler(self):
         return GenericWorkerPresence(self)
 
 
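The rename from build_* to @cache_in_self get_* methods leans on the memoising decorator imported above from synapse.server. Roughly, it caches the built dependency on the instance; a minimal sketch of that pattern (the real decorator's attribute naming and re-entrancy handling may differ):

    import functools

    def cache_in_self(builder):
        # derive a per-instance cache attribute from the method name,
        # e.g. get_presence_handler -> self._presence_handler
        cache_name = "_" + builder.__name__[len("get_"):]

        @functools.wraps(builder)
        def _get(self):
            dep = getattr(self, cache_name, None)
            if dep is None:
                dep = builder(self)
                setattr(self, cache_name, dep)
            return dep

        return _get
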
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index d87a77718e..98d0d14a12 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -68,6 +68,7 @@ from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
 from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
 from synapse.rest import ClientRestResource
 from synapse.rest.admin import AdminRestResource
+from synapse.rest.health import HealthResource
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.rest.well_known import WellKnownResource
 from synapse.server import HomeServer
@@ -98,7 +99,9 @@ class SynapseHomeServer(HomeServer):
         if site_tag is None:
             site_tag = port
 
-        resources = {}
+        # We always include a health resource.
+        resources = {"/health": HealthResource()}
+
         for res in listener_config.http_options.resources:
             for name in res.names:
                 if name == "openid" and "federation" in res.names:
diff --git a/synapse/config/_util.py b/synapse/config/_util.py
new file mode 100644
index 0000000000..cd31b1c3c9
--- /dev/null
+++ b/synapse/config/_util.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, List
+
+import jsonschema
+
+from synapse.config._base import ConfigError
+from synapse.types import JsonDict
+
+
+def validate_config(json_schema: JsonDict, config: Any, config_path: List[str]) -> None:
+    """Validates a config setting against a JsonSchema definition
+
+    This can be used to validate a section of the config file against a schema
+    definition. If the validation fails, a ConfigError is raised with a textual
+    description of the problem.
+
+    Args:
+        json_schema: the schema to validate against
+        config: the configuration value to be validated
+        config_path: the path within the config file. This will be used as a basis
+           for the error message.
+    """
+    try:
+        jsonschema.validate(config, json_schema)
+    except jsonschema.ValidationError as e:
+        # copy `config_path` before modifying it.
+        path = list(config_path)
+        for p in list(e.path):
+            if isinstance(p, int):
+                path.append("<item %i>" % p)
+            else:
+                path.append(str(p))
+
+        raise ConfigError(
+            "Unable to parse configuration: %s at %s" % (e.message, ".".join(path))
+        )
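A usage sketch for the new helper; the schema and section name are made up, and the exact message text comes from jsonschema:

    from synapse.config._base import ConfigError
    from synapse.config._util import validate_config

    schema = {"type": "object", "properties": {"enabled": {"type": "boolean"}}}

    try:
        validate_config(schema, {"enabled": "yes"}, config_path=["my_section"])
    except ConfigError as e:
        # e.g. "Unable to parse configuration: 'yes' is not of type
        # 'boolean' at my_section.enabled"
        print(e)
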
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index dd775a97e8..c96e6ef62a 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -55,24 +55,33 @@ formatters:
         format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - \
 %(request)s - %(message)s'
 
-filters:
-    context:
-        (): synapse.logging.context.LoggingContextFilter
-        request: ""
-
 handlers:
     file:
-        class: logging.handlers.RotatingFileHandler
+        class: logging.handlers.TimedRotatingFileHandler
         formatter: precise
         filename: ${log_file}
-        maxBytes: 104857600
-        backupCount: 10
-        filters: [context]
+        when: midnight
+        backupCount: 3  # Does not include the current log file.
         encoding: utf8
+
+    # Default to buffering writes to the log file for efficiency. This means
+    # there will be a delay before INFO/DEBUG logs are written to disk, but
+    # WARNING/ERROR logs will still be flushed immediately.
+    buffer:
+        class: logging.handlers.MemoryHandler
+        target: file
+        # The capacity is the number of log lines that are buffered before
+        # being written to disk. Increasing this will lead to better
+        # performance, at the expense of it taking longer for log lines to
+        # be written to disk.
+        capacity: 10
+        flushLevel: 30  # Flush for WARNING logs as well
+
+    # A handler that writes logs to stderr. Unused by default, but can be used
+    # instead of "buffer" and "file" in the logger handlers.
     console:
         class: logging.StreamHandler
         formatter: precise
-        filters: [context]
 
 loggers:
     synapse.storage.SQL:
@@ -80,9 +89,24 @@ loggers:
         # information such as access tokens.
         level: INFO
 
+    twisted:
+        # We send the twisted logging directly to the file handler,
+        # to work around https://github.com/matrix-org/synapse/issues/3471
+        # when using "buffer" logger. Use "console" to log to stderr instead.
+        handlers: [file]
+        propagate: false
+
 root:
     level: INFO
-    handlers: [file, console]
+
+    # Write logs to the `buffer` handler, which will buffer them together in memory,
+    # then write them to a file.
+    #
+    # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
+    # also need to update the configuration for the `twisted` logger above, in
+    # this case.)
+    #
+    handlers: [buffer]
 
 disable_existing_loggers: false
 """
@@ -168,11 +192,26 @@ def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner):
 
         handler = logging.StreamHandler()
         handler.setFormatter(formatter)
-        handler.addFilter(LoggingContextFilter(request=""))
         logger.addHandler(handler)
     else:
         logging.config.dictConfig(log_config)
 
+    # We add a log record factory that runs all messages through the
+    # LoggingContextFilter so that we get the context *at the time we log*
+    # rather than when we write to a handler. This can be done in config using
+    # filter options, but care must be taken when using e.g. MemoryHandler to buffer
+    # writes.
+
+    log_filter = LoggingContextFilter(request="")
+    old_factory = logging.getLogRecordFactory()
+
+    def factory(*args, **kwargs):
+        record = old_factory(*args, **kwargs)
+        log_filter.filter(record)
+        return record
+
+    logging.setLogRecordFactory(factory)
+
     # Route Twisted's native logging through to the standard library logging
     # system.
     observer = STDLibLogObserver()
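The record-factory trick is plain stdlib logging: anything stamped onto the record at creation time survives buffering. A self-contained sketch of the same pattern, with a hypothetical filter standing in for LoggingContextFilter:

    import logging

    class RequestStamper(logging.Filter):
        def filter(self, record):
            # runs when the record is created, not when a (possibly
            # buffering) handler finally writes it out
            record.request = "GET-123"
            return True

    stamper = RequestStamper()
    old_factory = logging.getLogRecordFactory()

    def factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        stamper.filter(record)
        return record

    logging.setLogRecordFactory(factory)
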
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
index 293643b2de..9277b5f342 100644
--- a/synapse/config/saml2_config.py
+++ b/synapse/config/saml2_config.py
@@ -15,7 +15,9 @@
 # limitations under the License.
 
 import logging
+from typing import Any, List
 
+import attr
 import jinja2
 import pkg_resources
 
@@ -23,6 +25,7 @@ from synapse.python_dependencies import DependencyException, check_requirements
 from synapse.util.module_loader import load_module, load_python_module
 
 from ._base import Config, ConfigError
+from ._util import validate_config
 
 logger = logging.getLogger(__name__)
 
@@ -80,6 +83,11 @@ class SAML2Config(Config):
 
         self.saml2_enabled = True
 
+        attribute_requirements = saml2_config.get("attribute_requirements") or []
+        self.attribute_requirements = _parse_attribute_requirements_def(
+            attribute_requirements
+        )
+
         self.saml2_grandfathered_mxid_source_attribute = saml2_config.get(
             "grandfathered_mxid_source_attribute", "uid"
         )
@@ -341,6 +349,17 @@ class SAML2Config(Config):
           #
           #grandfathered_mxid_source_attribute: upn
 
+          # It is possible to configure Synapse to only allow logins if SAML attributes
+          # match particular values. The requirements can be listed under
+          # `attribute_requirements` as shown below. All of the listed attributes must
+          # match for the login to be permitted.
+          #
+          #attribute_requirements:
+          #  - attribute: userGroup
+          #    value: "staff"
+          #  - attribute: department
+          #    value: "sales"
+
           # Directory in which Synapse will try to find the template files below.
           # If not set, default templates from within the Synapse package will be used.
           #
@@ -368,3 +387,34 @@ class SAML2Config(Config):
         """ % {
             "config_dir_path": config_dir_path
         }
+
+
+@attr.s(frozen=True)
+class SamlAttributeRequirement:
+    """Object describing a single requirement for SAML attributes."""
+
+    attribute = attr.ib(type=str)
+    value = attr.ib(type=str)
+
+    JSON_SCHEMA = {
+        "type": "object",
+        "properties": {"attribute": {"type": "string"}, "value": {"type": "string"}},
+        "required": ["attribute", "value"],
+    }
+
+
+ATTRIBUTE_REQUIREMENTS_SCHEMA = {
+    "type": "array",
+    "items": SamlAttributeRequirement.JSON_SCHEMA,
+}
+
+
+def _parse_attribute_requirements_def(
+    attribute_requirements: Any,
+) -> List[SamlAttributeRequirement]:
+    validate_config(
+        ATTRIBUTE_REQUIREMENTS_SCHEMA,
+        attribute_requirements,
+        config_path=["saml2_config", "attribute_requirements"],
+    )
+    return [SamlAttributeRequirement(**x) for x in attribute_requirements]
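A quick sketch of the parse path, using the values from the sample config above; a malformed entry raises ConfigError before startup completes:

    reqs = _parse_attribute_requirements_def(
        [{"attribute": "userGroup", "value": "staff"}]
    )
    assert reqs[0] == SamlAttributeRequirement(attribute="userGroup", value="staff")

    # e.g. an entry missing "value" fails validation with something like:
    #   ConfigError: Unable to parse configuration: 'value' is a required
    #   property at saml2_config.attribute_requirements.<item 0>
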
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 848587d232..9f15ed109e 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -530,6 +530,21 @@ class ServerConfig(Config):
             "request_token_inhibit_3pid_errors", False,
         )
 
+        # List of users trialing the new experimental default push rules. This setting is
+        # not included in the sample configuration file on purpose as it's a temporary
+        # hack, so that some users can trial the new defaults without impacting every
+        # user on the homeserver.
+        users_new_default_push_rules = (
+            config.get("users_new_default_push_rules") or []
+        )  # type: list
+        if not isinstance(users_new_default_push_rules, list):
+            raise ConfigError("'users_new_default_push_rules' must be a list")
+
+        # Turn the list into a set to improve lookup speed.
+        self.users_new_default_push_rules = set(
+            users_new_default_push_rules
+        )  # type: set
+
     def has_tls_listener(self) -> bool:
         return any(listener.tls for listener in self.listeners)
 
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index a5a2a7815d..777c0f00b1 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -48,6 +48,14 @@ class ServerContextFactory(ContextFactory):
     connections."""
 
     def __init__(self, config):
+        # TODO: once pyOpenSSL exposes TLS_METHOD and SSL_CTX_set_min_proto_version,
+        # switch to those (see https://github.com/pyca/cryptography/issues/5379).
+        #
+        # note that, despite the confusing name, SSLv23_METHOD does *not* enforce SSLv2
+        # or v3, but is a synonym for TLS_METHOD, which allows the client and server
+        # to negotiate an appropriate version of TLS constrained by the version options
+        # set with context.set_options.
+        #
         self._context = SSL.Context(SSL.SSLv23_METHOD)
         self.configure_context(self._context, config)
 
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index 69b53ca2bc..9ed24380dd 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -17,6 +17,7 @@ from typing import Optional
 import attr
 from nacl.signing import SigningKey
 
+from synapse.api.auth import Auth
 from synapse.api.constants import MAX_DEPTH
 from synapse.api.errors import UnsupportedRoomVersionError
 from synapse.api.room_versions import (
@@ -27,6 +28,8 @@ from synapse.api.room_versions import (
 )
 from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
+from synapse.state import StateHandler
+from synapse.storage.databases.main import DataStore
 from synapse.types import EventID, JsonDict
 from synapse.util import Clock
 from synapse.util.stringutils import random_string
@@ -42,45 +45,46 @@ class EventBuilder(object):
 
     Attributes:
         room_version: Version of the target room
-        room_id (str)
-        type (str)
-        sender (str)
-        content (dict)
-        unsigned (dict)
-        internal_metadata (_EventInternalMetadata)
-
-        _state (StateHandler)
-        _auth (synapse.api.Auth)
-        _store (DataStore)
-        _clock (Clock)
-        _hostname (str): The hostname of the server creating the event
+        room_id
+        type
+        sender
+        content
+        unsigned
+        internal_metadata
+
+        _state
+        _auth
+        _store
+        _clock
+        _hostname: The hostname of the server creating the event
         _signing_key: The signing key to use to sign the event as the server
     """
 
-    _state = attr.ib()
-    _auth = attr.ib()
-    _store = attr.ib()
-    _clock = attr.ib()
-    _hostname = attr.ib()
-    _signing_key = attr.ib()
+    _state = attr.ib(type=StateHandler)
+    _auth = attr.ib(type=Auth)
+    _store = attr.ib(type=DataStore)
+    _clock = attr.ib(type=Clock)
+    _hostname = attr.ib(type=str)
+    _signing_key = attr.ib(type=SigningKey)
 
     room_version = attr.ib(type=RoomVersion)
 
-    room_id = attr.ib()
-    type = attr.ib()
-    sender = attr.ib()
+    room_id = attr.ib(type=str)
+    type = attr.ib(type=str)
+    sender = attr.ib(type=str)
 
-    content = attr.ib(default=attr.Factory(dict))
-    unsigned = attr.ib(default=attr.Factory(dict))
+    content = attr.ib(default=attr.Factory(dict), type=JsonDict)
+    unsigned = attr.ib(default=attr.Factory(dict), type=JsonDict)
 
     # These only exist on a subset of events, so they raise AttributeError if
     # someone tries to get them when they don't exist.
-    _state_key = attr.ib(default=None)
-    _redacts = attr.ib(default=None)
-    _origin_server_ts = attr.ib(default=None)
+    _state_key = attr.ib(default=None, type=Optional[str])
+    _redacts = attr.ib(default=None, type=Optional[str])
+    _origin_server_ts = attr.ib(default=None, type=Optional[int])
 
     internal_metadata = attr.ib(
-        default=attr.Factory(lambda: _EventInternalMetadata({}))
+        default=attr.Factory(lambda: _EventInternalMetadata({})),
+        type=_EventInternalMetadata,
     )
 
     @property
@@ -106,7 +110,7 @@ class EventBuilder(object):
         state_ids = await self._state.get_current_state_ids(
             self.room_id, prev_event_ids
         )
-        auth_ids = await self._auth.compute_auth_events(self, state_ids)
+        auth_ids = self._auth.compute_auth_events(self, state_ids)
 
         format_version = self.room_version.event_format
         if format_version == EventFormatVersions.V1:
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index 8280f8b900..c7f6cb3d73 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, List
+from typing import TYPE_CHECKING, List, Tuple
 
 from canonicaljson import json
 
@@ -54,7 +54,10 @@ class TransactionManager(object):
 
     @measure_func("_send_new_transaction")
     async def send_new_transaction(
-        self, destination: str, pending_pdus: List[EventBase], pending_edus: List[Edu]
+        self,
+        destination: str,
+        pending_pdus: List[Tuple[EventBase, int]],
+        pending_edus: List[Edu],
     ):
 
         # Make a transaction-sending opentracing span. This span follows on from
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index fbc56c351b..c9044a5019 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -101,7 +101,7 @@ class ApplicationServicesHandler(object):
 
                             async def start_scheduler():
                                 try:
-                                    return self.scheduler.start()
+                                    return await self.scheduler.start()
                                 except Exception:
                                     logger.error("Application Services Failure")
 
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index c7d921c21a..c24e7bafe0 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -162,7 +162,7 @@ class AuthHandler(BaseHandler):
         request_body: Dict[str, Any],
         clientip: str,
         description: str,
-    ) -> dict:
+    ) -> Tuple[dict, str]:
         """
         Checks that the user is who they claim to be, via a UI auth.
 
@@ -183,9 +183,14 @@ class AuthHandler(BaseHandler):
                          describes the operation happening on their account.
 
         Returns:
-            The parameters for this request (which may
+            A tuple of (params, session_id).
+
+                'params' contains the parameters for this request (which may
                 have been given only in a previous call).
 
+                'session_id' is the ID of this session, either passed in by the
+                client or assigned by this call.
+
         Raises:
             InteractiveAuthIncompleteError if the client has not yet completed
                 any of the permitted login flows
@@ -207,7 +212,7 @@ class AuthHandler(BaseHandler):
         flows = [[login_type] for login_type in self._supported_ui_auth_types]
 
         try:
-            result, params, _ = await self.check_auth(
+            result, params, session_id = await self.check_ui_auth(
                 flows, request, request_body, clientip, description
             )
         except LoginError:
@@ -230,7 +235,7 @@ class AuthHandler(BaseHandler):
         if user_id != requester.user.to_string():
             raise AuthError(403, "Invalid auth")
 
-        return params
+        return params, session_id
 
     def get_enabled_auth_types(self):
         """Return the enabled user-interactive authentication types
@@ -240,7 +245,7 @@ class AuthHandler(BaseHandler):
         """
         return self.checkers.keys()
 
-    async def check_auth(
+    async def check_ui_auth(
         self,
         flows: List[List[str]],
         request: SynapseRequest,
@@ -363,7 +368,7 @@ class AuthHandler(BaseHandler):
 
         if not authdict:
             raise InteractiveAuthIncompleteError(
-                self._auth_dict_for_flows(flows, session.session_id)
+                session.session_id, self._auth_dict_for_flows(flows, session.session_id)
             )
 
         # check auth type currently being presented
@@ -410,7 +415,7 @@ class AuthHandler(BaseHandler):
         ret = self._auth_dict_for_flows(flows, session.session_id)
         ret["completed"] = list(creds)
         ret.update(errordict)
-        raise InteractiveAuthIncompleteError(ret)
+        raise InteractiveAuthIncompleteError(session.session_id, ret)
 
     async def add_oob_auth(
         self, stagetype: str, authdict: Dict[str, Any], clientip: str
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 71a89f09c7..1924636c4d 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -57,13 +57,10 @@ class EventStreamHandler(BaseHandler):
         timeout=0,
         as_client_event=True,
         affect_presence=True,
-        only_keys=None,
         room_id=None,
         is_guest=False,
     ):
         """Fetches the events stream for a given user.
-
-        If `only_keys` is not None, events from keys will be sent down.
         """
 
         if room_id:
@@ -93,7 +90,6 @@ class EventStreamHandler(BaseHandler):
                 auth_user,
                 pagin_config,
                 timeout,
-                only_keys=only_keys,
                 is_guest=is_guest,
                 explicit_room_id=room_id,
             )
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index b3764dedae..593932adb7 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -2064,7 +2064,7 @@ class FederationHandler(BaseHandler):
 
         if not auth_events:
             prev_state_ids = await context.get_prev_state_ids()
-            auth_events_ids = await self.auth.compute_auth_events(
+            auth_events_ids = self.auth.compute_auth_events(
                 event, prev_state_ids, for_verification=True
             )
             auth_events_x = await self.store.get_events(auth_events_ids)
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 43901d0934..2643438e84 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, List, Optional, Tuple
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
 
 from canonicaljson import encode_canonical_json, json
 
@@ -93,11 +93,11 @@ class MessageHandler(object):
 
     async def get_room_data(
         self,
-        user_id: str = None,
-        room_id: str = None,
-        event_type: Optional[str] = None,
-        state_key: str = "",
-        is_guest: bool = False,
+        user_id: str,
+        room_id: str,
+        event_type: str,
+        state_key: str,
+        is_guest: bool,
     ) -> dict:
         """ Get data from a room.
 
@@ -407,7 +407,7 @@ class EventCreationHandler(object):
         #
         # map from room id to time-of-last-attempt.
         #
-        self._rooms_to_exclude_from_dummy_event_insertion = {}  # type: dict[str, int]
+        self._rooms_to_exclude_from_dummy_event_insertion = {}  # type: Dict[str, int]
 
         # we need to construct a ConsentURIBuilder here, as it checks that the necessary
         # config options, but *only* if we have a configuration for which we are
@@ -707,7 +707,7 @@ class EventCreationHandler(object):
     async def create_and_send_nonmember_event(
         self,
         requester: Requester,
-        event_dict: EventBase,
+        event_dict: dict,
         ratelimit: bool = True,
         txn_id: Optional[str] = None,
     ) -> Tuple[EventBase, int]:
@@ -768,6 +768,15 @@ class EventCreationHandler(object):
         else:
             prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)
 
+        # we now ought to have some prev_events (unless it's a create event).
+        #
+        # do a quick sanity check here, rather than waiting until we've created the
+        # event and then try to auth it (which fails with a somewhat confusing "No
+        # create event in auth events")
+        assert (
+            builder.type == EventTypes.Create or len(prev_event_ids) > 0
+        ), "Attempting to create an event with no prev_events"
+
         event = await builder.build(prev_event_ids=prev_event_ids)
         context = await self.state.compute_event_context(event)
         if requester:
@@ -962,7 +971,7 @@ class EventCreationHandler(object):
             # Validate a newly added alias or newly added alt_aliases.
 
             original_alias = None
-            original_alt_aliases = set()
+            original_alt_aliases = []  # type: List[str]
 
             original_event_id = event.unsigned.get("replaces_state")
             if original_event_id:
@@ -1010,6 +1019,10 @@ class EventCreationHandler(object):
 
                 current_state_ids = await context.get_current_state_ids()
 
+                # We know this event is not an outlier, so this must be
+                # non-None.
+                assert current_state_ids is not None
+
                 state_to_include_ids = [
                     e_id
                     for k, e_id in current_state_ids.items()
@@ -1061,7 +1074,7 @@ class EventCreationHandler(object):
                     raise SynapseError(400, "Cannot redact event from a different room")
 
             prev_state_ids = await context.get_prev_state_ids()
-            auth_events_ids = await self.auth.compute_auth_events(
+            auth_events_ids = self.auth.compute_auth_events(
                 event, prev_state_ids, for_verification=True
             )
             auth_events = await self.store.get_events(auth_events_ids)
diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py
index 87f0c5e197..fa5ee5de8f 100644
--- a/synapse/handlers/oidc_handler.py
+++ b/synapse/handlers/oidc_handler.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 import json
 import logging
-from typing import Dict, Generic, List, Optional, Tuple, TypeVar
+from typing import TYPE_CHECKING, Dict, Generic, List, Optional, Tuple, TypeVar
 from urllib.parse import urlencode
 
 import attr
@@ -39,9 +39,11 @@ from synapse.http.server import respond_with_html
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import make_deferred_yieldable
 from synapse.push.mailer import load_jinja2_templates
-from synapse.server import HomeServer
 from synapse.types import UserID, map_username_to_mxid_localpart
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 logger = logging.getLogger(__name__)
 
 SESSION_COOKIE_NAME = b"oidc_session"
@@ -91,7 +93,7 @@ class OidcHandler:
     """Handles requests related to the OpenID Connect login flow.
     """
 
-    def __init__(self, hs: HomeServer):
+    def __init__(self, hs: "HomeServer"):
         self._callback_url = hs.config.oidc_callback_url  # type: str
         self._scopes = hs.config.oidc_scopes  # type: List[str]
         self._client_auth = ClientAuth(
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 8e409f24e8..9fcabb22c7 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -16,7 +16,7 @@
 import abc
 import logging
 from http import HTTPStatus
-from typing import Dict, Iterable, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union
 
 from unpaddedbase64 import encode_base64
 
@@ -37,6 +37,10 @@ from synapse.util.distributor import user_joined_room, user_left_room
 
 from ._base import BaseHandler
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
 logger = logging.getLogger(__name__)
 
 
@@ -48,7 +52,7 @@ class RoomMemberHandler(object):
 
     __metaclass__ = abc.ABCMeta
 
-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         self.hs = hs
         self.store = hs.get_datastore()
         self.auth = hs.get_auth()
@@ -206,24 +210,40 @@ class RoomMemberHandler(object):
             _, stream_id = await self.store.get_event_ordering(duplicate.event_id)
             return duplicate.event_id, stream_id
 
-        stream_id = await self.event_creation_handler.handle_new_client_event(
-            requester, event, context, extra_users=[target], ratelimit=ratelimit
-        )
-
         prev_state_ids = await context.get_prev_state_ids()
 
         prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
 
+        newly_joined = False
         if event.membership == Membership.JOIN:
-            # Only fire user_joined_room if the user has actually joined the
-            # room. Don't bother if the user is just changing their profile
-            # info.
             newly_joined = True
             if prev_member_event_id:
                 prev_member_event = await self.store.get_event(prev_member_event_id)
                 newly_joined = prev_member_event.membership != Membership.JOIN
+
+            # Only rate-limit if the user actually joined the room, otherwise we'll end
+            # up blocking profile updates.
             if newly_joined:
-                await self._user_joined_room(target, room_id)
+                time_now_s = self.clock.time()
+                (
+                    allowed,
+                    time_allowed,
+                ) = self._join_rate_limiter_local.can_requester_do_action(requester)
+
+                if not allowed:
+                    raise LimitExceededError(
+                        retry_after_ms=int(1000 * (time_allowed - time_now_s))
+                    )
+
+        stream_id = await self.event_creation_handler.handle_new_client_event(
+            requester, event, context, extra_users=[target], ratelimit=ratelimit,
+        )
+
+        if event.membership == Membership.JOIN and newly_joined:
+            # Only fire user_joined_room if the user has actually joined the
+            # room. Don't bother if the user is just changing their profile
+            # info.
+            await self._user_joined_room(target, room_id)
         elif event.membership == Membership.LEAVE:
             if prev_member_event_id:
                 prev_member_event = await self.store.get_event(prev_member_event_id)
@@ -453,22 +473,12 @@ class RoomMemberHandler(object):
                     # so don't really fit into the general auth process.
                     raise AuthError(403, "Guest access not allowed")
 
-            if is_host_in_room:
-                time_now_s = self.clock.time()
-                allowed, time_allowed = self._join_rate_limiter_local.can_do_action(
-                    requester.user.to_string(),
-                )
-
-                if not allowed:
-                    raise LimitExceededError(
-                        retry_after_ms=int(1000 * (time_allowed - time_now_s))
-                    )
-
-            else:
+            if not is_host_in_room:
                 time_now_s = self.clock.time()
-                allowed, time_allowed = self._join_rate_limiter_remote.can_do_action(
-                    requester.user.to_string(),
-                )
+                (
+                    allowed,
+                    time_allowed,
+                ) = self._join_rate_limiter_remote.can_requester_do_action(requester,)
 
                 if not allowed:
                     raise LimitExceededError(
@@ -1000,7 +1010,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
 
         check_complexity = self.hs.config.limit_remote_rooms.enabled
         if check_complexity and self.hs.config.limit_remote_rooms.admins_can_join:
-            check_complexity = not await self.hs.auth.is_server_admin(user)
+            check_complexity = not await self.auth.is_server_admin(user)
 
         if check_complexity:
             # Fetch the room complexity
diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py
index 2d506dc1f2..c1fcb98454 100644
--- a/synapse/handlers/saml_handler.py
+++ b/synapse/handlers/saml_handler.py
@@ -14,15 +14,16 @@
 # limitations under the License.
 import logging
 import re
-from typing import Callable, Dict, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Callable, Dict, Optional, Set, Tuple
 
 import attr
 import saml2
 import saml2.response
 from saml2.client import Saml2Client
 
-from synapse.api.errors import SynapseError
+from synapse.api.errors import AuthError, SynapseError
 from synapse.config import ConfigError
+from synapse.config.saml2_config import SamlAttributeRequirement
 from synapse.http.servlet import parse_string
 from synapse.http.site import SynapseRequest
 from synapse.module_api import ModuleApi
@@ -34,6 +35,9 @@ from synapse.types import (
 from synapse.util.async_helpers import Linearizer
 from synapse.util.iterutils import chunk_seq
 
+if TYPE_CHECKING:
+    import synapse.server
+
 logger = logging.getLogger(__name__)
 
 
@@ -49,7 +53,7 @@ class Saml2SessionData:
 
 
 class SamlHandler:
-    def __init__(self, hs):
+    def __init__(self, hs: "synapse.server.HomeServer"):
         self._saml_client = Saml2Client(hs.config.saml2_sp_config)
         self._auth = hs.get_auth()
         self._auth_handler = hs.get_auth_handler()
@@ -62,6 +66,7 @@ class SamlHandler:
         self._grandfathered_mxid_source_attribute = (
             hs.config.saml2_grandfathered_mxid_source_attribute
         )
+        self._saml2_attribute_requirements = hs.config.saml2.attribute_requirements
 
         # plugin to do custom mapping from saml response to mxid
         self._user_mapping_provider = hs.config.saml2_user_mapping_provider_class(
@@ -73,7 +78,7 @@ class SamlHandler:
         self._auth_provider_id = "saml"
 
         # a map from saml session id to Saml2SessionData object
-        self._outstanding_requests_dict = {}
+        self._outstanding_requests_dict = {}  # type: Dict[str, Saml2SessionData]
 
         # a lock on the mappings
         self._mapping_lock = Linearizer(name="saml_mapping", clock=self._clock)
@@ -165,11 +170,18 @@ class SamlHandler:
                 saml2.BINDING_HTTP_POST,
                 outstanding=self._outstanding_requests_dict,
             )
+        except saml2.response.UnsolicitedResponse as e:
+            # the pysaml2 library helpfully logs an ERROR here, but neglects to log
+            # the session ID. I don't really want to put the full text of the exception
+            # in the (user-visible) exception message, so let's log the exception here
+            # so we can track down the session IDs later.
+            logger.warning(str(e))
+            raise SynapseError(400, "Unexpected SAML2 login.")
         except Exception as e:
-            raise SynapseError(400, "Unable to parse SAML2 response: %s" % (e,))
+            raise SynapseError(400, "Unable to parse SAML2 response: %s." % (e,))
 
         if saml2_auth.not_signed:
-            raise SynapseError(400, "SAML2 response was not signed")
+            raise SynapseError(400, "SAML2 response was not signed.")
 
         logger.debug("SAML2 response: %s", saml2_auth.origxml)
         for assertion in saml2_auth.assertions:
@@ -188,6 +200,9 @@ class SamlHandler:
             saml2_auth.in_response_to, None
         )
 
+        for requirement in self._saml2_attribute_requirements:
+            _check_attribute_requirement(saml2_auth.ava, requirement)
+
         remote_user_id = self._user_mapping_provider.get_remote_user_id(
             saml2_auth, client_redirect_url
         )
@@ -294,6 +309,21 @@ class SamlHandler:
             del self._outstanding_requests_dict[reqid]
 
 
+def _check_attribute_requirement(ava: dict, req: SamlAttributeRequirement):
+    values = ava.get(req.attribute, [])
+    for v in values:
+        if v == req.value:
+            return
+
+    logger.info(
+        "SAML2 attribute %s did not match required value '%s' (was '%s')",
+        req.attribute,
+        req.value,
+        values,
+    )
+    raise AuthError(403, "You are not authorized to log in here.")
+
+
 DOT_REPLACE_PATTERN = re.compile(
     ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),))
 )
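For illustration, how the check behaves against a SAML assertion value map (`ava`); the attribute values here are hypothetical:

    ava = {"userGroup": ["users", "staff"], "department": ["sales"]}

    # passes: "staff" is among the userGroup values
    _check_attribute_requirement(
        ava, SamlAttributeRequirement(attribute="userGroup", value="staff")
    )

    # raises AuthError(403, "You are not authorized to log in here."):
    # _check_attribute_requirement(
    #     ava, SamlAttributeRequirement(attribute="userGroup", value="admins")
    # )
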
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 5b814731ca..e340b1e615 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -103,7 +103,6 @@ class JoinedSyncResult:
     account_data = attr.ib(type=List[JsonDict])
     unread_notifications = attr.ib(type=JsonDict)
     summary = attr.ib(type=Optional[JsonDict])
-    unread_count = attr.ib(type=int)
 
     def __nonzero__(self) -> bool:
         """Make the result appear empty if there are no updates. This is used
@@ -1895,10 +1894,6 @@ class SyncHandler(object):
 
         if room_builder.rtype == "joined":
             unread_notifications = {}  # type: Dict[str, str]
-
-            unread_count = await self.store.get_unread_message_count_for_user(
-                room_id, sync_config.user.to_string(),
-            )
             room_sync = JoinedSyncResult(
                 room_id=room_id,
                 timeline=batch,
@@ -1907,7 +1902,6 @@ class SyncHandler(object):
                 account_data=account_data_events,
                 unread_notifications=unread_notifications,
                 summary=summary,
-                unread_count=unread_count,
             )
 
             if room_sync or always_include:
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 529532a063..8aeb70cdec 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -297,7 +297,7 @@ class SimpleHttpClient(object):
         outgoing_requests_counter.labels(method).inc()
 
         # log request but strip `access_token` (AS requests for example include this)
-        logger.info("Sending request %s %s", method, redact_uri(uri))
+        logger.debug("Sending request %s %s", method, redact_uri(uri))
 
         with start_active_span(
             "outgoing-client-request",
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 0c02648015..369bf9c2fc 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -247,7 +247,7 @@ class MatrixHostnameEndpoint(object):
             port = server.port
 
             try:
-                logger.info("Connecting to %s:%i", host.decode("ascii"), port)
+                logger.debug("Connecting to %s:%i", host.decode("ascii"), port)
                 endpoint = HostnameEndpoint(self._reactor, host, port)
                 if self._tls_options:
                     endpoint = wrapClientTLS(self._tls_options, endpoint)
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 2a6373937a..738be43f46 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -29,10 +29,11 @@ from zope.interface import implementer
 
 from twisted.internet import defer, protocol
 from twisted.internet.error import DNSLookupError
-from twisted.internet.interfaces import IReactorPluggableNameResolver
+from twisted.internet.interfaces import IReactorPluggableNameResolver, IReactorTime
 from twisted.internet.task import _EPSILON, Cooperator
 from twisted.web._newclient import ResponseDone
 from twisted.web.http_headers import Headers
+from twisted.web.iweb import IResponse
 
 import synapse.metrics
 import synapse.util.retryutils
@@ -74,7 +75,7 @@ MAXINT = sys.maxsize
 _next_id = 1
 
 
-@attr.s
+@attr.s(frozen=True)
 class MatrixFederationRequest(object):
     method = attr.ib()
     """HTTP method
@@ -110,26 +111,52 @@ class MatrixFederationRequest(object):
     :type: str|None
     """
 
+    uri = attr.ib(init=False, type=bytes)
+    """The URI of this request
+    """
+
     def __attrs_post_init__(self):
         global _next_id
-        self.txn_id = "%s-O-%s" % (self.method, _next_id)
+        txn_id = "%s-O-%s" % (self.method, _next_id)
         _next_id = (_next_id + 1) % (MAXINT - 1)
 
+        object.__setattr__(self, "txn_id", txn_id)
+
+        destination_bytes = self.destination.encode("ascii")
+        path_bytes = self.path.encode("ascii")
+        if self.query:
+            query_bytes = encode_query_args(self.query)
+        else:
+            query_bytes = b""
+
+        # The object is frozen so we can pre-compute this.
+        uri = urllib.parse.urlunparse(
+            (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"")
+        )
+        object.__setattr__(self, "uri", uri)
+
     def get_json(self):
         if self.json_callback:
             return self.json_callback()
         return self.json
 
 
-async def _handle_json_response(reactor, timeout_sec, request, response):
+async def _handle_json_response(
+    reactor: IReactorTime,
+    timeout_sec: float,
+    request: MatrixFederationRequest,
+    response: IResponse,
+    start_ms: int,
+):
     """
     Reads the JSON body of a response, with a timeout
 
     Args:
-        reactor (IReactor): twisted reactor, for the timeout
-        timeout_sec (float): number of seconds to wait for response to complete
-        request (MatrixFederationRequest): the request that triggered the response
-        response (IResponse): response to the request
+        reactor: twisted reactor, for the timeout
+        timeout_sec: number of seconds to wait for response to complete
+        request: the request that triggered the response
+        response: response to the request
+        start_ms: Timestamp when request was made
 
     Returns:
         dict: parsed JSON response
@@ -143,23 +170,35 @@ async def _handle_json_response(reactor, timeout_sec, request, response):
         body = await make_deferred_yieldable(d)
     except TimeoutError as e:
         logger.warning(
-            "{%s} [%s] Timed out reading response", request.txn_id, request.destination,
+            "{%s} [%s] Timed out reading response - %s %s",
+            request.txn_id,
+            request.destination,
+            request.method,
+            request.uri.decode("ascii"),
         )
         raise RequestSendFailed(e, can_retry=True) from e
     except Exception as e:
         logger.warning(
-            "{%s} [%s] Error reading response: %s",
+            "{%s} [%s] Error reading response %s %s: %s",
             request.txn_id,
             request.destination,
+            request.method,
+            request.uri.decode("ascii"),
             e,
         )
         raise
+
+    time_taken_secs = reactor.seconds() - start_ms / 1000
+
     logger.info(
-        "{%s} [%s] Completed: %d %s",
+        "{%s} [%s] Completed request: %d %s in %.2f secs - %s %s",
         request.txn_id,
         request.destination,
         response.code,
         response.phrase.decode("ascii", errors="replace"),
+        time_taken_secs,
+        request.method,
+        request.uri.decode("ascii"),
     )
     return body
 
@@ -261,7 +300,9 @@ class MatrixFederationHttpClient(object):
             # 'M_UNRECOGNIZED' which some endpoints can return when omitting a
             # trailing slash on Synapse <= v0.99.3.
             logger.info("Retrying request with trailing slash")
-            request.path += "/"
+
+            # Request is frozen so we create a new instance
+            request = attr.evolve(request, path=request.path + "/")
 
             response = await self._send_request(request, **send_request_args)
 
@@ -373,9 +414,7 @@ class MatrixFederationHttpClient(object):
             else:
                 retries_left = MAX_SHORT_RETRIES
 
-            url_bytes = urllib.parse.urlunparse(
-                (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"")
-            )
+            url_bytes = request.uri
             url_str = url_bytes.decode("ascii")
 
             url_to_sign_bytes = urllib.parse.urlunparse(
@@ -402,7 +441,7 @@ class MatrixFederationHttpClient(object):
 
                     headers_dict[b"Authorization"] = auth_headers
 
-                    logger.info(
+                    logger.debug(
                         "{%s} [%s] Sending request: %s %s; timeout %fs",
                         request.txn_id,
                         request.destination,
@@ -436,7 +475,6 @@ class MatrixFederationHttpClient(object):
                     except DNSLookupError as e:
                         raise RequestSendFailed(e, can_retry=retry_on_dns_fail) from e
                     except Exception as e:
-                        logger.info("Failed to send request: %s", e)
                         raise RequestSendFailed(e, can_retry=True) from e
 
                     incoming_responses_counter.labels(
@@ -496,7 +534,7 @@ class MatrixFederationHttpClient(object):
 
                     break
                 except RequestSendFailed as e:
-                    logger.warning(
+                    logger.info(
                         "{%s} [%s] Request failed: %s %s: %s",
                         request.txn_id,
                         request.destination,
@@ -654,6 +692,8 @@ class MatrixFederationHttpClient(object):
             json=data,
         )
 
+        start_ms = self.clock.time_msec()
+
         response = await self._send_request_with_optional_trailing_slash(
             request,
             try_trailing_slash_on_400,
@@ -664,7 +704,7 @@ class MatrixFederationHttpClient(object):
         )
 
         body = await _handle_json_response(
-            self.reactor, self.default_timeout, request, response
+            self.reactor, self.default_timeout, request, response, start_ms
         )
 
         return body
@@ -720,6 +760,8 @@ class MatrixFederationHttpClient(object):
             method="POST", destination=destination, path=path, query=args, json=data
         )
 
+        start_ms = self.clock.time_msec()
+
         response = await self._send_request(
             request,
             long_retries=long_retries,
@@ -733,7 +775,7 @@ class MatrixFederationHttpClient(object):
             _sec_timeout = self.default_timeout
 
         body = await _handle_json_response(
-            self.reactor, _sec_timeout, request, response
+            self.reactor, _sec_timeout, request, response, start_ms,
         )
         return body
 
@@ -786,6 +828,8 @@ class MatrixFederationHttpClient(object):
             method="GET", destination=destination, path=path, query=args
         )
 
+        start_ms = self.clock.time_msec()
+
         response = await self._send_request_with_optional_trailing_slash(
             request,
             try_trailing_slash_on_400,
@@ -796,7 +840,7 @@ class MatrixFederationHttpClient(object):
         )
 
         body = await _handle_json_response(
-            self.reactor, self.default_timeout, request, response
+            self.reactor, self.default_timeout, request, response, start_ms
         )
 
         return body
@@ -846,6 +890,8 @@ class MatrixFederationHttpClient(object):
             method="DELETE", destination=destination, path=path, query=args
         )
 
+        start_ms = self.clock.time_msec()
+
         response = await self._send_request(
             request,
             long_retries=long_retries,
@@ -854,7 +900,7 @@ class MatrixFederationHttpClient(object):
         )
 
         body = await _handle_json_response(
-            self.reactor, self.default_timeout, request, response
+            self.reactor, self.default_timeout, request, response, start_ms
         )
         return body
 
@@ -914,12 +960,14 @@ class MatrixFederationHttpClient(object):
             )
             raise
         logger.info(
-            "{%s} [%s] Completed: %d %s [%d bytes]",
+            "{%s} [%s] Completed: %d %s [%d bytes] %s %s",
             request.txn_id,
             request.destination,
             response.code,
             response.phrase.decode("ascii", errors="replace"),
             length,
+            request.method,
+            request.uri.decode("ascii"),
         )
         return (length, headers)
 
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 94ab29974a..ffe6cfa09e 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -25,7 +25,7 @@ from io import BytesIO
 from typing import Any, Callable, Dict, Tuple, Union
 
 import jinja2
-from canonicaljson import encode_canonical_json, encode_pretty_printed_json, json
+from canonicaljson import encode_canonical_json, encode_pretty_printed_json
 
 from twisted.internet import defer
 from twisted.python import failure
@@ -46,6 +46,7 @@ from synapse.api.errors import (
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import preserve_fn
 from synapse.logging.opentracing import trace_servlet
+from synapse.util import json_encoder
 from synapse.util.caches import intern_dict
 
 logger = logging.getLogger(__name__)
@@ -538,7 +539,7 @@ def respond_with_json(
             # canonicaljson already encodes to bytes
             json_bytes = encode_canonical_json(json_object)
         else:
-            json_bytes = json.dumps(json_object).encode("utf-8")
+            json_bytes = json_encoder.encode(json_object).encode("utf-8")
 
     return respond_with_json_bytes(request, code, json_bytes, send_cors=send_cors)
 
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 6f3b2258cc..6e79b47828 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -146,10 +146,9 @@ class SynapseRequest(Request):
 
         Returns a context manager; the correct way to use this is:
 
-        @defer.inlineCallbacks
-        def handle_request(request):
+        async def handle_request(request):
             with request.processing("FooServlet"):
-                yield really_handle_the_request()
+                await really_handle_the_request()
 
         Once the context manager is closed, the completion of the request will be logged,
         and the various metrics will be updated.
@@ -287,7 +286,9 @@ class SynapseRequest(Request):
             # the connection dropped)
             code += "!"
 
-        self.site.access_logger.info(
+        log_level = logging.INFO if self._should_log_request() else logging.DEBUG
+        self.site.access_logger.log(
+            log_level,
             "%s - %s - {%s}"
             " Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
             ' %sB %s "%s %s %s" "%s" [%d dbevts]',
@@ -315,6 +316,17 @@ class SynapseRequest(Request):
         except Exception as e:
             logger.warning("Failed to stop metrics: %r", e)
 
+    def _should_log_request(self) -> bool:
+        """Whether we should log at INFO that we processed the request.
+        """
+        if self.path == b"/health":
+            return False
+
+        if self.method == b"OPTIONS":
+            return False
+
+        return True
+
 
 class XForwardedForRequest(SynapseRequest):
     def __init__(self, *args, **kw):
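
`_should_log_request` above demotes `/health` and `OPTIONS` traffic so that load-balancer probes and CORS preflights stop flooding the access log at INFO. A standalone sketch of the same level-selection pattern (the names here are illustrative, not part of Synapse's API):

    import logging

    access_logger = logging.getLogger("access")

    def log_processed(method: bytes, path: bytes, summary: str) -> None:
        # Health checks and preflights go to DEBUG; real traffic stays at INFO,
        # matching SynapseRequest._should_log_request above.
        noisy = path == b"/health" or method == b"OPTIONS"
        level = logging.DEBUG if noisy else logging.INFO
        access_logger.log(level, "%s %s - %s", method.decode(), path.decode(), summary)
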
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index a9269196b3..f766d16db6 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -13,16 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import inspect
 import logging
 import threading
-from asyncio import iscoroutine
 from functools import wraps
 from typing import TYPE_CHECKING, Dict, Optional, Set
 
 from prometheus_client.core import REGISTRY, Counter, Gauge
 
 from twisted.internet import defer
-from twisted.python.failure import Failure
 
 from synapse.logging.context import LoggingContext, PreserveLoggingContext
 
@@ -167,7 +166,7 @@ class _BackgroundProcess(object):
         )
 
 
-def run_as_background_process(desc, func, *args, **kwargs):
+def run_as_background_process(desc: str, func, *args, **kwargs):
     """Run the given function in its own logcontext, with resource metrics
 
     This should be used to wrap processes which are fired off to run in the
@@ -179,7 +178,7 @@ def run_as_background_process(desc, func, *args, **kwargs):
     normal synapse inlineCallbacks function).
 
     Args:
-        desc (str): a description for this background process type
+        desc: a description for this background process type
         func: a function, which may return a Deferred or a coroutine
         args: positional args for func
         kwargs: keyword args for func
@@ -188,8 +187,7 @@ def run_as_background_process(desc, func, *args, **kwargs):
         follow the synapse logcontext rules.
     """
 
-    @defer.inlineCallbacks
-    def run():
+    async def run():
         with _bg_metrics_lock:
             count = _background_process_counts.get(desc, 0)
             _background_process_counts[desc] = count + 1
@@ -203,29 +201,21 @@ def run_as_background_process(desc, func, *args, **kwargs):
             try:
                 result = func(*args, **kwargs)
 
-                # We probably don't have an ensureDeferred in our call stack to handle
-                # coroutine results, so we need to ensureDeferred here.
-                #
-                # But we need this check because ensureDeferred doesn't like being
-                # called on immediate values (as opposed to Deferreds or coroutines).
-                if iscoroutine(result):
-                    result = defer.ensureDeferred(result)
+                if inspect.isawaitable(result):
+                    result = await result
 
-                return (yield result)
+                return result
             except Exception:
-                # failure.Failure() fishes the original Failure out of our stack, and
-                # thus gives us a sensible stack trace.
-                f = Failure()
-                logger.error(
-                    "Background process '%s' threw an exception",
-                    desc,
-                    exc_info=(f.type, f.value, f.getTracebackObject()),
+                logger.exception(
+                    "Background process '%s' threw an exception", desc,
                 )
             finally:
                 _background_process_in_flight_count.labels(desc).dec()
 
     with PreserveLoggingContext():
-        return run()
+        # Note that we return a Deferred here so that it can be used in a
+        # looping_call and other places that expect a Deferred.
+        return defer.ensureDeferred(run())
 
 
 def wrap_as_background_process(desc):
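
With the rewrite above, `run_as_background_process` no longer special-cases coroutines with `ensureDeferred` inside an `inlineCallbacks` generator: the wrapper is itself a coroutine and simply awaits whatever the wrapped function returns, if it is awaitable. A self-contained sketch of that dispatch, minus the metrics and logcontext machinery:

    import asyncio
    import inspect

    async def run_flexibly(func, *args, **kwargs):
        # Accept plain functions, awaitable-returning functions, or coroutine
        # functions alike: only await when there is something to await on.
        result = func(*args, **kwargs)
        if inspect.isawaitable(result):
            result = await result
        return result

    async def main():
        async def coro():
            return 2

        print(await run_flexibly(lambda: 1))  # immediate value -> 1
        print(await run_flexibly(coro))       # coroutine -> 2

    asyncio.run(main())
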
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 8201849951..c2fb757d9a 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -194,12 +194,16 @@ class ModuleApi(object):
             synapse.api.errors.AuthError: the access token is invalid
         """
         # see if the access token corresponds to a device
-        user_info = yield self._auth.get_user_by_access_token(access_token)
+        user_info = yield defer.ensureDeferred(
+            self._auth.get_user_by_access_token(access_token)
+        )
         device_id = user_info.get("device_id")
         user_id = user_info["user"].to_string()
         if device_id:
             # delete the device, which will also delete its access tokens
-            yield self._hs.get_device_handler().delete_device(user_id, device_id)
+            yield defer.ensureDeferred(
+                self._hs.get_device_handler().delete_device(user_id, device_id)
+            )
         else:
             # no associated device. Just delete the access token.
             yield defer.ensureDeferred(
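
`ModuleApi` is still written with `inlineCallbacks`, so each handler method that became a coroutine now has to be wrapped in `defer.ensureDeferred` before it can be `yield`ed. A minimal sketch of that bridging pattern (the handler here is a stand-in for e.g. `get_user_by_access_token`):

    from twisted.internet import defer

    async def async_handler(value):
        # stand-in for a Synapse handler method that is now a coroutine
        return value * 2

    @defer.inlineCallbacks
    def legacy_caller():
        # Yielding a bare coroutine fails under inlineCallbacks; ensureDeferred
        # converts it into a Deferred that the generator machinery understands.
        result = yield defer.ensureDeferred(async_handler(21))
        return result

    d = legacy_caller()  # a Deferred that fires with 42
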
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 22ab4a9da5..dfb096e589 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -15,7 +15,18 @@
 
 import logging
 from collections import namedtuple
-from typing import Callable, Iterable, List, TypeVar
+from typing import (
+    Awaitable,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+)
 
 from prometheus_client import Counter
 
@@ -24,12 +35,14 @@ from twisted.internet import defer
 import synapse.server
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError
+from synapse.events import EventBase
 from synapse.handlers.presence import format_user_presence_state
 from synapse.logging.context import PreserveLoggingContext
 from synapse.logging.utils import log_function
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import StreamToken
+from synapse.streams.config import PaginationConfig
+from synapse.types import Collection, StreamToken, UserID
 from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
 from synapse.util.metrics import Measure
 from synapse.visibility import filter_events_for_client
@@ -77,7 +90,13 @@ class _NotifierUserStream(object):
     so that it can remove itself from the indexes in the Notifier class.
     """
 
-    def __init__(self, user_id, rooms, current_token, time_now_ms):
+    def __init__(
+        self,
+        user_id: str,
+        rooms: Collection[str],
+        current_token: StreamToken,
+        time_now_ms: int,
+    ):
         self.user_id = user_id
         self.rooms = set(rooms)
         self.current_token = current_token
@@ -93,13 +112,13 @@ class _NotifierUserStream(object):
         with PreserveLoggingContext():
             self.notify_deferred = ObservableDeferred(defer.Deferred())
 
-    def notify(self, stream_key, stream_id, time_now_ms):
+    def notify(self, stream_key: str, stream_id: int, time_now_ms: int):
         """Notify any listeners for this user of a new event from an
         event source.
         Args:
-            stream_key(str): The stream the event came from.
-            stream_id(str): The new id for the stream the event came from.
-            time_now_ms(int): The current time in milliseconds.
+            stream_key: The stream the event came from.
+            stream_id: The new id for the stream the event came from.
+            time_now_ms: The current time in milliseconds.
         """
         self.current_token = self.current_token.copy_and_advance(stream_key, stream_id)
         self.last_notified_token = self.current_token
@@ -112,7 +131,7 @@ class _NotifierUserStream(object):
             self.notify_deferred = ObservableDeferred(defer.Deferred())
             noify_deferred.callback(self.current_token)
 
-    def remove(self, notifier):
+    def remove(self, notifier: "Notifier"):
         """ Remove this listener from all the indexes in the Notifier
         it knows about.
         """
@@ -123,10 +142,10 @@ class _NotifierUserStream(object):
 
         notifier.user_to_user_stream.pop(self.user_id)
 
-    def count_listeners(self):
+    def count_listeners(self) -> int:
         return len(self.notify_deferred.observers())
 
-    def new_listener(self, token):
+    def new_listener(self, token: StreamToken) -> _NotificationListener:
         """Returns a deferred that is resolved when there is a new token
         greater than the given token.
 
@@ -159,14 +178,16 @@ class Notifier(object):
     UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000
 
     def __init__(self, hs: "synapse.server.HomeServer"):
-        self.user_to_user_stream = {}
-        self.room_to_user_streams = {}
+        self.user_to_user_stream = {}  # type: Dict[str, _NotifierUserStream]
+        self.room_to_user_streams = {}  # type: Dict[str, Set[_NotifierUserStream]]
 
         self.hs = hs
         self.storage = hs.get_storage()
         self.event_sources = hs.get_event_sources()
         self.store = hs.get_datastore()
-        self.pending_new_room_events = []
+        self.pending_new_room_events = (
+            []
+        )  # type: List[Tuple[int, EventBase, Collection[Union[str, UserID]]]]
 
         # Called when there are new things to stream over replication
         self.replication_callbacks = []  # type: List[Callable[[], None]]
@@ -178,10 +199,9 @@ class Notifier(object):
         self.clock = hs.get_clock()
         self.appservice_handler = hs.get_application_service_handler()
 
+        self.federation_sender = None
         if hs.should_send_federation():
             self.federation_sender = hs.get_federation_sender()
-        else:
-            self.federation_sender = None
 
         self.state_handler = hs.get_state_handler()
 
@@ -193,12 +213,12 @@ class Notifier(object):
         # when rendering the metrics page, which is likely once per minute at
         # most when scraping it.
         def count_listeners():
-            all_user_streams = set()
+            all_user_streams = set()  # type: Set[_NotifierUserStream]
 
-            for x in list(self.room_to_user_streams.values()):
-                all_user_streams |= x
-            for x in list(self.user_to_user_stream.values()):
-                all_user_streams.add(x)
+            for streams in list(self.room_to_user_streams.values()):
+                all_user_streams |= streams
+            for stream in list(self.user_to_user_stream.values()):
+                all_user_streams.add(stream)
 
             return sum(stream.count_listeners() for stream in all_user_streams)
 
@@ -223,7 +243,11 @@ class Notifier(object):
         self.replication_callbacks.append(cb)
 
     def on_new_room_event(
-        self, event, room_stream_id, max_room_stream_id, extra_users=[]
+        self,
+        event: EventBase,
+        room_stream_id: int,
+        max_room_stream_id: int,
+        extra_users: Collection[Union[str, UserID]] = [],
     ):
         """ Used by handlers to inform the notifier something has happened
         in the room, room event wise.
@@ -241,11 +265,11 @@ class Notifier(object):
 
         self.notify_replication()
 
-    def _notify_pending_new_room_events(self, max_room_stream_id):
+    def _notify_pending_new_room_events(self, max_room_stream_id: int):
         """Notify for the room events that were queued waiting for a previous
         event to be persisted.
         Args:
-            max_room_stream_id(int): The highest stream_id below which all
+            max_room_stream_id: The highest stream_id below which all
                 events have been persisted.
         """
         pending = self.pending_new_room_events
@@ -258,7 +282,12 @@ class Notifier(object):
             else:
                 self._on_new_room_event(event, room_stream_id, extra_users)
 
-    def _on_new_room_event(self, event, room_stream_id, extra_users=[]):
+    def _on_new_room_event(
+        self,
+        event: EventBase,
+        room_stream_id: int,
+        extra_users: Collection[Union[str, UserID]] = [],
+    ):
         """Notify any user streams that are interested in this room event"""
         # poke any interested application service.
         run_as_background_process(
@@ -275,13 +304,19 @@ class Notifier(object):
             "room_key", room_stream_id, users=extra_users, rooms=[event.room_id]
         )
 
-    async def _notify_app_services(self, room_stream_id):
+    async def _notify_app_services(self, room_stream_id: int):
         try:
             await self.appservice_handler.notify_interested_services(room_stream_id)
         except Exception:
             logger.exception("Error notifying application services of event")
 
-    def on_new_event(self, stream_key, new_token, users=[], rooms=[]):
+    def on_new_event(
+        self,
+        stream_key: str,
+        new_token: int,
+        users: Collection[Union[str, UserID]] = [],
+        rooms: Collection[str] = [],
+    ):
         """ Used to inform listeners that something has happened event wise.
 
         Will wake up all listeners for the given users and rooms.
@@ -307,14 +342,19 @@ class Notifier(object):
 
                 self.notify_replication()
 
-    def on_new_replication_data(self):
+    def on_new_replication_data(self) -> None:
         """Used to inform replication listeners that something has happend
         without waking up any of the normal user event streams"""
         self.notify_replication()
 
     async def wait_for_events(
-        self, user_id, timeout, callback, room_ids=None, from_token=StreamToken.START
-    ):
+        self,
+        user_id: str,
+        timeout: int,
+        callback: Callable[[StreamToken, StreamToken], Awaitable[T]],
+        room_ids=None,
+        from_token=StreamToken.START,
+    ) -> T:
         """Wait until the callback returns a non empty response or the
         timeout fires.
         """
@@ -377,19 +417,16 @@ class Notifier(object):
 
     async def get_events_for(
         self,
-        user,
-        pagination_config,
-        timeout,
-        only_keys=None,
-        is_guest=False,
-        explicit_room_id=None,
-    ):
+        user: UserID,
+        pagination_config: PaginationConfig,
+        timeout: int,
+        is_guest: bool = False,
+        explicit_room_id: Optional[str] = None,
+    ) -> EventStreamResult:
         """ For the given user and rooms, return any new events for them. If
         there are no new events wait for up to `timeout` milliseconds for any
         new events to happen before returning.
 
-        If `only_keys` is not None, events from keys will be sent down.
-
         If explicit_room_id is not set, the user's joined rooms will be polled
         for events.
         If explicit_room_id is set, that room will be polled for events only if
@@ -404,11 +441,13 @@ class Notifier(object):
         room_ids, is_joined = await self._get_room_ids(user, explicit_room_id)
         is_peeking = not is_joined
 
-        async def check_for_updates(before_token, after_token):
+        async def check_for_updates(
+            before_token: StreamToken, after_token: StreamToken
+        ) -> EventStreamResult:
             if not after_token.is_after(before_token):
                 return EventStreamResult([], (from_token, from_token))
 
-            events = []
+            events = []  # type: List[EventBase]
             end_token = from_token
 
             for name, source in self.event_sources.sources.items():
@@ -417,8 +456,6 @@ class Notifier(object):
                 after_id = getattr(after_token, keyname)
                 if before_id == after_id:
                     continue
-                if only_keys and name not in only_keys:
-                    continue
 
                 new_events, new_key = await source.get_new_events(
                     user=user,
@@ -476,7 +513,9 @@ class Notifier(object):
 
         return result
 
-    async def _get_room_ids(self, user, explicit_room_id):
+    async def _get_room_ids(
+        self, user: UserID, explicit_room_id: Optional[str]
+    ) -> Tuple[Collection[str], bool]:
         joined_room_ids = await self.store.get_rooms_for_user(user.to_string())
         if explicit_room_id:
             if explicit_room_id in joined_room_ids:
@@ -486,7 +525,7 @@ class Notifier(object):
             raise AuthError(403, "Non-joined access not allowed")
         return joined_room_ids, True
 
-    async def _is_world_readable(self, room_id):
+    async def _is_world_readable(self, room_id: str) -> bool:
         state = await self.state_handler.get_current_state(
             room_id, EventTypes.RoomHistoryVisibility, ""
         )
@@ -496,7 +535,7 @@ class Notifier(object):
             return False
 
     @log_function
-    def remove_expired_streams(self):
+    def remove_expired_streams(self) -> None:
         time_now_ms = self.clock.time_msec()
         expired_streams = []
         expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY_MS
@@ -510,21 +549,21 @@ class Notifier(object):
             expired_stream.remove(self)
 
     @log_function
-    def _register_with_keys(self, user_stream):
+    def _register_with_keys(self, user_stream: _NotifierUserStream):
         self.user_to_user_stream[user_stream.user_id] = user_stream
 
         for room in user_stream.rooms:
             s = self.room_to_user_streams.setdefault(room, set())
             s.add(user_stream)
 
-    def _user_joined_room(self, user_id, room_id):
+    def _user_joined_room(self, user_id: str, room_id: str):
         new_user_stream = self.user_to_user_stream.get(user_id)
         if new_user_stream is not None:
             room_streams = self.room_to_user_streams.setdefault(room_id, set())
             room_streams.add(new_user_stream)
             new_user_stream.rooms.add(room_id)
 
-    def notify_replication(self):
+    def notify_replication(self) -> None:
         """Notify the any replication listeners that there's a new event"""
         for cb in self.replication_callbacks:
             cb()
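
Most of the notifier changes above are type annotations, but the mechanism they annotate is worth seeing in isolation: each `_NotifierUserStream` holds one shared deferred, and `notify` fires it and immediately installs a fresh one, so later waiters block until the next event. A simplified sketch, assuming Twisted only (Synapse wraps the deferred in `ObservableDeferred` so that multiple observers each receive the token independently, which a bare `Deferred` does not give you):

    from twisted.internet import defer

    class MiniUserStream:
        def __init__(self, current_token: int = 0):
            self.current_token = current_token
            self._notify_deferred = defer.Deferred()

        def wait_for_next_token(self) -> defer.Deferred:
            # In Synapse this would be ObservableDeferred.observe(), so any
            # number of listeners can wait on the same event.
            return self._notify_deferred

        def notify(self, new_token: int) -> None:
            self.current_token = new_token
            # Swap in a fresh deferred *before* firing, so callbacks that
            # immediately re-listen wait for the following event.
            fired, self._notify_deferred = self._notify_deferred, defer.Deferred()
            fired.callback(new_token)
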
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index 286374d0b5..8047873ff1 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -19,11 +19,13 @@ import copy
 from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP
 
 
-def list_with_base_rules(rawrules):
+def list_with_base_rules(rawrules, use_new_defaults=False):
     """Combine the list of rules set by the user with the default push rules
 
     Args:
         rawrules(list): The rules the user has modified or set.
+        use_new_defaults(bool): Whether to use the new experimental default rules when
+            appending or prepending default rules.
 
     Returns:
         A new list with the rules set by the user combined with the defaults.
@@ -43,7 +45,9 @@ def list_with_base_rules(rawrules):
 
     ruleslist.extend(
         make_base_prepend_rules(
-            PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules
+            PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
+            modified_base_rules,
+            use_new_defaults,
         )
     )
 
@@ -54,6 +58,7 @@ def list_with_base_rules(rawrules):
                     make_base_append_rules(
                         PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
                         modified_base_rules,
+                        use_new_defaults,
                     )
                 )
                 current_prio_class -= 1
@@ -62,6 +67,7 @@ def list_with_base_rules(rawrules):
                         make_base_prepend_rules(
                             PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
                             modified_base_rules,
+                            use_new_defaults,
                         )
                     )
 
@@ -70,27 +76,39 @@ def list_with_base_rules(rawrules):
     while current_prio_class > 0:
         ruleslist.extend(
             make_base_append_rules(
-                PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules
+                PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
+                modified_base_rules,
+                use_new_defaults,
             )
         )
         current_prio_class -= 1
         if current_prio_class > 0:
             ruleslist.extend(
                 make_base_prepend_rules(
-                    PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules
+                    PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
+                    modified_base_rules,
+                    use_new_defaults,
                 )
             )
 
     return ruleslist
 
 
-def make_base_append_rules(kind, modified_base_rules):
+def make_base_append_rules(kind, modified_base_rules, use_new_defaults=False):
     rules = []
 
     if kind == "override":
-        rules = BASE_APPEND_OVERRIDE_RULES
+        rules = (
+            NEW_APPEND_OVERRIDE_RULES
+            if use_new_defaults
+            else BASE_APPEND_OVERRIDE_RULES
+        )
     elif kind == "underride":
-        rules = BASE_APPEND_UNDERRIDE_RULES
+        rules = (
+            NEW_APPEND_UNDERRIDE_RULES
+            if use_new_defaults
+            else BASE_APPEND_UNDERRIDE_RULES
+        )
     elif kind == "content":
         rules = BASE_APPEND_CONTENT_RULES
 
@@ -105,7 +123,7 @@ def make_base_append_rules(kind, modified_base_rules):
     return rules
 
 
-def make_base_prepend_rules(kind, modified_base_rules):
+def make_base_prepend_rules(kind, modified_base_rules, use_new_defaults=False):
     rules = []
 
     if kind == "override":
@@ -270,6 +288,135 @@ BASE_APPEND_OVERRIDE_RULES = [
 ]
 
 
+NEW_APPEND_OVERRIDE_RULES = [
+    {
+        "rule_id": "global/override/.m.rule.encrypted",
+        "conditions": [
+            {
+                "kind": "event_match",
+                "key": "type",
+                "pattern": "m.room.encrypted",
+                "_id": "_encrypted",
+            }
+        ],
+        "actions": ["notify"],
+    },
+    {
+        "rule_id": "global/override/.m.rule.suppress_notices",
+        "conditions": [
+            {
+                "kind": "event_match",
+                "key": "type",
+                "pattern": "m.room.message",
+                "_id": "_suppress_notices_type",
+            },
+            {
+                "kind": "event_match",
+                "key": "content.msgtype",
+                "pattern": "m.notice",
+                "_id": "_suppress_notices",
+            },
+        ],
+        "actions": [],
+    },
+    {
+        "rule_id": "global/underride/.m.rule.suppress_edits",
+        "conditions": [
+            {
+                "kind": "event_match",
+                "key": "m.relates_to.m.rel_type",
+                "pattern": "m.replace",
+                "_id": "_suppress_edits",
+            }
+        ],
+        "actions": [],
+    },
+    {
+        "rule_id": "global/override/.m.rule.invite_for_me",
+        "conditions": [
+            {
+                "kind": "event_match",
+                "key": "type",
+                "pattern": "m.room.member",
+                "_id": "_member",
+            },
+            {
+                "kind": "event_match",
+                "key": "content.membership",
+                "pattern": "invite",
+                "_id": "_invite_member",
+            },
+            {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"},
+        ],
+        "actions": ["notify", {"set_tweak": "sound", "value": "default"}],
+    },
+    {
+        "rule_id": "global/override/.m.rule.contains_display_name",
+        "conditions": [{"kind": "contains_display_name"}],
+        "actions": [
+            "notify",
+            {"set_tweak": "sound", "value": "default"},
+            {"set_tweak": "highlight"},
+        ],
+    },
+    {
+        "rule_id": "global/override/.m.rule.tombstone",
+        "conditions": [
+            {
+                "kind": "event_match",
+                "key": "type",
+                "pattern": "m.room.tombstone",
+                "_id": "_tombstone",
+            },
+            {
+                "kind": "event_match",
+                "key": "state_key",
+                "pattern": "",
+                "_id": "_tombstone_statekey",
+            },
+        ],
+        "actions": [
+            "notify",
+            {"set_tweak": "sound", "value": "default"},
+            {"set_tweak": "highlight"},
+        ],
+    },
+    {
+        "rule_id": "global/override/.m.rule.roomnotif",
+        "conditions": [
+            {
+                "kind": "event_match",
+                "key": "content.body",
+                "pattern": "@room",
+                "_id": "_roomnotif_content",
+            },
+            {
+                "kind": "sender_notification_permission",
+                "key": "room",
+                "_id": "_roomnotif_pl",
+            },
+        ],
+        "actions": [
+            "notify",
+            {"set_tweak": "highlight"},
+            {"set_tweak": "sound", "value": "default"},
+        ],
+    },
+    {
+        "rule_id": "global/override/.m.rule.call",
+        "conditions": [
+            {
+                "kind": "event_match",
+                "key": "type",
+                "pattern": "m.call.invite",
+                "_id": "_call",
+            }
+        ],
+        "actions": ["notify", {"set_tweak": "sound", "value": "ring"}],
+    },
+]
+
+
 BASE_APPEND_UNDERRIDE_RULES = [
     {
         "rule_id": "global/underride/.m.rule.call",
@@ -354,6 +501,36 @@ BASE_APPEND_UNDERRIDE_RULES = [
 ]
 
 
+NEW_APPEND_UNDERRIDE_RULES = [
+    {
+        "rule_id": "global/underride/.m.rule.room_one_to_one",
+        "conditions": [
+            {"kind": "room_member_count", "is": "2", "_id": "member_count"},
+            {
+                "kind": "event_match",
+                "key": "content.body",
+                "pattern": "*",
+                "_id": "body",
+            },
+        ],
+        "actions": ["notify", {"set_tweak": "sound", "value": "default"}],
+    },
+    {
+        "rule_id": "global/underride/.m.rule.message",
+        "conditions": [
+            {
+                "kind": "event_match",
+                "key": "content.body",
+                "pattern": "*",
+                "_id": "body",
+            },
+        ],
+        "actions": ["notify"],
+        "enabled": False,
+    },
+]
+
+
 BASE_RULE_IDS = set()
 
 for r in BASE_APPEND_CONTENT_RULES:
@@ -375,3 +552,26 @@ for r in BASE_APPEND_UNDERRIDE_RULES:
     r["priority_class"] = PRIORITY_CLASS_MAP["underride"]
     r["default"] = True
     BASE_RULE_IDS.add(r["rule_id"])
+
+
+NEW_RULE_IDS = set()
+
+for r in BASE_APPEND_CONTENT_RULES:
+    r["priority_class"] = PRIORITY_CLASS_MAP["content"]
+    r["default"] = True
+    NEW_RULE_IDS.add(r["rule_id"])
+
+for r in BASE_PREPEND_OVERRIDE_RULES:
+    r["priority_class"] = PRIORITY_CLASS_MAP["override"]
+    r["default"] = True
+    NEW_RULE_IDS.add(r["rule_id"])
+
+for r in NEW_APPEND_OVERRIDE_RULES:
+    r["priority_class"] = PRIORITY_CLASS_MAP["override"]
+    r["default"] = True
+    NEW_RULE_IDS.add(r["rule_id"])
+
+for r in NEW_APPEND_UNDERRIDE_RULES:
+    r["priority_class"] = PRIORITY_CLASS_MAP["underride"]
+    r["default"] = True
+    NEW_RULE_IDS.add(r["rule_id"])
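
The `use_new_defaults` flag is threaded through every `make_base_*_rules` call above, so a single argument switches a user onto the experimental rule set end to end. A hedged usage sketch, assuming a user with no custom rules:

    from synapse.push.baserules import list_with_base_rules

    # With no user-defined rules the result is just the defaults; the flag swaps
    # NEW_APPEND_OVERRIDE_RULES / NEW_APPEND_UNDERRIDE_RULES in for the old sets.
    old_ids = {r["rule_id"] for r in list_with_base_rules([])}
    new_ids = {r["rule_id"] for r in list_with_base_rules([], use_new_defaults=True)}

    print(sorted(new_ids - old_ids))  # rules only in the experimental set
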
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 04b9d8ac82..e7fcee0e87 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -120,7 +120,7 @@ class BulkPushRuleEvaluator(object):
             pl_event = await self.store.get_event(pl_event_id)
             auth_events = {POWER_KEY: pl_event}
         else:
-            auth_events_ids = await self.auth.compute_auth_events(
+            auth_events_ids = self.auth.compute_auth_events(
                 event, prev_state_ids, for_verification=False
             )
             auth_events = await self.store.get_events(auth_events_ids)
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index bc8f71916b..d0145666bf 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -21,13 +21,22 @@ async def get_badge_count(store, user_id):
     invites = await store.get_invited_rooms_for_local_user(user_id)
     joins = await store.get_rooms_for_user(user_id)
 
+    my_receipts_by_room = await store.get_receipts_for_user(user_id, "m.read")
+
     badge = len(invites)
 
     for room_id in joins:
-        unread_count = await store.get_unread_message_count_for_user(room_id, user_id)
-        # return one badge count per conversation, as count per
-        # message is so noisy as to be almost useless
-        badge += 1 if unread_count else 0
+        if room_id in my_receipts_by_room:
+            last_unread_event_id = my_receipts_by_room[room_id]
+
+            notifs = await store.get_unread_event_push_actions_by_room_for_user(
+                room_id, user_id, last_unread_event_id
+            )
+            # return one badge count per conversation, as count per
+            # message is so noisy as to be almost useless
+            badge += 1 if notifs["notify_count"] else 0
     return badge
 
 
diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py
index 60dd3f6701..a6fdedde63 100644
--- a/synapse/replication/slave/storage/client_ips.py
+++ b/synapse/replication/slave/storage/client_ips.py
@@ -28,7 +28,7 @@ class SlavedClientIpStore(BaseSlavedStore):
             name="client_ip_last_seen", keylen=4, max_entries=50000
         )
 
-    def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id):
+    async def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id):
         now = int(self._clock.time_msec())
         key = (user_id, access_token, ip)
 
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index f33801f883..d853e4447e 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -18,11 +18,12 @@ The VALID_SERVER_COMMANDS and VALID_CLIENT_COMMANDS define which commands are
 allowed to be sent by which side.
 """
 import abc
-import json
 import logging
 from typing import Tuple, Type
 
-_json_encoder = json.JSONEncoder()
+from canonicaljson import json
+
+from synapse.util import json_encoder as _json_encoder
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/res/templates/saml_error.html b/synapse/res/templates/saml_error.html
index bfd6449c5d..01cd9bdaf3 100644
--- a/synapse/res/templates/saml_error.html
+++ b/synapse/res/templates/saml_error.html
@@ -2,10 +2,17 @@
 <html lang="en">
 <head>
     <meta charset="UTF-8">
-    <title>SSO error</title>
+    <title>SSO login error</title>
 </head>
 <body>
-    <p>Oops! Something went wrong during authentication<span id="errormsg"></span>.</p>
+{# a 403 means we have actively rejected their login #}
+{% if code == 403 %}
+    <p>You are not allowed to log in here.</p>
+{% else %}
+    <p>
+        There was an error during authentication:
+    </p>
+    <div id="errormsg" style="margin:20px 80px">{{ msg }}</div>
     <p>
         If you are seeing this page after clicking a link sent to you via email, make
         sure you only click the confirmation link once, and that you open the
@@ -37,9 +44,9 @@
         // to print one.
         let errorDesc = new URLSearchParams(searchStr).get("error_description")
         if (errorDesc) {
-
-            document.getElementById("errormsg").innerText = ` ("${errorDesc}")`;
+            document.getElementById("errormsg").innerText = errorDesc;
         }
     </script>
+{% endif %}
 </body>
-</html>
\ No newline at end of file
+</html>
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py
index 5934b1fe8b..b210015173 100644
--- a/synapse/rest/client/v1/directory.py
+++ b/synapse/rest/client/v1/directory.py
@@ -89,7 +89,7 @@ class ClientDirectoryServer(RestServlet):
         dir_handler = self.handlers.directory_handler
 
         try:
-            service = await self.auth.get_appservice_by_req(request)
+            service = self.auth.get_appservice_by_req(request)
             room_alias = RoomAlias.from_string(room_alias)
             await dir_handler.delete_appservice_association(service, room_alias)
             logger.info(
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index 9fd4908136..00831879f3 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -25,7 +25,7 @@ from synapse.http.servlet import (
     parse_json_value_from_request,
     parse_string,
 )
-from synapse.push.baserules import BASE_RULE_IDS
+from synapse.push.baserules import BASE_RULE_IDS, NEW_RULE_IDS
 from synapse.push.clientformat import format_push_rules_for_user
 from synapse.push.rulekinds import PRIORITY_CLASS_MAP
 from synapse.rest.client.v2_alpha._base import client_patterns
@@ -45,6 +45,8 @@ class PushRuleRestServlet(RestServlet):
         self.notifier = hs.get_notifier()
         self._is_worker = hs.config.worker_app is not None
 
+        self._users_new_default_push_rules = hs.config.users_new_default_push_rules
+
     async def on_PUT(self, request, path):
         if self._is_worker:
             raise Exception("Cannot handle PUT /push_rules on worker")
@@ -179,7 +181,12 @@ class PushRuleRestServlet(RestServlet):
             rule_id = spec["rule_id"]
             is_default_rule = rule_id.startswith(".")
             if is_default_rule:
-                if namespaced_rule_id not in BASE_RULE_IDS:
+                if user_id in self._users_new_default_push_rules:
+                    rule_ids = NEW_RULE_IDS
+                else:
+                    rule_ids = BASE_RULE_IDS
+
+                if namespaced_rule_id not in rule_ids:
                     raise SynapseError(404, "Unknown rule %r" % (namespaced_rule_id,))
             return self.store.set_push_rule_actions(
                 user_id, namespaced_rule_id, actions, is_default_rule
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 3767a809a4..fead85074b 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -18,7 +18,12 @@ import logging
 from http import HTTPStatus
 
 from synapse.api.constants import LoginType
-from synapse.api.errors import Codes, SynapseError, ThreepidValidationError
+from synapse.api.errors import (
+    Codes,
+    InteractiveAuthIncompleteError,
+    SynapseError,
+    ThreepidValidationError,
+)
 from synapse.config.emailconfig import ThreepidBehaviour
 from synapse.http.server import finish_request, respond_with_html
 from synapse.http.servlet import (
@@ -239,18 +244,12 @@ class PasswordRestServlet(RestServlet):
 
         # we do basic sanity checks here because the auth layer will store these
         # in sessions. Pull out the new password provided to us.
-        if "new_password" in body:
-            new_password = body.pop("new_password")
+        new_password = body.pop("new_password", None)
+        if new_password is not None:
             if not isinstance(new_password, str) or len(new_password) > 512:
                 raise SynapseError(400, "Invalid password")
             self.password_policy_handler.validate_password(new_password)
 
-            # If the password is valid, hash it and store it back on the body.
-            # This ensures that only the hashed password is handled everywhere.
-            if "new_password_hash" in body:
-                raise SynapseError(400, "Unexpected property: new_password_hash")
-            body["new_password_hash"] = await self.auth_handler.hash(new_password)
-
         # there are two possibilities here. Either the user does not have an
         # access token, and needs to do a password reset; or they have one and
         # need to validate their identity.
@@ -263,23 +262,49 @@ class PasswordRestServlet(RestServlet):
 
         if self.auth.has_access_token(request):
             requester = await self.auth.get_user_by_req(request)
-            params = await self.auth_handler.validate_user_via_ui_auth(
-                requester,
-                request,
-                body,
-                self.hs.get_ip_from_request(request),
-                "modify your account password",
-            )
+            try:
+                params, session_id = await self.auth_handler.validate_user_via_ui_auth(
+                    requester,
+                    request,
+                    body,
+                    self.hs.get_ip_from_request(request),
+                    "modify your account password",
+                )
+            except InteractiveAuthIncompleteError as e:
+                # The user needs to provide more steps to complete auth, but
+                # they're not required to provide the password again.
+                #
+                # If a password is available now, hash the provided password and
+                # store it for later.
+                if new_password:
+                    password_hash = await self.auth_handler.hash(new_password)
+                    await self.auth_handler.set_session_data(
+                        e.session_id, "password_hash", password_hash
+                    )
+                raise
             user_id = requester.user.to_string()
         else:
             requester = None
-            result, params, _ = await self.auth_handler.check_auth(
-                [[LoginType.EMAIL_IDENTITY]],
-                request,
-                body,
-                self.hs.get_ip_from_request(request),
-                "modify your account password",
-            )
+            try:
+                result, params, session_id = await self.auth_handler.check_ui_auth(
+                    [[LoginType.EMAIL_IDENTITY]],
+                    request,
+                    body,
+                    self.hs.get_ip_from_request(request),
+                    "modify your account password",
+                )
+            except InteractiveAuthIncompleteError as e:
+                # The user needs to provide more steps to complete auth, but
+                # they're not required to provide the password again.
+                #
+                # If a password is available now, hash the provided password and
+                # store it for later.
+                if new_password:
+                    password_hash = await self.auth_handler.hash(new_password)
+                    await self.auth_handler.set_session_data(
+                        e.session_id, "password_hash", password_hash
+                    )
+                raise
 
             if LoginType.EMAIL_IDENTITY in result:
                 threepid = result[LoginType.EMAIL_IDENTITY]
@@ -304,12 +329,21 @@ class PasswordRestServlet(RestServlet):
                 logger.error("Auth succeeded but no known type! %r", result.keys())
                 raise SynapseError(500, "", Codes.UNKNOWN)
 
-        assert_params_in_dict(params, ["new_password_hash"])
-        new_password_hash = params["new_password_hash"]
+        # If we have a password in this request, prefer it. Otherwise, there
+        # must be a password hash from an earlier request.
+        if new_password:
+            password_hash = await self.auth_handler.hash(new_password)
+        else:
+            password_hash = await self.auth_handler.get_session_data(
+                session_id, "password_hash", None
+            )
+        if not password_hash:
+            raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM)
+
         logout_devices = params.get("logout_devices", True)
 
         await self._set_password_handler.set_password(
-            user_id, new_password_hash, logout_devices, requester
+            user_id, password_hash, logout_devices, requester
         )
 
         return 200, {}
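
The password-change flow above hashes the new password eagerly and parks the hash in the UI-auth session whenever `InteractiveAuthIncompleteError` interrupts the flow, so the client does not have to resend the plaintext on the next round trip and no plaintext is ever stored. A simplified sketch of that save-and-resume shape (the session store and hash function are stand-ins, not Synapse's):

    SESSIONS: dict = {}  # stand-in for the auth handler's session storage

    def hash_pw(pw: str) -> str:
        return "hashed:" + pw  # stand-in for auth_handler.hash (bcrypt in Synapse)

    class InteractiveAuthIncompleteError(Exception):
        def __init__(self, session_id: str):
            self.session_id = session_id

    async def change_password(body: dict, validate_ui_auth) -> str:
        new_password = body.pop("new_password", None)
        try:
            params, session_id = await validate_ui_auth(body)
        except InteractiveAuthIncompleteError as e:
            # More auth steps needed: stash the *hash*, keyed by the UI-auth
            # session, then re-raise so the client sees the auth challenge.
            if new_password:
                SESSIONS.setdefault(e.session_id, {})["password_hash"] = hash_pw(new_password)
            raise
        # Auth complete: prefer a password from this request, else the stored hash.
        if new_password:
            return hash_pw(new_password)
        return SESSIONS.get(session_id, {}).get("password_hash")
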
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 370742ce59..f808175698 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -24,6 +24,7 @@ import synapse.types
 from synapse.api.constants import LoginType
 from synapse.api.errors import (
     Codes,
+    InteractiveAuthIncompleteError,
     SynapseError,
     ThreepidValidationError,
     UnrecognizedRequestError,
@@ -387,6 +388,7 @@ class RegisterRestServlet(RestServlet):
         self.ratelimiter = hs.get_registration_ratelimiter()
         self.password_policy_handler = hs.get_password_policy_handler()
         self.clock = hs.get_clock()
+        self._registration_enabled = self.hs.config.enable_registration
 
         self._registration_flows = _calculate_registration_flows(
             hs.config, self.auth_handler
@@ -412,20 +414,8 @@ class RegisterRestServlet(RestServlet):
                 "Do not understand membership kind: %s" % (kind.decode("utf8"),)
             )
 
-        # we do basic sanity checks here because the auth layer will store these
-        # in sessions. Pull out the username/password provided to us.
-        if "password" in body:
-            password = body.pop("password")
-            if not isinstance(password, str) or len(password) > 512:
-                raise SynapseError(400, "Invalid password")
-            self.password_policy_handler.validate_password(password)
-
-            # If the password is valid, hash it and store it back on the body.
-            # This ensures that only the hashed password is handled everywhere.
-            if "password_hash" in body:
-                raise SynapseError(400, "Unexpected property: password_hash")
-            body["password_hash"] = await self.auth_handler.hash(password)
-
+        # Pull out the provided username and do basic sanity checks early since
+        # the auth layer will store these in sessions.
         desired_username = None
         if "username" in body:
             if not isinstance(body["username"], str) or len(body["username"]) > 512:
@@ -434,7 +424,7 @@ class RegisterRestServlet(RestServlet):
 
         appservice = None
         if self.auth.has_access_token(request):
-            appservice = await self.auth.get_appservice_by_req(request)
+            appservice = self.auth.get_appservice_by_req(request)
 
         # fork off as soon as possible for ASes which have completely
         # different registration flows to normal users
@@ -459,22 +449,35 @@ class RegisterRestServlet(RestServlet):
                 )
             return 200, result  # we throw for non 200 responses
 
-        # for regular registration, downcase the provided username before
-        # attempting to register it. This should mean
-        # that people who try to register with upper-case in their usernames
-        # don't get a nasty surprise. (Note that we treat username
-        # case-insenstively in login, so they are free to carry on imagining
-        # that their username is CrAzYh4cKeR if that keeps them happy)
-        if desired_username is not None:
-            desired_username = desired_username.lower()
-
         # == Normal User Registration == (everyone else)
-        if not self.hs.config.enable_registration:
+        if not self._registration_enabled:
             raise SynapseError(403, "Registration has been disabled")
 
+        # For regular registration, convert the provided username to lowercase
+        # before attempting to register it. This should mean that people who try
+        # to register with upper-case in their usernames don't get a nasty surprise.
+        #
+        # Note that we treat usernames case-insensitively in login, so they are
+        # free to carry on imagining that their username is CrAzYh4cKeR if that
+        # keeps them happy.
+        if desired_username is not None:
+            desired_username = desired_username.lower()
+
+        # Check if this account is upgrading from a guest account.
         guest_access_token = body.get("guest_access_token", None)
 
-        if "initial_device_display_name" in body and "password_hash" not in body:
+        # Pull out the provided password and do basic sanity checks early.
+        #
+        # Note that we remove the password from the body since the auth layer
+        # will store the body in the session and we don't want a plaintext
+        # password stored there.
+        password = body.pop("password", None)
+        if password is not None:
+            if not isinstance(password, str) or len(password) > 512:
+                raise SynapseError(400, "Invalid password")
+            self.password_policy_handler.validate_password(password)
+
+        if "initial_device_display_name" in body and password is None:
             # ignore 'initial_device_display_name' if sent without
             # a password to work around a client bug where it sent
             # the 'initial_device_display_name' param alone, wiping out
@@ -484,6 +487,7 @@ class RegisterRestServlet(RestServlet):
 
         session_id = self.auth_handler.get_session_id(body)
         registered_user_id = None
+        password_hash = None
         if session_id:
             # if we get a registered user id out of here, it means we previously
             # registered a user for this session, so we could just return the
@@ -492,7 +496,12 @@ class RegisterRestServlet(RestServlet):
             registered_user_id = await self.auth_handler.get_session_data(
                 session_id, "registered_user_id", None
             )
+            # Extract the previously-hashed password from the session.
+            password_hash = await self.auth_handler.get_session_data(
+                session_id, "password_hash", None
+            )
 
+        # Ensure that the username is valid.
         if desired_username is not None:
             await self.registration_handler.check_username(
                 desired_username,
@@ -500,20 +509,38 @@ class RegisterRestServlet(RestServlet):
                 assigned_user_id=registered_user_id,
             )
 
-        auth_result, params, session_id = await self.auth_handler.check_auth(
-            self._registration_flows,
-            request,
-            body,
-            self.hs.get_ip_from_request(request),
-            "register a new account",
-        )
+        # Check if the user-interactive authentication flows are complete, if
+        # not this will raise a user-interactive auth error.
+        try:
+            auth_result, params, session_id = await self.auth_handler.check_ui_auth(
+                self._registration_flows,
+                request,
+                body,
+                self.hs.get_ip_from_request(request),
+                "register a new account",
+            )
+        except InteractiveAuthIncompleteError as e:
+            # The user needs to provide more steps to complete auth.
+            #
+            # Hash the password and store it with the session since the client
+            # is not required to provide the password again.
+            #
+            # If a password hash was previously stored we will not attempt to
+            # re-hash and store it for efficiency. This assumes the password
+            # does not change throughout the authentication flow, but this
+            # should be fine since the data is meant to be consistent.
+            if not password_hash and password:
+                password_hash = await self.auth_handler.hash(password)
+                await self.auth_handler.set_session_data(
+                    e.session_id, "password_hash", password_hash
+                )
+            raise
 
         # Check that we're not trying to register a denied 3pid.
         #
         # the user-facing checks will probably already have happened in
         # /register/email/requestToken when we requested a 3pid, but that's not
         # guaranteed.
-
         if auth_result:
             for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]:
                 if login_type in auth_result:
@@ -535,12 +562,15 @@ class RegisterRestServlet(RestServlet):
             # don't re-register the threepids
             registered = False
         else:
-            # NB: This may be from the auth handler and NOT from the POST
-            assert_params_in_dict(params, ["password_hash"])
+            # If we have a password in this request, prefer it. Otherwise, there
+            # might be a password hash from an earlier request.
+            if password:
+                password_hash = await self.auth_handler.hash(password)
+            if not password_hash:
+                raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM)
 
             desired_username = params.get("username", None)
             guest_access_token = params.get("guest_access_token", None)
-            new_password_hash = params.get("password_hash", None)
 
             if desired_username is not None:
                 desired_username = desired_username.lower()
@@ -582,7 +612,7 @@ class RegisterRestServlet(RestServlet):
 
             registered_user_id = await self.registration_handler.register_user(
                 localpart=desired_username,
-                password_hash=new_password_hash,
+                password_hash=password_hash,
                 guest_access_token=guest_access_token,
                 threepid=threepid,
                 address=client_addr,
@@ -595,8 +625,8 @@ class RegisterRestServlet(RestServlet):
                 ):
                     await self.store.upsert_monthly_active_user(registered_user_id)
 
-            # remember that we've now registered that user account, and with
-            #  what user ID (since the user may not have specified)
+            # Remember that the user account has been registered (and the user
+            # ID it was registered with, since it might not have been specified).
             await self.auth_handler.set_session_data(
                 session_id, "registered_user_id", registered_user_id
             )
@@ -635,7 +665,7 @@ class RegisterRestServlet(RestServlet):
             (object) params: registration parameters, from which we pull
                 device_id, initial_device_name and inhibit_login
         Returns:
-            defer.Deferred: (object) dictionary for response from /register
+            (object) dictionary for response from /register
         """
         result = {"user_id": user_id, "home_server": self.hs.hostname}
         if not params.get("inhibit_login", False):
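
Registration gets the mirror-image treatment: when a session ID is present, the servlet first recovers `registered_user_id` and `password_hash` from the session, and only hashes anew when the current request actually carries a password. A sketch of the retrieval side, using the `get_session_data(session_id, key, default)` and `hash(password)` calls exactly as they appear in the diff:

    async def resume_registration(auth_handler, session_id, password):
        # Recover state from an earlier, interrupted attempt; both values
        # default to None on a fresh session.
        registered_user_id = await auth_handler.get_session_data(
            session_id, "registered_user_id", None
        )
        password_hash = await auth_handler.get_session_data(
            session_id, "password_hash", None
        )
        # A password sent in this request wins over a stored hash; a stored
        # hash avoids re-hashing on every UI-auth round trip.
        if password:
            password_hash = await auth_handler.hash(password)
        return registered_user_id, password_hash
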
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index 05cbd9e2eb..6f4b224454 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -427,7 +427,6 @@ class SyncRestServlet(RestServlet):
             result["ephemeral"] = {"events": ephemeral_events}
             result["unread_notifications"] = room.unread_notifications
             result["summary"] = room.summary
-            result["org.matrix.msc2654.unread_count"] = room.unread_count
 
         return result
 
diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py
index 4386eb4e72..b3e4d5612e 100644
--- a/synapse/rest/consent/consent_resource.py
+++ b/synapse/rest/consent/consent_resource.py
@@ -22,8 +22,6 @@ from os import path
 import jinja2
 from jinja2 import TemplateNotFound
 
-from twisted.internet import defer
-
 from synapse.api.errors import NotFoundError, StoreError, SynapseError
 from synapse.config import ConfigError
 from synapse.http.server import DirectServeHtmlResource, respond_with_html
@@ -135,7 +133,7 @@ class ConsentResource(DirectServeHtmlResource):
             else:
                 qualified_user_id = UserID(username, self.hs.hostname).to_string()
 
-            u = await defer.maybeDeferred(self.store.get_user_by_id, qualified_user_id)
+            u = await self.store.get_user_by_id(qualified_user_id)
             if u is None:
                 raise NotFoundError("Unknown user")
 
diff --git a/synapse/rest/health.py b/synapse/rest/health.py
new file mode 100644
index 0000000000..0170950bf3
--- /dev/null
+++ b/synapse/rest/health.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.web.resource import Resource
+
+
+class HealthResource(Resource):
+    """A resource that does nothing except return a 200 with a body of `OK`,
+    which can be used as a health check.
+
+    Note: `SynapseRequest._should_log_request` ensures that requests to
+    `/health` do not get logged at INFO.
+    """
+
+    isLeaf = 1
+
+    def render_GET(self, request):
+        request.setHeader(b"Content-Type", b"text/plain")
+        return b"OK"
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index f4768a9e8b..cd8c246594 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -27,9 +27,7 @@ from typing import Dict, Optional
 from urllib import parse as urlparse
 
 import attr
-from canonicaljson import json
 
-from twisted.internet import defer
 from twisted.internet.error import DNSLookupError
 
 from synapse.api.errors import Codes, SynapseError
@@ -43,6 +41,7 @@ from synapse.http.servlet import parse_integer, parse_string
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.rest.media.v1._base import get_filename_from_headers
+from synapse.util import json_encoder
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.stringutils import random_string
@@ -228,7 +227,7 @@ class PreviewUrlResource(DirectServeJsonResource):
         else:
             logger.info("Returning cached response")
 
-        og = await make_deferred_yieldable(defer.maybeDeferred(observable.observe))
+        og = await make_deferred_yieldable(observable.observe())
         respond_with_json_bytes(request, 200, og, send_cors=True)
 
     async def _do_preview(self, url: str, user: str, ts: int) -> bytes:
@@ -355,7 +354,7 @@ class PreviewUrlResource(DirectServeJsonResource):
 
         logger.debug("Calculated OG for %s as %s", url, og)
 
-        jsonog = json.dumps(og)
+        jsonog = json_encoder.encode(og)
 
         # store OG in history-aware DB cache
         await self.store.store_url_cache(
diff --git a/synapse/secrets.py b/synapse/secrets.py
index 5f43f81eb0..ff86950a54 100644
--- a/synapse/secrets.py
+++ b/synapse/secrets.py
@@ -25,8 +25,12 @@ import sys
 if sys.version_info[0:2] >= (3, 6):
     import secrets
 
-    def Secrets():
-        return secrets
+    class Secrets:
+        def token_bytes(self, nbytes=32):
+            return secrets.token_bytes(nbytes)
+
+        def token_hex(self, nbytes=32):
+            return secrets.token_hex(nbytes)
 
 
 else:
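
Replacing the old `Secrets()` factory (which simply returned the stdlib module) with a real class keeps existing call sites like `hs.get_secrets().token_hex(...)` working, while giving tests a concrete type to subclass or mock. A quick usage sketch:

    from synapse.secrets import Secrets

    secrets = Secrets()
    print(secrets.token_hex(16))    # 32 hex characters from the CSPRNG
    print(secrets.token_bytes(16))  # 16 random bytes
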
diff --git a/synapse/server.py b/synapse/server.py
index 81d7f26f9c..9055b97ac3 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -22,10 +22,14 @@
 
 # Imports required for the default HomeServer() implementation
 import abc
+import functools
 import logging
 import os
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypeVar, cast
 
+import twisted
 from twisted.mail.smtp import sendmail
+from twisted.web.iweb import IPolicyForHTTPS
 
 from synapse.api.auth import Auth
 from synapse.api.filtering import Filtering
@@ -93,7 +97,7 @@ from synapse.push.pusherpool import PusherPool
 from synapse.replication.tcp.client import ReplicationDataHandler
 from synapse.replication.tcp.handler import ReplicationCommandHandler
 from synapse.replication.tcp.resource import ReplicationStreamer
-from synapse.replication.tcp.streams import STREAMS_MAP
+from synapse.replication.tcp.streams import STREAMS_MAP, Stream
 from synapse.rest.media.v1.media_repository import (
     MediaRepository,
     MediaRepositoryResource,
@@ -107,30 +111,72 @@ from synapse.server_notices.worker_server_notices_sender import (
 from synapse.state import StateHandler, StateResolutionHandler
 from synapse.storage import Databases, DataStore, Storage
 from synapse.streams.events import EventSources
+from synapse.types import DomainSpecificString
 from synapse.util import Clock
 from synapse.util.distributor import Distributor
 from synapse.util.stringutils import random_string
 
 logger = logging.getLogger(__name__)
 
+if TYPE_CHECKING:
+    from synapse.handlers.oidc_handler import OidcHandler
+    from synapse.handlers.saml_handler import SamlHandler
 
-class HomeServer(object):
+
+T = TypeVar("T", bound=Callable[..., Any])
+
+
+def cache_in_self(builder: T) -> T:
+    """Wraps a function called e.g. `get_foo`, checking if `self.foo` exists and
+    returning if so. If not, calls the given function and sets `self.foo` to it.
+
+    Also ensures that dependency cycles throw an exception correctly, rather
+    than overflowing the stack.
+    """
+
+    if not builder.__name__.startswith("get_"):
+        raise Exception(
+            "@cache_in_self can only be used on functions starting with `get_`"
+        )
+
+    depname = builder.__name__[len("get_") :]
+
+    building = [False]
+
+    @functools.wraps(builder)
+    def _get(self):
+        try:
+            return getattr(self, depname)
+        except AttributeError:
+            pass
+
+        # Prevent cyclic dependencies from deadlocking
+        if building[0]:
+            raise ValueError("Cyclic dependency while building %s" % (depname,))
+
+        building[0] = True
+        try:
+            dep = builder(self)
+            setattr(self, depname, dep)
+        finally:
+            building[0] = False
+
+        return dep
+
+    # We cast here as we need to tell mypy that `_get` has the same signature as
+    # `builder`.
+    return cast(T, _get)
+
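Taken on its own, the decorator behaves as in this self-contained sketch (`FakeHomeServer` is illustrative; the mypy cast is omitted):

    import functools

    def cache_in_self(builder):
        # Trimmed copy of the decorator above, without the typing machinery.
        depname = builder.__name__[len("get_"):]
        building = [False]

        @functools.wraps(builder)
        def _get(self):
            try:
                return getattr(self, depname)
            except AttributeError:
                pass
            if building[0]:
                raise ValueError("Cyclic dependency while building %s" % (depname,))
            building[0] = True
            try:
                dep = builder(self)
                setattr(self, depname, dep)
            finally:
                building[0] = False
            return dep

        return _get

    class FakeHomeServer:
        @cache_in_self
        def get_foo(self):
            print("building foo")
            return object()

    hs = FakeHomeServer()
    a = hs.get_foo()  # prints "building foo" and stashes the result on hs.foo
    b = hs.get_foo()  # returns the cached hs.foo; the builder is not re-run
    assert a is b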
+
+class HomeServer(metaclass=abc.ABCMeta):
     """A basic homeserver object without lazy component builders.
 
     This will need all of the components it requires to either be passed as
     constructor arguments, or the relevant methods overridden to create them.
     Typically this would only be used for unit tests.
 
-    For every dependency in the DEPENDENCIES list below, this class creates one
-    method,
-        def get_DEPENDENCY(self)
-    which returns the value of that dependency. If no value has yet been set
-    nor was provided to the constructor, it will attempt to call a lazy builder
-    method called
-        def build_DEPENDENCY(self)
-    which must be implemented by the subclass. This code may call any of the
-    required "get" methods on the instance to obtain the sub-dependencies that
-    one requires.
+    Dependencies should be added by creating a `def get_<depname>(self)`
+    function and wrapping it in `@cache_in_self`.
 
     Attributes:
         config (synapse.config.homeserver.HomeserverConfig):
@@ -138,86 +184,6 @@ class HomeServer(object):
             we are listening on to provide HTTP services.
     """
 
-    __metaclass__ = abc.ABCMeta
-
-    DEPENDENCIES = [
-        "http_client",
-        "federation_client",
-        "federation_server",
-        "handlers",
-        "auth",
-        "room_creation_handler",
-        "room_shutdown_handler",
-        "state_handler",
-        "state_resolution_handler",
-        "presence_handler",
-        "sync_handler",
-        "typing_handler",
-        "room_list_handler",
-        "acme_handler",
-        "auth_handler",
-        "device_handler",
-        "stats_handler",
-        "e2e_keys_handler",
-        "e2e_room_keys_handler",
-        "event_handler",
-        "event_stream_handler",
-        "initial_sync_handler",
-        "application_service_api",
-        "application_service_scheduler",
-        "application_service_handler",
-        "device_message_handler",
-        "profile_handler",
-        "event_creation_handler",
-        "deactivate_account_handler",
-        "set_password_handler",
-        "notifier",
-        "event_sources",
-        "keyring",
-        "pusherpool",
-        "event_builder_factory",
-        "filtering",
-        "http_client_context_factory",
-        "simple_http_client",
-        "proxied_http_client",
-        "media_repository",
-        "media_repository_resource",
-        "federation_transport_client",
-        "federation_sender",
-        "receipts_handler",
-        "macaroon_generator",
-        "tcp_replication",
-        "read_marker_handler",
-        "action_generator",
-        "user_directory_handler",
-        "groups_local_handler",
-        "groups_server_handler",
-        "groups_attestation_signing",
-        "groups_attestation_renewer",
-        "secrets",
-        "spam_checker",
-        "third_party_event_rules",
-        "room_member_handler",
-        "federation_registry",
-        "server_notices_manager",
-        "server_notices_sender",
-        "message_handler",
-        "pagination_handler",
-        "room_context_handler",
-        "sendmail",
-        "registration_handler",
-        "account_validity_handler",
-        "cas_handler",
-        "saml_handler",
-        "oidc_handler",
-        "event_client_serializer",
-        "password_policy_handler",
-        "storage",
-        "replication_streamer",
-        "replication_data_handler",
-        "replication_streams",
-    ]
-
     REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"]
 
     # This is overridden in derived application classes
@@ -232,16 +198,17 @@ class HomeServer(object):
             config: The full config for the homeserver.
         """
         if not reactor:
-            from twisted.internet import reactor
+            from twisted.internet import reactor as _reactor
+
+            reactor = _reactor
 
         self._reactor = reactor
         self.hostname = hostname
         # the key we use to sign events and requests
         self.signing_key = config.key.signing_key[0]
         self.config = config
-        self._building = {}
-        self._listening_services = []
-        self.start_time = None
+        self._listening_services = []  # type: List[twisted.internet.tcp.Port]
+        self.start_time = None  # type: Optional[int]
 
         self._instance_id = random_string(5)
         self._instance_name = config.worker_name or "master"
@@ -255,13 +222,13 @@ class HomeServer(object):
             burst_count=config.rc_registration.burst_count,
         )
 
-        self.datastores = None
+        self.datastores = None  # type: Optional[Databases]
 
         # Other kwargs are explicit dependencies
         for depname in kwargs:
             setattr(self, depname, kwargs[depname])
 
-    def get_instance_id(self):
+    def get_instance_id(self) -> str:
         """A unique ID for this synapse process instance.
 
         This is used to distinguish running instances in worker-based
@@ -277,13 +244,13 @@ class HomeServer(object):
         """
         return self._instance_name
 
-    def setup(self):
+    def setup(self) -> None:
         logger.info("Setting up.")
         self.start_time = int(self.get_clock().time())
         self.datastores = Databases(self.DATASTORE_CLASS, self)
         logger.info("Finished setting up.")
 
-    def setup_master(self):
+    def setup_master(self) -> None:
         """
         Some handlers have side effects on instantiation (like registering
         background updates). This function causes them to be fetched, and
@@ -292,192 +259,242 @@ class HomeServer(object):
         for i in self.REQUIRED_ON_MASTER_STARTUP:
             getattr(self, "get_" + i)()
 
-    def get_reactor(self):
+    def get_reactor(self) -> twisted.internet.base.ReactorBase:
         """
         Fetch the Twisted reactor in use by this HomeServer.
         """
         return self._reactor
 
-    def get_ip_from_request(self, request):
+    def get_ip_from_request(self, request) -> str:
         # X-Forwarded-For is handled by our custom request type.
         return request.getClientIP()
 
-    def is_mine(self, domain_specific_string):
+    def is_mine(self, domain_specific_string: DomainSpecificString) -> bool:
         return domain_specific_string.domain == self.hostname
 
-    def is_mine_id(self, string):
+    def is_mine_id(self, string: str) -> bool:
         return string.split(":", 1)[1] == self.hostname
 
-    def get_clock(self):
+    def get_clock(self) -> Clock:
         return self.clock
 
     def get_datastore(self) -> DataStore:
+        if not self.datastores:
+            raise Exception("HomeServer.setup must be called before getting datastores")
+
         return self.datastores.main
 
-    def get_datastores(self):
+    def get_datastores(self) -> Databases:
+        if not self.datastores:
+            raise Exception("HomeServer.setup must be called before getting datastores")
+
         return self.datastores
 
-    def get_config(self):
+    def get_config(self) -> HomeServerConfig:
         return self.config
 
-    def get_distributor(self):
+    def get_distributor(self) -> Distributor:
         return self.distributor
 
     def get_registration_ratelimiter(self) -> Ratelimiter:
         return self.registration_ratelimiter
 
-    def build_federation_client(self):
+    @cache_in_self
+    def get_federation_client(self) -> FederationClient:
         return FederationClient(self)
 
-    def build_federation_server(self):
+    @cache_in_self
+    def get_federation_server(self) -> FederationServer:
         return FederationServer(self)
 
-    def build_handlers(self):
+    @cache_in_self
+    def get_handlers(self) -> Handlers:
         return Handlers(self)
 
-    def build_notifier(self):
+    @cache_in_self
+    def get_notifier(self) -> Notifier:
         return Notifier(self)
 
-    def build_auth(self):
+    @cache_in_self
+    def get_auth(self) -> Auth:
         return Auth(self)
 
-    def build_http_client_context_factory(self):
+    @cache_in_self
+    def get_http_client_context_factory(self) -> IPolicyForHTTPS:
         return (
             InsecureInterceptableContextFactory()
             if self.config.use_insecure_ssl_client_just_for_testing_do_not_use
             else RegularPolicyForHTTPS()
         )
 
-    def build_simple_http_client(self):
+    @cache_in_self
+    def get_simple_http_client(self) -> SimpleHttpClient:
         return SimpleHttpClient(self)
 
-    def build_proxied_http_client(self):
+    @cache_in_self
+    def get_proxied_http_client(self) -> SimpleHttpClient:
         return SimpleHttpClient(
             self,
             http_proxy=os.getenvb(b"http_proxy"),
             https_proxy=os.getenvb(b"HTTPS_PROXY"),
         )
 
-    def build_room_creation_handler(self):
+    @cache_in_self
+    def get_room_creation_handler(self) -> RoomCreationHandler:
         return RoomCreationHandler(self)
 
-    def build_room_shutdown_handler(self):
+    @cache_in_self
+    def get_room_shutdown_handler(self) -> RoomShutdownHandler:
         return RoomShutdownHandler(self)
 
-    def build_sendmail(self):
+    @cache_in_self
+    def get_sendmail(self) -> sendmail:
         return sendmail
 
-    def build_state_handler(self):
+    @cache_in_self
+    def get_state_handler(self) -> StateHandler:
         return StateHandler(self)
 
-    def build_state_resolution_handler(self):
+    @cache_in_self
+    def get_state_resolution_handler(self) -> StateResolutionHandler:
         return StateResolutionHandler(self)
 
-    def build_presence_handler(self):
+    @cache_in_self
+    def get_presence_handler(self) -> PresenceHandler:
         return PresenceHandler(self)
 
-    def build_typing_handler(self):
+    @cache_in_self
+    def get_typing_handler(self):
         if self.config.worker.writers.typing == self.get_instance_name():
             return TypingWriterHandler(self)
         else:
             return FollowerTypingHandler(self)
 
-    def build_sync_handler(self):
+    @cache_in_self
+    def get_sync_handler(self) -> SyncHandler:
         return SyncHandler(self)
 
-    def build_room_list_handler(self):
+    @cache_in_self
+    def get_room_list_handler(self) -> RoomListHandler:
         return RoomListHandler(self)
 
-    def build_auth_handler(self):
+    @cache_in_self
+    def get_auth_handler(self) -> AuthHandler:
         return AuthHandler(self)
 
-    def build_macaroon_generator(self):
+    @cache_in_self
+    def get_macaroon_generator(self) -> MacaroonGenerator:
         return MacaroonGenerator(self)
 
-    def build_device_handler(self):
+    @cache_in_self
+    def get_device_handler(self):
         if self.config.worker_app:
             return DeviceWorkerHandler(self)
         else:
             return DeviceHandler(self)
 
-    def build_device_message_handler(self):
+    @cache_in_self
+    def get_device_message_handler(self) -> DeviceMessageHandler:
         return DeviceMessageHandler(self)
 
-    def build_e2e_keys_handler(self):
+    @cache_in_self
+    def get_e2e_keys_handler(self) -> E2eKeysHandler:
         return E2eKeysHandler(self)
 
-    def build_e2e_room_keys_handler(self):
+    @cache_in_self
+    def get_e2e_room_keys_handler(self) -> E2eRoomKeysHandler:
         return E2eRoomKeysHandler(self)
 
-    def build_acme_handler(self):
+    @cache_in_self
+    def get_acme_handler(self) -> AcmeHandler:
         return AcmeHandler(self)
 
-    def build_application_service_api(self):
+    @cache_in_self
+    def get_application_service_api(self) -> ApplicationServiceApi:
         return ApplicationServiceApi(self)
 
-    def build_application_service_scheduler(self):
+    @cache_in_self
+    def get_application_service_scheduler(self) -> ApplicationServiceScheduler:
         return ApplicationServiceScheduler(self)
 
-    def build_application_service_handler(self):
+    @cache_in_self
+    def get_application_service_handler(self) -> ApplicationServicesHandler:
         return ApplicationServicesHandler(self)
 
-    def build_event_handler(self):
+    @cache_in_self
+    def get_event_handler(self) -> EventHandler:
         return EventHandler(self)
 
-    def build_event_stream_handler(self):
+    @cache_in_self
+    def get_event_stream_handler(self) -> EventStreamHandler:
         return EventStreamHandler(self)
 
-    def build_initial_sync_handler(self):
+    @cache_in_self
+    def get_initial_sync_handler(self) -> InitialSyncHandler:
         return InitialSyncHandler(self)
 
-    def build_profile_handler(self):
+    @cache_in_self
+    def get_profile_handler(self):
         if self.config.worker_app:
             return BaseProfileHandler(self)
         else:
             return MasterProfileHandler(self)
 
-    def build_event_creation_handler(self):
+    @cache_in_self
+    def get_event_creation_handler(self) -> EventCreationHandler:
         return EventCreationHandler(self)
 
-    def build_deactivate_account_handler(self):
+    @cache_in_self
+    def get_deactivate_account_handler(self) -> DeactivateAccountHandler:
         return DeactivateAccountHandler(self)
 
-    def build_set_password_handler(self):
+    @cache_in_self
+    def get_set_password_handler(self) -> SetPasswordHandler:
         return SetPasswordHandler(self)
 
-    def build_event_sources(self):
+    @cache_in_self
+    def get_event_sources(self) -> EventSources:
         return EventSources(self)
 
-    def build_keyring(self):
+    @cache_in_self
+    def get_keyring(self) -> Keyring:
         return Keyring(self)
 
-    def build_event_builder_factory(self):
+    @cache_in_self
+    def get_event_builder_factory(self) -> EventBuilderFactory:
         return EventBuilderFactory(self)
 
-    def build_filtering(self):
+    @cache_in_self
+    def get_filtering(self) -> Filtering:
         return Filtering(self)
 
-    def build_pusherpool(self):
+    @cache_in_self
+    def get_pusherpool(self) -> PusherPool:
         return PusherPool(self)
 
-    def build_http_client(self):
+    @cache_in_self
+    def get_http_client(self) -> MatrixFederationHttpClient:
         tls_client_options_factory = context_factory.FederationPolicyForHTTPS(
             self.config
         )
         return MatrixFederationHttpClient(self, tls_client_options_factory)
 
-    def build_media_repository_resource(self):
+    @cache_in_self
+    def get_media_repository_resource(self) -> MediaRepositoryResource:
         # build the media repo resource. This indirects through the HomeServer
         # to ensure that we only have a single instance of it.
         return MediaRepositoryResource(self)
 
-    def build_media_repository(self):
+    @cache_in_self
+    def get_media_repository(self) -> MediaRepository:
         return MediaRepository(self)
 
-    def build_federation_transport_client(self):
+    @cache_in_self
+    def get_federation_transport_client(self) -> TransportLayerClient:
         return TransportLayerClient(self)
 
-    def build_federation_sender(self):
+    @cache_in_self
+    def get_federation_sender(self):
         if self.should_send_federation():
             return FederationSender(self)
         elif not self.config.worker_app:
@@ -485,156 +502,152 @@ class HomeServer(object):
         else:
             raise Exception("Workers cannot send federation traffic")
 
-    def build_receipts_handler(self):
+    @cache_in_self
+    def get_receipts_handler(self) -> ReceiptsHandler:
         return ReceiptsHandler(self)
 
-    def build_read_marker_handler(self):
+    @cache_in_self
+    def get_read_marker_handler(self) -> ReadMarkerHandler:
         return ReadMarkerHandler(self)
 
-    def build_tcp_replication(self):
+    @cache_in_self
+    def get_tcp_replication(self) -> ReplicationCommandHandler:
         return ReplicationCommandHandler(self)
 
-    def build_action_generator(self):
+    @cache_in_self
+    def get_action_generator(self) -> ActionGenerator:
         return ActionGenerator(self)
 
-    def build_user_directory_handler(self):
+    @cache_in_self
+    def get_user_directory_handler(self) -> UserDirectoryHandler:
         return UserDirectoryHandler(self)
 
-    def build_groups_local_handler(self):
+    @cache_in_self
+    def get_groups_local_handler(self):
         if self.config.worker_app:
             return GroupsLocalWorkerHandler(self)
         else:
             return GroupsLocalHandler(self)
 
-    def build_groups_server_handler(self):
+    @cache_in_self
+    def get_groups_server_handler(self):
         if self.config.worker_app:
             return GroupsServerWorkerHandler(self)
         else:
             return GroupsServerHandler(self)
 
-    def build_groups_attestation_signing(self):
+    @cache_in_self
+    def get_groups_attestation_signing(self) -> GroupAttestationSigning:
         return GroupAttestationSigning(self)
 
-    def build_groups_attestation_renewer(self):
+    @cache_in_self
+    def get_groups_attestation_renewer(self) -> GroupAttestionRenewer:
         return GroupAttestionRenewer(self)
 
-    def build_secrets(self):
+    @cache_in_self
+    def get_secrets(self) -> Secrets:
         return Secrets()
 
-    def build_stats_handler(self):
+    @cache_in_self
+    def get_stats_handler(self) -> StatsHandler:
         return StatsHandler(self)
 
-    def build_spam_checker(self):
+    @cache_in_self
+    def get_spam_checker(self):
         return SpamChecker(self)
 
-    def build_third_party_event_rules(self):
+    @cache_in_self
+    def get_third_party_event_rules(self) -> ThirdPartyEventRules:
         return ThirdPartyEventRules(self)
 
-    def build_room_member_handler(self):
+    @cache_in_self
+    def get_room_member_handler(self):
         if self.config.worker_app:
             return RoomMemberWorkerHandler(self)
         return RoomMemberMasterHandler(self)
 
-    def build_federation_registry(self):
+    @cache_in_self
+    def get_federation_registry(self) -> FederationHandlerRegistry:
         return FederationHandlerRegistry(self)
 
-    def build_server_notices_manager(self):
+    @cache_in_self
+    def get_server_notices_manager(self):
         if self.config.worker_app:
             raise Exception("Workers cannot send server notices")
         return ServerNoticesManager(self)
 
-    def build_server_notices_sender(self):
+    @cache_in_self
+    def get_server_notices_sender(self):
         if self.config.worker_app:
             return WorkerServerNoticesSender(self)
         return ServerNoticesSender(self)
 
-    def build_message_handler(self):
+    @cache_in_self
+    def get_message_handler(self) -> MessageHandler:
         return MessageHandler(self)
 
-    def build_pagination_handler(self):
+    @cache_in_self
+    def get_pagination_handler(self) -> PaginationHandler:
         return PaginationHandler(self)
 
-    def build_room_context_handler(self):
+    @cache_in_self
+    def get_room_context_handler(self) -> RoomContextHandler:
         return RoomContextHandler(self)
 
-    def build_registration_handler(self):
+    @cache_in_self
+    def get_registration_handler(self) -> RegistrationHandler:
         return RegistrationHandler(self)
 
-    def build_account_validity_handler(self):
+    @cache_in_self
+    def get_account_validity_handler(self) -> AccountValidityHandler:
         return AccountValidityHandler(self)
 
-    def build_cas_handler(self):
+    @cache_in_self
+    def get_cas_handler(self) -> CasHandler:
         return CasHandler(self)
 
-    def build_saml_handler(self):
+    @cache_in_self
+    def get_saml_handler(self) -> "SamlHandler":
         from synapse.handlers.saml_handler import SamlHandler
 
         return SamlHandler(self)
 
-    def build_oidc_handler(self):
+    @cache_in_self
+    def get_oidc_handler(self) -> "OidcHandler":
         from synapse.handlers.oidc_handler import OidcHandler
 
         return OidcHandler(self)
 
-    def build_event_client_serializer(self):
+    @cache_in_self
+    def get_event_client_serializer(self) -> EventClientSerializer:
         return EventClientSerializer(self)
 
-    def build_password_policy_handler(self):
+    @cache_in_self
+    def get_password_policy_handler(self) -> PasswordPolicyHandler:
         return PasswordPolicyHandler(self)
 
-    def build_storage(self) -> Storage:
-        return Storage(self, self.datastores)
+    @cache_in_self
+    def get_storage(self) -> Storage:
+        return Storage(self, self.get_datastores())
 
-    def build_replication_streamer(self) -> ReplicationStreamer:
+    @cache_in_self
+    def get_replication_streamer(self) -> ReplicationStreamer:
         return ReplicationStreamer(self)
 
-    def build_replication_data_handler(self):
+    @cache_in_self
+    def get_replication_data_handler(self) -> ReplicationDataHandler:
         return ReplicationDataHandler(self)
 
-    def build_replication_streams(self):
+    @cache_in_self
+    def get_replication_streams(self) -> Dict[str, Stream]:
         return {stream.NAME: stream(self) for stream in STREAMS_MAP.values()}
 
-    def remove_pusher(self, app_id, push_key, user_id):
-        return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
+    async def remove_pusher(self, app_id: str, push_key: str, user_id: str):
+        return await self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
 
-    def should_send_federation(self):
+    def should_send_federation(self) -> bool:
         "Should this server be sending federation traffic directly?"
         return self.config.send_federation and (
             not self.config.worker_app
             or self.config.worker_app == "synapse.app.federation_sender"
         )
-
-
-def _make_dependency_method(depname):
-    def _get(hs):
-        try:
-            return getattr(hs, depname)
-        except AttributeError:
-            pass
-
-        try:
-            builder = getattr(hs, "build_%s" % (depname))
-        except AttributeError:
-            raise NotImplementedError(
-                "%s has no %s nor a builder for it" % (type(hs).__name__, depname)
-            )
-
-        # Prevent cyclic dependencies from deadlocking
-        if depname in hs._building:
-            raise ValueError("Cyclic dependency while building %s" % (depname,))
-
-        hs._building[depname] = 1
-        try:
-            dep = builder()
-            setattr(hs, depname, dep)
-        finally:
-            del hs._building[depname]
-
-        return dep
-
-    setattr(HomeServer, "get_%s" % (depname), _get)
-
-
-# Build magic accessors for every dependency
-for depname in HomeServer.DEPENDENCIES:
-    _make_dependency_method(depname)
diff --git a/synapse/server.pyi b/synapse/server.pyi
deleted file mode 100644
index 1aba408c21..0000000000
--- a/synapse/server.pyi
+++ /dev/null
@@ -1,155 +0,0 @@
-from typing import Dict
-
-import twisted.internet
-
-import synapse.api.auth
-import synapse.config.homeserver
-import synapse.crypto.keyring
-import synapse.federation.federation_server
-import synapse.federation.sender
-import synapse.federation.transport.client
-import synapse.handlers
-import synapse.handlers.auth
-import synapse.handlers.deactivate_account
-import synapse.handlers.device
-import synapse.handlers.e2e_keys
-import synapse.handlers.message
-import synapse.handlers.presence
-import synapse.handlers.register
-import synapse.handlers.room
-import synapse.handlers.room_member
-import synapse.handlers.set_password
-import synapse.http.client
-import synapse.http.matrixfederationclient
-import synapse.notifier
-import synapse.push.pusherpool
-import synapse.replication.tcp.client
-import synapse.replication.tcp.handler
-import synapse.rest.media.v1.media_repository
-import synapse.server_notices.server_notices_manager
-import synapse.server_notices.server_notices_sender
-import synapse.state
-import synapse.storage
-from synapse.events.builder import EventBuilderFactory
-from synapse.handlers.typing import FollowerTypingHandler
-from synapse.replication.tcp.streams import Stream
-
-class HomeServer(object):
-    @property
-    def config(self) -> synapse.config.homeserver.HomeServerConfig:
-        pass
-    @property
-    def hostname(self) -> str:
-        pass
-    def get_auth(self) -> synapse.api.auth.Auth:
-        pass
-    def get_auth_handler(self) -> synapse.handlers.auth.AuthHandler:
-        pass
-    def get_datastore(self) -> synapse.storage.DataStore:
-        pass
-    def get_device_handler(self) -> synapse.handlers.device.DeviceHandler:
-        pass
-    def get_e2e_keys_handler(self) -> synapse.handlers.e2e_keys.E2eKeysHandler:
-        pass
-    def get_handlers(self) -> synapse.handlers.Handlers:
-        pass
-    def get_state_handler(self) -> synapse.state.StateHandler:
-        pass
-    def get_state_resolution_handler(self) -> synapse.state.StateResolutionHandler:
-        pass
-    def get_simple_http_client(self) -> synapse.http.client.SimpleHttpClient:
-        """Fetch an HTTP client implementation which doesn't do any blacklisting
-        or support any HTTP_PROXY settings"""
-        pass
-    def get_proxied_http_client(self) -> synapse.http.client.SimpleHttpClient:
-        """Fetch an HTTP client implementation which doesn't do any blacklisting
-        but does support HTTP_PROXY settings"""
-        pass
-    def get_deactivate_account_handler(
-        self,
-    ) -> synapse.handlers.deactivate_account.DeactivateAccountHandler:
-        pass
-    def get_room_creation_handler(self) -> synapse.handlers.room.RoomCreationHandler:
-        pass
-    def get_room_member_handler(self) -> synapse.handlers.room_member.RoomMemberHandler:
-        pass
-    def get_room_shutdown_handler(self) -> synapse.handlers.room.RoomShutdownHandler:
-        pass
-    def get_event_creation_handler(
-        self,
-    ) -> synapse.handlers.message.EventCreationHandler:
-        pass
-    def get_set_password_handler(
-        self,
-    ) -> synapse.handlers.set_password.SetPasswordHandler:
-        pass
-    def get_federation_sender(self) -> synapse.federation.sender.FederationSender:
-        pass
-    def get_federation_transport_client(
-        self,
-    ) -> synapse.federation.transport.client.TransportLayerClient:
-        pass
-    def get_media_repository_resource(
-        self,
-    ) -> synapse.rest.media.v1.media_repository.MediaRepositoryResource:
-        pass
-    def get_media_repository(
-        self,
-    ) -> synapse.rest.media.v1.media_repository.MediaRepository:
-        pass
-    def get_server_notices_manager(
-        self,
-    ) -> synapse.server_notices.server_notices_manager.ServerNoticesManager:
-        pass
-    def get_server_notices_sender(
-        self,
-    ) -> synapse.server_notices.server_notices_sender.ServerNoticesSender:
-        pass
-    def get_notifier(self) -> synapse.notifier.Notifier:
-        pass
-    def get_presence_handler(self) -> synapse.handlers.presence.BasePresenceHandler:
-        pass
-    def get_clock(self) -> synapse.util.Clock:
-        pass
-    def get_reactor(self) -> twisted.internet.base.ReactorBase:
-        pass
-    def get_keyring(self) -> synapse.crypto.keyring.Keyring:
-        pass
-    def get_tcp_replication(
-        self,
-    ) -> synapse.replication.tcp.handler.ReplicationCommandHandler:
-        pass
-    def get_replication_data_handler(
-        self,
-    ) -> synapse.replication.tcp.client.ReplicationDataHandler:
-        pass
-    def get_federation_registry(
-        self,
-    ) -> synapse.federation.federation_server.FederationHandlerRegistry:
-        pass
-    def is_mine_id(self, domain_id: str) -> bool:
-        pass
-    def get_instance_id(self) -> str:
-        pass
-    def get_instance_name(self) -> str:
-        pass
-    def get_event_builder_factory(self) -> EventBuilderFactory:
-        pass
-    def get_storage(self) -> synapse.storage.Storage:
-        pass
-    def get_registration_handler(self) -> synapse.handlers.register.RegistrationHandler:
-        pass
-    def get_macaroon_generator(self) -> synapse.handlers.auth.MacaroonGenerator:
-        pass
-    def get_pusherpool(self) -> synapse.push.pusherpool.PusherPool:
-        pass
-    def get_replication_streams(self) -> Dict[str, Stream]:
-        pass
-    def get_http_client(
-        self,
-    ) -> synapse.http.matrixfederationclient.MatrixFederationHttpClient:
-        pass
-    def should_send_federation(self) -> bool:
-        pass
-    def get_typing_handler(self) -> FollowerTypingHandler:
-        pass
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index ca800df831..6814bf5fcf 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -58,7 +58,6 @@ class SQLBaseStore(metaclass=ABCMeta):
         """
         for host in {get_domain_from_id(u) for u in members_changed}:
             self._attempt_to_invalidate_cache("is_host_joined", (room_id, host))
-            self._attempt_to_invalidate_cache("was_host_joined", (room_id, host))
 
         self._attempt_to_invalidate_cache("get_users_in_room", (room_id,))
         self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py
index b163eebf39..4406e58273 100644
--- a/synapse/storage/databases/__init__.py
+++ b/synapse/storage/databases/__init__.py
@@ -38,9 +38,9 @@ class Databases(object):
         # store.
 
         self.databases = []
-        self.main = None
-        self.state = None
-        self.persist_events = None
+        main = None
+        state = None
+        persist_events = None
 
         for database_config in hs.config.database.databases:
             db_name = database_config.name
@@ -61,27 +61,25 @@ class Databases(object):
 
                     # Sanity check we don't try and configure the main store on
                     # multiple databases.
-                    if self.main:
+                    if main:
                         raise Exception("'main' data store already configured")
 
-                    self.main = main_store_class(database, db_conn, hs)
+                    main = main_store_class(database, db_conn, hs)
 
                     # If we're on a process that can persist events also
                     # instantiate a `PersistEventsStore`
                     if hs.config.worker.writers.events == hs.get_instance_name():
-                        self.persist_events = PersistEventsStore(
-                            hs, database, self.main
-                        )
+                        persist_events = PersistEventsStore(hs, database, main)
 
                 if "state" in database_config.databases:
                     logger.info("Starting 'state' data store")
 
                     # Sanity check we don't try and configure the state store on
                     # multiple databases.
-                    if self.state:
+                    if state:
                         raise Exception("'state' data store already configured")
 
-                    self.state = StateGroupDataStore(database, db_conn, hs)
+                    state = StateGroupDataStore(database, db_conn, hs)
 
                 db_conn.commit()
 
@@ -90,8 +88,14 @@ class Databases(object):
                 logger.info("Database %r prepared", db_name)
 
         # Sanity check that we have actually configured all the required stores.
-        if not self.main:
+        if not main:
             raise Exception("No 'main' data store configured")
 
-        if not self.state:
+        if not state:
             raise Exception("No 'main' data store configured")
+
+        # We use local variables here to ensure that the databases do not have
+        # optional types.
+        self.main = main
+        self.state = state
+        self.persist_events = persist_events
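
The "local variables" comment refers to a standard mypy narrowing pattern: build into Optional locals, validate, then assign to attributes so the attributes are inferred as non-Optional. A minimal sketch with placeholder store classes, using the same py2-style type comments as the surrounding code:

    from typing import Optional

    class MainStore:
        pass

    class StateStore:
        pass

    class Databases:
        def __init__(self):
            main = MainStore()  # type: Optional[MainStore]
            state = StateStore()  # type: Optional[StateStore]

            if not main:
                raise Exception("No 'main' data store configured")
            if not state:
                raise Exception("No 'state' data store configured")

            # After the checks mypy narrows main/state to MainStore/StateStore,
            # so self.main and self.state are non-Optional from here on.
            self.main = main
            self.state = state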
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index 2193d8fdc5..82aac2bbf3 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -16,16 +16,16 @@
 
 import abc
 import logging
-from typing import List, Tuple
-
-from canonicaljson import json
+from typing import List, Optional, Tuple
 
 from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore, db_to_json
 from synapse.storage.database import DatabasePool
 from synapse.storage.util.id_generators import StreamIdGenerator
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
+from synapse.types import JsonDict
+from synapse.util import json_encoder
+from synapse.util.caches.descriptors import _CacheContext, cached
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 logger = logging.getLogger(__name__)
@@ -98,13 +98,15 @@ class AccountDataWorkerStore(SQLBaseStore):
             "get_account_data_for_user", get_account_data_for_user_txn
         )
 
-    @cachedInlineCallbacks(num_args=2, max_entries=5000)
-    def get_global_account_data_by_type_for_user(self, data_type, user_id):
+    @cached(num_args=2, max_entries=5000)
+    async def get_global_account_data_by_type_for_user(
+        self, data_type: str, user_id: str
+    ) -> Optional[JsonDict]:
         """
         Returns:
-            Deferred: A dict
+            The account data.
         """
-        result = yield self.db_pool.simple_select_one_onecol(
+        result = await self.db_pool.simple_select_one_onecol(
             table="account_data",
             keyvalues={"user_id": user_id, "account_data_type": data_type},
             retcol="content",
@@ -281,9 +283,11 @@ class AccountDataWorkerStore(SQLBaseStore):
             "get_updated_account_data_for_user", get_updated_account_data_for_user_txn
         )
 
-    @cachedInlineCallbacks(num_args=2, cache_context=True, max_entries=5000)
-    def is_ignored_by(self, ignored_user_id, ignorer_user_id, cache_context):
-        ignored_account_data = yield self.get_global_account_data_by_type_for_user(
+    @cached(num_args=2, cache_context=True, max_entries=5000)
+    async def is_ignored_by(
+        self, ignored_user_id: str, ignorer_user_id: str, cache_context: _CacheContext
+    ) -> bool:
+        ignored_account_data = await self.get_global_account_data_by_type_for_user(
             "m.ignored_user_list",
             ignorer_user_id,
             on_invalidate=cache_context.invalidate,
@@ -308,32 +312,35 @@ class AccountDataStore(AccountDataWorkerStore):
 
         super(AccountDataStore, self).__init__(database, db_conn, hs)
 
-    def get_max_account_data_stream_id(self):
+    def get_max_account_data_stream_id(self) -> int:
         """Get the current max stream id for the private user data stream
 
         Returns:
-            A deferred int.
+            The maximum stream ID.
         """
         return self._account_data_id_gen.get_current_token()
 
-    @defer.inlineCallbacks
-    def add_account_data_to_room(self, user_id, room_id, account_data_type, content):
+    async def add_account_data_to_room(
+        self, user_id: str, room_id: str, account_data_type: str, content: JsonDict
+    ) -> int:
         """Add some account_data to a room for a user.
+
         Args:
-            user_id(str): The user to add a tag for.
-            room_id(str): The room to add a tag for.
-            account_data_type(str): The type of account_data to add.
-            content(dict): A json object to associate with the tag.
+            user_id: The user to add a tag for.
+            room_id: The room to add a tag for.
+            account_data_type: The type of account_data to add.
+            content: A json object to associate with the tag.
+
         Returns:
-            A deferred that completes once the account_data has been added.
+            The maximum stream ID.
         """
-        content_json = json.dumps(content)
+        content_json = json_encoder.encode(content)
 
         with self._account_data_id_gen.get_next() as next_id:
             # no need to lock here as room_account_data has a unique constraint
             # on (user_id, room_id, account_data_type) so simple_upsert will
             # retry if there is a conflict.
-            yield self.db_pool.simple_upsert(
+            await self.db_pool.simple_upsert(
                 desc="add_room_account_data",
                 table="room_account_data",
                 keyvalues={
@@ -351,7 +358,7 @@ class AccountDataStore(AccountDataWorkerStore):
             # doesn't sound any worse than the whole update getting lost,
             # which is what would happen if we combined the two into one
             # transaction.
-            yield self._update_max_stream_id(next_id)
+            await self._update_max_stream_id(next_id)
 
             self._account_data_stream_cache.entity_has_changed(user_id, next_id)
             self.get_account_data_for_user.invalidate((user_id,))
@@ -360,26 +367,28 @@ class AccountDataStore(AccountDataWorkerStore):
                 (user_id, room_id, account_data_type), content
             )
 
-        result = self._account_data_id_gen.get_current_token()
-        return result
+        return self._account_data_id_gen.get_current_token()
 
-    @defer.inlineCallbacks
-    def add_account_data_for_user(self, user_id, account_data_type, content):
+    async def add_account_data_for_user(
+        self, user_id: str, account_data_type: str, content: JsonDict
+    ) -> int:
         """Add some account_data to a room for a user.
+
         Args:
-            user_id(str): The user to add a tag for.
-            account_data_type(str): The type of account_data to add.
-            content(dict): A json object to associate with the tag.
+            user_id: The user to add a tag for.
+            account_data_type: The type of account_data to add.
+            content: A json object to associate with the tag.
+
         Returns:
-            A deferred that completes once the account_data has been added.
+            The maximum stream ID.
         """
-        content_json = json.dumps(content)
+        content_json = json_encoder.encode(content)
 
         with self._account_data_id_gen.get_next() as next_id:
             # no need to lock here as account_data has a unique constraint on
             # (user_id, account_data_type) so simple_upsert will retry if
             # there is a conflict.
-            yield self.db_pool.simple_upsert(
+            await self.db_pool.simple_upsert(
                 desc="add_user_account_data",
                 table="account_data",
                 keyvalues={"user_id": user_id, "account_data_type": account_data_type},
@@ -397,7 +406,7 @@ class AccountDataStore(AccountDataWorkerStore):
             # Note: This is only here for backwards compat to allow admins to
             # roll back to a previous Synapse version. Next time we update the
             # database version we can remove this table.
-            yield self._update_max_stream_id(next_id)
+            await self._update_max_stream_id(next_id)
 
             self._account_data_stream_cache.entity_has_changed(user_id, next_id)
             self.get_account_data_for_user.invalidate((user_id,))
@@ -405,14 +414,13 @@ class AccountDataStore(AccountDataWorkerStore):
                 (account_data_type, user_id)
             )
 
-        result = self._account_data_id_gen.get_current_token()
-        return result
+        return self._account_data_id_gen.get_current_token()
 
-    def _update_max_stream_id(self, next_id):
+    def _update_max_stream_id(self, next_id: int):
         """Update the max stream_id
 
         Args:
-            next_id(int): The the revision to advance to.
+            next_id: The revision to advance to.
         """
 
         # Note: This is only here for backwards compat to allow admins to
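
Most of the remaining hunks in this patch apply the same mechanical recipe: a `@defer.inlineCallbacks` generator becomes a native coroutine, `yield` becomes `await`, and "Returns: A deferred ..." docstrings are rewritten to describe the plain value. A minimal before/after sketch (`fetch_thing` stands in for the `db_pool` calls):

    from twisted.internet import defer

    def fetch_thing():
        return defer.succeed(42)

    # Before: a generator driven by @defer.inlineCallbacks, yielding Deferreds.
    @defer.inlineCallbacks
    def get_thing_old():
        result = yield fetch_thing()
        return result

    # After: a native coroutine; Twisted Deferreds can be awaited directly.
    async def get_thing_new():
        result = await fetch_thing()
        return result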
diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py
index 055a3962dc..5cf1a88399 100644
--- a/synapse/storage/databases/main/appservice.py
+++ b/synapse/storage/databases/main/appservice.py
@@ -18,8 +18,6 @@ import re
 
 from canonicaljson import json
 
-from twisted.internet import defer
-
 from synapse.appservice import AppServiceTransaction
 from synapse.config.appservice import load_appservices
 from synapse.storage._base import SQLBaseStore, db_to_json
@@ -124,17 +122,15 @@ class ApplicationServiceStore(ApplicationServiceWorkerStore):
 class ApplicationServiceTransactionWorkerStore(
     ApplicationServiceWorkerStore, EventsWorkerStore
 ):
-    @defer.inlineCallbacks
-    def get_appservices_by_state(self, state):
+    async def get_appservices_by_state(self, state):
         """Get a list of application services based on their state.
 
         Args:
             state(ApplicationServiceState): The state to filter on.
         Returns:
-            A Deferred which resolves to a list of ApplicationServices, which
-            may be empty.
+            A list of ApplicationServices, which may be empty.
         """
-        results = yield self.db_pool.simple_select_list(
+        results = await self.db_pool.simple_select_list(
             "application_services_state", {"state": state}, ["as_id"]
         )
         # NB: This assumes this class is linked with ApplicationServiceStore
@@ -147,16 +143,15 @@ class ApplicationServiceTransactionWorkerStore(
                     services.append(service)
         return services
 
-    @defer.inlineCallbacks
-    def get_appservice_state(self, service):
+    async def get_appservice_state(self, service):
         """Get the application service state.
 
         Args:
             service(ApplicationService): The service whose state to get.
         Returns:
-            A Deferred which resolves to ApplicationServiceState.
+            An ApplicationServiceState.
         """
-        result = yield self.db_pool.simple_select_one(
+        result = await self.db_pool.simple_select_one(
             "application_services_state",
             {"as_id": service.id},
             ["state"],
@@ -270,16 +265,14 @@ class ApplicationServiceTransactionWorkerStore(
             "complete_appservice_txn", _complete_appservice_txn
         )
 
-    @defer.inlineCallbacks
-    def get_oldest_unsent_txn(self, service):
+    async def get_oldest_unsent_txn(self, service):
         """Get the oldest transaction which has not been sent for this
         service.
 
         Args:
             service(ApplicationService): The app service to get the oldest txn for.
         Returns:
-            A Deferred which resolves to an AppServiceTransaction or
-            None.
+            An AppServiceTransaction or None.
         """
 
         def _get_oldest_unsent_txn(txn):
@@ -298,7 +291,7 @@ class ApplicationServiceTransactionWorkerStore(
 
             return entry
 
-        entry = yield self.db_pool.runInteraction(
+        entry = await self.db_pool.runInteraction(
             "get_oldest_unsent_appservice_txn", _get_oldest_unsent_txn
         )
 
@@ -307,7 +300,7 @@ class ApplicationServiceTransactionWorkerStore(
 
         event_ids = db_to_json(entry["event_ids"])
 
-        events = yield self.get_events_as_list(event_ids)
+        events = await self.get_events_as_list(event_ids)
 
         return AppServiceTransaction(service=service, id=entry["txn_id"], events=events)
 
@@ -332,8 +325,7 @@ class ApplicationServiceTransactionWorkerStore(
             "set_appservice_last_pos", set_appservice_last_pos_txn
         )
 
-    @defer.inlineCallbacks
-    def get_new_events_for_appservice(self, current_id, limit):
+    async def get_new_events_for_appservice(self, current_id, limit):
         """Get all new evnets"""
 
         def get_new_events_for_appservice_txn(txn):
@@ -357,11 +349,11 @@ class ApplicationServiceTransactionWorkerStore(
 
             return upper_bound, [row[1] for row in rows]
 
-        upper_bound, event_ids = yield self.db_pool.runInteraction(
+        upper_bound, event_ids = await self.db_pool.runInteraction(
             "get_new_events_for_appservice", get_new_events_for_appservice_txn
         )
 
-        events = yield self.get_events_as_list(event_ids)
+        events = await self.get_events_as_list(event_ids)
 
         return upper_bound, events
 
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index 683afde52b..10de446065 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -172,7 +172,6 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
 
         self.get_latest_event_ids_in_room.invalidate((room_id,))
 
-        self.get_unread_message_count_for_user.invalidate_many((room_id,))
         self.get_unread_event_push_actions_by_room_for_user.invalidate_many((room_id,))
 
         if not backfilled:
diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py
index 1de8249563..f211ddbaf8 100644
--- a/synapse/storage/databases/main/censor_events.py
+++ b/synapse/storage/databases/main/censor_events.py
@@ -16,8 +16,6 @@
 import logging
 from typing import TYPE_CHECKING
 
-from twisted.internet import defer
-
 from synapse.events.utils import prune_event_dict
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore
@@ -148,17 +146,16 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
             updatevalues={"json": pruned_json},
         )
 
-    @defer.inlineCallbacks
-    def expire_event(self, event_id):
+    async def expire_event(self, event_id: str) -> None:
         """Retrieve and expire an event that has expired, and delete its associated
         expiry timestamp. If the event can't be retrieved, delete its associated
         timestamp so we don't try to expire it again in the future.
 
         Args:
-             event_id (str): The ID of the event to delete.
+             event_id: The ID of the event to delete.
         """
         # Try to retrieve the event's content from the database or the event cache.
-        event = yield self.get_event(event_id)
+        event = await self.get_event(event_id)
 
         def delete_expired_event_txn(txn):
             # Delete the expiry timestamp associated with this event from the database.
@@ -193,7 +190,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
                 txn, "_get_event_cache", (event.event_id,)
             )
 
-        yield self.db_pool.runInteraction(
+        await self.db_pool.runInteraction(
             "delete_expired_event", delete_expired_event_txn
         )
 
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index 712c8d0264..216a5925fc 100644
--- a/synapse/storage/databases/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -14,8 +14,7 @@
 # limitations under the License.
 
 import logging
-
-from twisted.internet import defer
+from typing import Dict, Optional, Tuple
 
 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.storage._base import SQLBaseStore
@@ -82,21 +81,19 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
             "devices_last_seen", self._devices_last_seen_update
         )
 
-    @defer.inlineCallbacks
-    def _remove_user_ip_nonunique(self, progress, batch_size):
+    async def _remove_user_ip_nonunique(self, progress, batch_size):
         def f(conn):
             txn = conn.cursor()
             txn.execute("DROP INDEX IF EXISTS user_ips_user_ip")
             txn.close()
 
-        yield self.db_pool.runWithConnection(f)
-        yield self.db_pool.updates._end_background_update(
+        await self.db_pool.runWithConnection(f)
+        await self.db_pool.updates._end_background_update(
             "user_ips_drop_nonunique_index"
         )
         return 1
 
-    @defer.inlineCallbacks
-    def _analyze_user_ip(self, progress, batch_size):
+    async def _analyze_user_ip(self, progress, batch_size):
         # Background update to analyze user_ips table before we run the
         # deduplication background update. The table may not have been analyzed
         # for ages due to the table locks.
@@ -106,14 +103,13 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
         def user_ips_analyze(txn):
             txn.execute("ANALYZE user_ips")
 
-        yield self.db_pool.runInteraction("user_ips_analyze", user_ips_analyze)
+        await self.db_pool.runInteraction("user_ips_analyze", user_ips_analyze)
 
-        yield self.db_pool.updates._end_background_update("user_ips_analyze")
+        await self.db_pool.updates._end_background_update("user_ips_analyze")
 
         return 1
 
-    @defer.inlineCallbacks
-    def _remove_user_ip_dupes(self, progress, batch_size):
+    async def _remove_user_ip_dupes(self, progress, batch_size):
         # This function works by scanning the user_ips table in batches
         # based on `last_seen`. For each row in a batch it searches the rest of
         # the table to see if there are any duplicates; if there are, then they
@@ -140,7 +136,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
                 return None
 
         # Get a last seen that has roughly `batch_size` since `begin_last_seen`
-        end_last_seen = yield self.db_pool.runInteraction(
+        end_last_seen = await self.db_pool.runInteraction(
             "user_ips_dups_get_last_seen", get_last_seen
         )
 
@@ -275,15 +271,14 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
                 txn, "user_ips_remove_dupes", {"last_seen": end_last_seen}
             )
 
-        yield self.db_pool.runInteraction("user_ips_dups_remove", remove)
+        await self.db_pool.runInteraction("user_ips_dups_remove", remove)
 
         if last:
-            yield self.db_pool.updates._end_background_update("user_ips_remove_dupes")
+            await self.db_pool.updates._end_background_update("user_ips_remove_dupes")
 
         return batch_size
 
-    @defer.inlineCallbacks
-    def _devices_last_seen_update(self, progress, batch_size):
+    async def _devices_last_seen_update(self, progress, batch_size):
         """Background update to insert last seen info into devices table
         """
 
@@ -346,12 +341,12 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
 
             return len(rows)
 
-        updated = yield self.db_pool.runInteraction(
+        updated = await self.db_pool.runInteraction(
             "_devices_last_seen_update", _devices_last_seen_update_txn
         )
 
         if not updated:
-            yield self.db_pool.updates._end_background_update("devices_last_seen")
+            await self.db_pool.updates._end_background_update("devices_last_seen")
 
         return updated
 
@@ -380,8 +375,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
         if self.user_ips_max_age:
             self._clock.looping_call(self._prune_old_user_ips, 5 * 1000)
 
-    @defer.inlineCallbacks
-    def insert_client_ip(
+    async def insert_client_ip(
         self, user_id, access_token, ip, user_agent, device_id, now=None
     ):
         if not now:
@@ -392,7 +386,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
             last_seen = self.client_ip_last_seen.get(key)
         except KeyError:
             last_seen = None
-        yield self.populate_monthly_active_users(user_id)
+        await self.populate_monthly_active_users(user_id)
         # Rate-limited inserts
         if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
             return
@@ -461,25 +455,25 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
                 # Failed to upsert, log and continue
                 logger.error("Failed to insert client IP %r: %r", entry, e)
 
-    @defer.inlineCallbacks
-    def get_last_client_ip_by_device(self, user_id, device_id):
+    async def get_last_client_ip_by_device(
+        self, user_id: str, device_id: Optional[str]
+    ) -> Dict[Tuple[str, str], dict]:
         """For each device_id listed, give the user_ip it was last seen on
 
         Args:
-            user_id (str)
-            device_id (str): If None fetches all devices for the user
+            user_id: The user to fetch devices for.
+            device_id: If None fetches all devices for the user
 
         Returns:
-            defer.Deferred: resolves to a dict, where the keys
-            are (user_id, device_id) tuples. The values are also dicts, with
-            keys giving the column names
+            A dictionary mapping a tuple of (user_id, device_id) to dicts, with
+            keys giving the column names from the devices table.
         """
 
         keyvalues = {"user_id": user_id}
         if device_id is not None:
             keyvalues["device_id"] = device_id
 
-        res = yield self.db_pool.simple_select_list(
+        res = await self.db_pool.simple_select_list(
             table="devices",
             keyvalues=keyvalues,
             retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"),
@@ -501,8 +495,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
                     }
         return ret
 
-    @defer.inlineCallbacks
-    def get_user_ip_and_agents(self, user):
+    async def get_user_ip_and_agents(self, user):
         user_id = user.to_string()
         results = {}
 
@@ -512,7 +505,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
                 user_agent, _, last_seen = self._batch_row_update[key]
                 results[(access_token, ip)] = (user_agent, last_seen)
 
-        rows = yield self.db_pool.simple_select_list(
+        rows = await self.db_pool.simple_select_list(
             table="user_ips",
             keyvalues={"user_id": user_id},
             retcols=["access_token", "ip", "user_agent", "last_seen"],
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 874ecdf8d2..1f6e995c4f 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -16,13 +16,10 @@
 import logging
 from typing import List, Tuple
 
-from canonicaljson import json
-
-from twisted.internet import defer
-
 from synapse.logging.opentracing import log_kv, set_tag, trace
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.database import DatabasePool
+from synapse.util import json_encoder
 from synapse.util.caches.expiringcache import ExpiringCache
 
 logger = logging.getLogger(__name__)
@@ -32,24 +29,31 @@ class DeviceInboxWorkerStore(SQLBaseStore):
     def get_to_device_stream_token(self):
         return self._device_inbox_id_gen.get_current_token()
 
-    def get_new_messages_for_device(
-        self, user_id, device_id, last_stream_id, current_stream_id, limit=100
-    ):
+    async def get_new_messages_for_device(
+        self,
+        user_id: str,
+        device_id: str,
+        last_stream_id: int,
+        current_stream_id: int,
+        limit: int = 100,
+    ) -> Tuple[List[dict], int]:
         """
         Args:
-            user_id(str): The recipient user_id.
-            device_id(str): The recipient device_id.
-            current_stream_id(int): The current position of the to device
+            user_id: The recipient user_id.
+            device_id: The recipient device_id.
+            last_stream_id: The last stream ID checked.
+            current_stream_id: The current position of the to device
                 message stream.
+            limit: The maximum number of messages to retrieve.
+
         Returns:
-            Deferred ([dict], int): List of messages for the device and where
-                in the stream the messages got to.
+            A tuple of the list of messages for the device and where in the
+            stream the messages got to.
         """
         has_changed = self._device_inbox_stream_cache.has_entity_changed(
             user_id, last_stream_id
         )
         if not has_changed:
-            return defer.succeed(([], current_stream_id))
+            return ([], current_stream_id)
 
         def get_new_messages_for_device_txn(txn):
             sql = (
@@ -70,20 +74,22 @@ class DeviceInboxWorkerStore(SQLBaseStore):
                 stream_pos = current_stream_id
             return messages, stream_pos
 
-        return self.db_pool.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_new_messages_for_device", get_new_messages_for_device_txn
         )
 
     @trace
-    @defer.inlineCallbacks
-    def delete_messages_for_device(self, user_id, device_id, up_to_stream_id):
+    async def delete_messages_for_device(
+        self, user_id: str, device_id: str, up_to_stream_id: int
+    ) -> int:
         """
         Args:
-            user_id(str): The recipient user_id.
-            device_id(str): The recipient device_id.
-            up_to_stream_id(int): Where to delete messages up to.
+            user_id: The recipient user_id.
+            device_id: The recipient device_id.
+            up_to_stream_id: Where to delete messages up to.
+
         Returns:
-            A deferred that resolves to the number of messages deleted.
+            The number of messages deleted.
         """
         # If we have cached the last stream id we've deleted up to, we can
         # check if there is likely to be anything that needs deleting
@@ -110,7 +116,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
             txn.execute(sql, (user_id, device_id, up_to_stream_id))
             return txn.rowcount
 
-        count = yield self.db_pool.runInteraction(
+        count = await self.db_pool.runInteraction(
             "delete_messages_for_device", delete_messages_for_device_txn
         )
 
@@ -129,9 +135,9 @@ class DeviceInboxWorkerStore(SQLBaseStore):
         return count
 
     @trace
-    def get_new_device_msgs_for_remote(
+    async def get_new_device_msgs_for_remote(
         self, destination, last_stream_id, current_stream_id, limit
-    ):
+    ) -> Tuple[List[dict], int]:
         """
         Args:
             destination(str): The name of the remote server.
@@ -140,8 +146,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
             current_stream_id(int|long): The current position of the device
                 message stream.
         Returns:
-            Deferred ([dict], int|long): List of messages for the device and where
-                in the stream the messages got to.
+            A tuple of the list of messages for the device and where in the
+            stream the messages got to.
         """
 
         set_tag("destination", destination)
@@ -154,11 +159,11 @@ class DeviceInboxWorkerStore(SQLBaseStore):
         )
         if not has_changed or last_stream_id == current_stream_id:
             log_kv({"message": "No new messages in stream"})
-            return defer.succeed(([], current_stream_id))
+            return ([], current_stream_id)
 
         if limit <= 0:
             # This can happen if we run out of room for EDUs in the transaction.
-            return defer.succeed(([], last_stream_id))
+            return ([], last_stream_id)
 
         @trace
         def get_new_messages_for_remote_destination_txn(txn):
@@ -179,7 +184,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
                 stream_pos = current_stream_id
             return messages, stream_pos
 
-        return self.db_pool.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_new_device_msgs_for_remote",
             get_new_messages_for_remote_destination_txn,
         )
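
Note the short-circuit returns in the hunks above: inside an `async def`, an early exit no longer needs `defer.succeed(...)`; a plain `return` hands the value straight to the caller's `await`. A minimal sketch of the same shape, with illustrative names:

    async def get_batch(has_changed: bool, current_stream_id: int):
        if not has_changed:
            # Previously this needed: return defer.succeed(([], current_stream_id))
            return ([], current_stream_id)
        ...  # fall through to the real database query
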
@@ -291,16 +296,15 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
             self.DEVICE_INBOX_STREAM_ID, self._background_drop_index_device_inbox
         )
 
-    @defer.inlineCallbacks
-    def _background_drop_index_device_inbox(self, progress, batch_size):
+    async def _background_drop_index_device_inbox(self, progress, batch_size):
         def reindex_txn(conn):
             txn = conn.cursor()
             txn.execute("DROP INDEX IF EXISTS device_inbox_stream_id")
             txn.close()
 
-        yield self.db_pool.runWithConnection(reindex_txn)
+        await self.db_pool.runWithConnection(reindex_txn)
 
-        yield self.db_pool.updates._end_background_update(self.DEVICE_INBOX_STREAM_ID)
+        await self.db_pool.updates._end_background_update(self.DEVICE_INBOX_STREAM_ID)
 
         return 1
 
@@ -321,21 +325,21 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
         )
 
     @trace
-    @defer.inlineCallbacks
-    def add_messages_to_device_inbox(
-        self, local_messages_by_user_then_device, remote_messages_by_destination
-    ):
+    async def add_messages_to_device_inbox(
+        self,
+        local_messages_by_user_then_device: dict,
+        remote_messages_by_destination: dict,
+    ) -> int:
         """Used to send messages from this server.
 
         Args:
-            sender_user_id(str): The ID of the user sending these messages.
-            local_messages_by_user_and_device(dict):
+            local_messages_by_user_then_device:
                 Dictionary of user_id to device_id to message.
-            remote_messages_by_destination(dict):
+            remote_messages_by_destination:
                 Dictionary of destination server_name to the EDU JSON to send.
+
         Returns:
-            A deferred stream_id that resolves when the messages have been
-            inserted.
+            The new stream_id.
         """
 
         def add_messages_txn(txn, now_ms, stream_id):
@@ -354,13 +358,13 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
             )
             rows = []
             for destination, edu in remote_messages_by_destination.items():
-                edu_json = json.dumps(edu)
+                edu_json = json_encoder.encode(edu)
                 rows.append((destination, stream_id, now_ms, edu_json))
             txn.executemany(sql, rows)
 
         with self._device_inbox_id_gen.get_next() as stream_id:
             now_ms = self.clock.time_msec()
-            yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 "add_messages_to_device_inbox", add_messages_txn, now_ms, stream_id
             )
             for user_id in local_messages_by_user_then_device.keys():
@@ -372,10 +376,9 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
 
         return self._device_inbox_id_gen.get_current_token()
 
-    @defer.inlineCallbacks
-    def add_messages_from_remote_to_device_inbox(
-        self, origin, message_id, local_messages_by_user_then_device
-    ):
+    async def add_messages_from_remote_to_device_inbox(
+        self, origin: str, message_id: str, local_messages_by_user_then_device: dict
+    ) -> int:
         def add_messages_txn(txn, now_ms, stream_id):
             # Check if we've already inserted a matching message_id for that
             # origin. This can happen if the origin doesn't receive our
@@ -410,7 +413,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
 
         with self._device_inbox_id_gen.get_next() as stream_id:
             now_ms = self.clock.time_msec()
-            yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 "add_messages_from_remote_to_device_inbox",
                 add_messages_txn,
                 now_ms,
@@ -432,7 +435,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
                 # Handle wildcard device_ids.
                 sql = "SELECT device_id FROM devices WHERE user_id = ?"
                 txn.execute(sql, (user_id,))
-                message_json = json.dumps(messages_by_device["*"])
+                message_json = json_encoder.encode(messages_by_device["*"])
                 for row in txn:
                     # Add the message for all devices for this user on this
                     # server.
@@ -454,7 +457,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
                     # Only insert into the local inbox if the device exists on
                     # this server
                     device = row[0]
-                    message_json = json.dumps(messages_by_device[device])
+                    message_json = json_encoder.encode(messages_by_device[device])
                     messages_json_for_user[device] = message_json
 
             if messages_json_for_user:
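
Alongside the async conversion, deviceinbox.py swaps canonicaljson's `json.dumps` for a shared `json_encoder` imported from `synapse.util`. A hedged sketch of what such a module-level encoder plausibly looks like (the exact constructor options are an assumption here):

    import json

    # One encoder instance, created once and reused by every call site,
    # emitting compact JSON (no whitespace after separators).
    json_encoder = json.JSONEncoder(separators=(",", ":"))

    edu_json = json_encoder.encode({"edu_type": "m.example", "content": {}})

Reusing a single encoder avoids constructing a JSONEncoder per call and keeps the serialised form consistent across tables.
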
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index d6c6f0ac34..f9385a2c83 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -15,11 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import List, Optional, Set, Tuple
-
-from canonicaljson import json
-
-from twisted.internet import defer
+from typing import Dict, Iterable, List, Optional, Set, Tuple
 
 from synapse.api.errors import Codes, StoreError
 from synapse.logging.opentracing import (
@@ -36,6 +32,7 @@ from synapse.storage.database import (
     make_tuple_comparison_clause,
 )
 from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key
+from synapse.util import json_encoder
 from synapse.util.caches.descriptors import (
     Cache,
     cached,
@@ -55,13 +52,13 @@ BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES = "remove_dup_outbound_pokes"
 
 
 class DeviceWorkerStore(SQLBaseStore):
-    def get_device(self, user_id, device_id):
+    def get_device(self, user_id: str, device_id: str):
         """Retrieve a device. Only returns devices that are not marked as
         hidden.
 
         Args:
-            user_id (str): The ID of the user which owns the device
-            device_id (str): The ID of the device to retrieve
+            user_id: The ID of the user which owns the device
+            device_id: The ID of the device to retrieve
         Returns:
             defer.Deferred for a dict containing the device information
         Raises:
@@ -74,19 +71,17 @@ class DeviceWorkerStore(SQLBaseStore):
             desc="get_device",
         )
 
-    @defer.inlineCallbacks
-    def get_devices_by_user(self, user_id):
+    async def get_devices_by_user(self, user_id: str) -> Dict[str, Dict[str, str]]:
         """Retrieve all of a user's registered devices. Only returns devices
         that are not marked as hidden.
 
         Args:
-            user_id (str):
+            user_id: The user to fetch devices for.
         Returns:
-            defer.Deferred: resolves to a dict from device_id to a dict
-            containing "device_id", "user_id" and "display_name" for each
-            device.
+            A mapping from device_id to a dict containing "device_id", "user_id"
+            and "display_name" for each device.
         """
-        devices = yield self.db_pool.simple_select_list(
+        devices = await self.db_pool.simple_select_list(
             table="devices",
             keyvalues={"user_id": user_id, "hidden": False},
             retcols=("user_id", "device_id", "display_name"),
@@ -96,19 +91,20 @@ class DeviceWorkerStore(SQLBaseStore):
         return {d["device_id"]: d for d in devices}
 
     @trace
-    @defer.inlineCallbacks
-    def get_device_updates_by_remote(self, destination, from_stream_id, limit):
+    async def get_device_updates_by_remote(
+        self, destination: str, from_stream_id: int, limit: int
+    ) -> Tuple[int, List[Tuple[str, dict]]]:
         """Get a stream of device updates to send to the given remote server.
 
         Args:
-            destination (str): The host the device updates are intended for
-            from_stream_id (int): The minimum stream_id to filter updates by, exclusive
-            limit (int): Maximum number of device updates to return
+            destination: The host the device updates are intended for
+            from_stream_id: The minimum stream_id to filter updates by, exclusive
+            limit: Maximum number of device updates to return
+
         Returns:
-            Deferred[tuple[int, list[tuple[string,dict]]]]:
-                current stream id (ie, the stream id of the last update included in the
-                response), and the list of updates, where each update is a pair of EDU
-                type and EDU contents
+            A tuple of the current stream id (ie, the stream id of the last
+            update included in the response) and the list of updates, where
+            each update is a pair of EDU type and EDU contents.
         """
         now_stream_id = self._device_list_id_gen.get_current_token()
 
@@ -118,7 +114,7 @@ class DeviceWorkerStore(SQLBaseStore):
         if not has_changed:
             return now_stream_id, []
 
-        updates = yield self.db_pool.runInteraction(
+        updates = await self.db_pool.runInteraction(
             "get_device_updates_by_remote",
             self._get_device_updates_by_remote_txn,
             destination,
@@ -137,7 +133,7 @@ class DeviceWorkerStore(SQLBaseStore):
         master_key_by_user = {}
         self_signing_key_by_user = {}
         for user in users:
-            cross_signing_key = yield self.get_e2e_cross_signing_key(user, "master")
+            cross_signing_key = await self.get_e2e_cross_signing_key(user, "master")
             if cross_signing_key:
                 key_id, verify_key = get_verify_key_from_cross_signing_key(
                     cross_signing_key
@@ -150,7 +146,7 @@ class DeviceWorkerStore(SQLBaseStore):
                     "device_id": verify_key.version,
                 }
 
-            cross_signing_key = yield self.get_e2e_cross_signing_key(
+            cross_signing_key = await self.get_e2e_cross_signing_key(
                 user, "self_signing"
             )
             if cross_signing_key:
@@ -201,7 +197,7 @@ class DeviceWorkerStore(SQLBaseStore):
                 if update_stream_id > previous_update_stream_id:
                     query_map[key] = (update_stream_id, update_context)
 
-        results = yield self._get_device_update_edus_by_remote(
+        results = await self._get_device_update_edus_by_remote(
             destination, from_stream_id, query_map
         )
 
@@ -214,16 +210,21 @@ class DeviceWorkerStore(SQLBaseStore):
         return now_stream_id, results
 
     def _get_device_updates_by_remote_txn(
-        self, txn, destination, from_stream_id, now_stream_id, limit
+        self,
+        txn: LoggingTransaction,
+        destination: str,
+        from_stream_id: int,
+        now_stream_id: int,
+        limit: int,
     ):
         """Return device update information for a given remote destination
 
         Args:
-            txn (LoggingTransaction): The transaction to execute
-            destination (str): The host the device updates are intended for
-            from_stream_id (int): The minimum stream_id to filter updates by, exclusive
-            now_stream_id (int): The maximum stream_id to filter updates by, inclusive
-            limit (int): Maximum number of device updates to return
+            txn: The transaction to execute
+            destination: The host the device updates are intended for
+            from_stream_id: The minimum stream_id to filter updates by, exclusive
+            now_stream_id: The maximum stream_id to filter updates by, inclusive
+            limit: Maximum number of device updates to return
 
         Returns:
             List: List of device updates
@@ -239,23 +240,26 @@ class DeviceWorkerStore(SQLBaseStore):
 
         return list(txn)
 
-    @defer.inlineCallbacks
-    def _get_device_update_edus_by_remote(self, destination, from_stream_id, query_map):
+    async def _get_device_update_edus_by_remote(
+        self,
+        destination: str,
+        from_stream_id: int,
+        query_map: Dict[Tuple[str, str], Tuple[int, Optional[str]]],
+    ) -> List[Tuple[str, dict]]:
         """Returns a list of device update EDUs as well as E2EE keys
 
         Args:
-            destination (str): The host the device updates are intended for
-            from_stream_id (int): The minimum stream_id to filter updates by, exclusive
+            destination: The host the device updates are intended for
+            from_stream_id: The minimum stream_id to filter updates by, exclusive
             query_map (Dict[(str, str): (int, str|None)]): Dictionary mapping
-                user_id/device_id to update stream_id and the relevent json-encoded
+                user_id/device_id to update stream_id and the relevant json-encoded
                 opentracing context
 
         Returns:
-            List[Dict]: List of objects representing an device update EDU
-
+            A list of objects representing a device update EDU.
         """
         devices = (
-            yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 "_get_e2e_device_keys_txn",
                 self._get_e2e_device_keys_txn,
                 query_map.keys(),
@@ -270,7 +274,7 @@ class DeviceWorkerStore(SQLBaseStore):
         for user_id, user_devices in devices.items():
             # The prev_id for the first row is always the last row before
             # `from_stream_id`
-            prev_id = yield self._get_last_device_update_for_remote_user(
+            prev_id = await self._get_last_device_update_for_remote_user(
                 destination, user_id, from_stream_id
             )
 
@@ -314,7 +318,7 @@ class DeviceWorkerStore(SQLBaseStore):
         return results
 
     def _get_last_device_update_for_remote_user(
-        self, destination, user_id, from_stream_id
+        self, destination: str, user_id: str, from_stream_id: int
     ):
         def f(txn):
             prev_sent_id_sql = """
@@ -328,7 +332,7 @@ class DeviceWorkerStore(SQLBaseStore):
 
         return self.db_pool.runInteraction("get_last_device_update_for_remote_user", f)
 
-    def mark_as_sent_devices_by_remote(self, destination, stream_id):
+    def mark_as_sent_devices_by_remote(self, destination: str, stream_id: int):
         """Mark that updates have successfully been sent to the destination.
         """
         return self.db_pool.runInteraction(
@@ -338,7 +342,9 @@ class DeviceWorkerStore(SQLBaseStore):
             stream_id,
         )
 
-    def _mark_as_sent_devices_by_remote_txn(self, txn, destination, stream_id):
+    def _mark_as_sent_devices_by_remote_txn(
+        self, txn: LoggingTransaction, destination: str, stream_id: int
+    ) -> None:
         # We update the device_lists_outbound_last_success with the successfully
         # poked users.
         sql = """
@@ -366,17 +372,21 @@ class DeviceWorkerStore(SQLBaseStore):
         """
         txn.execute(sql, (destination, stream_id))
 
-    @defer.inlineCallbacks
-    def add_user_signature_change_to_streams(self, from_user_id, user_ids):
+    async def add_user_signature_change_to_streams(
+        self, from_user_id: str, user_ids: List[str]
+    ) -> int:
         """Persist that a user has made new signatures
 
         Args:
-            from_user_id (str): the user who made the signatures
-            user_ids (list[str]): the users who were signed
+            from_user_id: the user who made the signatures
+            user_ids: the users who were signed
+
+        Returns:
+            The new stream ID.
         """
 
         with self._device_list_id_gen.get_next() as stream_id:
-            yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 "add_user_sig_change_to_streams",
                 self._add_user_signature_change_txn,
                 from_user_id,
@@ -385,7 +395,13 @@ class DeviceWorkerStore(SQLBaseStore):
             )
         return stream_id
 
-    def _add_user_signature_change_txn(self, txn, from_user_id, user_ids, stream_id):
+    def _add_user_signature_change_txn(
+        self,
+        txn: LoggingTransaction,
+        from_user_id: str,
+        user_ids: List[str],
+        stream_id: int,
+    ) -> None:
         txn.call_after(
             self._user_signature_stream_cache.entity_has_changed,
             from_user_id,
@@ -397,33 +413,34 @@ class DeviceWorkerStore(SQLBaseStore):
             values={
                 "stream_id": stream_id,
                 "from_user_id": from_user_id,
-                "user_ids": json.dumps(user_ids),
+                "user_ids": json_encoder.encode(user_ids),
             },
         )
 
-    def get_device_stream_token(self):
+    def get_device_stream_token(self) -> int:
         return self._device_list_id_gen.get_current_token()
 
     @trace
-    @defer.inlineCallbacks
-    def get_user_devices_from_cache(self, query_list):
+    async def get_user_devices_from_cache(
+        self, query_list: List[Tuple[str, str]]
+    ) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]:
         """Get the devices (and keys if any) for remote users from the cache.
 
         Args:
-            query_list(list): List of (user_id, device_ids), if device_ids is
+            query_list: List of (user_id, device_ids); if device_ids is
                 falsey then return all device ids for that user.
 
         Returns:
-            (user_ids_not_in_cache, results_map), where user_ids_not_in_cache is
-            a set of user_ids and results_map is a mapping of
-            user_id -> device_id -> device_info
+            A tuple of (user_ids_not_in_cache, results_map), where
+            user_ids_not_in_cache is a set of user_ids and results_map is a
+            mapping of user_id -> device_id -> device_info.
         """
         user_ids = {user_id for user_id, _ in query_list}
-        user_map = yield self.get_device_list_last_stream_id_for_remotes(list(user_ids))
+        user_map = await self.get_device_list_last_stream_id_for_remotes(list(user_ids))
 
         # We go and check if any of the users need to have their device lists
         # resynced. If they do then we remove them from the cached list.
-        users_needing_resync = yield self.get_user_ids_requiring_device_list_resync(
+        users_needing_resync = await self.get_user_ids_requiring_device_list_resync(
             user_ids
         )
         user_ids_in_cache = {
@@ -437,19 +454,19 @@ class DeviceWorkerStore(SQLBaseStore):
                 continue
 
             if device_id:
-                device = yield self._get_cached_user_device(user_id, device_id)
+                device = await self._get_cached_user_device(user_id, device_id)
                 results.setdefault(user_id, {})[device_id] = device
             else:
-                results[user_id] = yield self.get_cached_devices_for_user(user_id)
+                results[user_id] = await self.get_cached_devices_for_user(user_id)
 
         set_tag("in_cache", results)
         set_tag("not_in_cache", user_ids_not_in_cache)
 
         return user_ids_not_in_cache, results
 
-    @cachedInlineCallbacks(num_args=2, tree=True)
-    def _get_cached_user_device(self, user_id, device_id):
-        content = yield self.db_pool.simple_select_one_onecol(
+    @cached(num_args=2, tree=True)
+    async def _get_cached_user_device(self, user_id: str, device_id: str) -> JsonDict:
+        content = await self.db_pool.simple_select_one_onecol(
             table="device_lists_remote_cache",
             keyvalues={"user_id": user_id, "device_id": device_id},
             retcol="content",
@@ -457,9 +474,9 @@ class DeviceWorkerStore(SQLBaseStore):
         )
         return db_to_json(content)
 
-    @cachedInlineCallbacks()
-    def get_cached_devices_for_user(self, user_id):
-        devices = yield self.db_pool.simple_select_list(
+    @cached()
+    async def get_cached_devices_for_user(self, user_id: str) -> Dict[str, JsonDict]:
+        devices = await self.db_pool.simple_select_list(
             table="device_lists_remote_cache",
             keyvalues={"user_id": user_id},
             retcols=("device_id", "content"),
@@ -469,11 +486,11 @@ class DeviceWorkerStore(SQLBaseStore):
             device["device_id"]: db_to_json(device["content"]) for device in devices
         }
 
-    def get_devices_with_keys_by_user(self, user_id):
+    def get_devices_with_keys_by_user(self, user_id: str):
         """Get all devices (with any device keys) for a user
 
         Returns:
-            (stream_id, devices)
+            Deferred which resolves to (stream_id, devices)
         """
         return self.db_pool.runInteraction(
             "get_devices_with_keys_by_user",
@@ -481,7 +498,9 @@ class DeviceWorkerStore(SQLBaseStore):
             user_id,
         )
 
-    def _get_devices_with_keys_by_user_txn(self, txn, user_id):
+    def _get_devices_with_keys_by_user_txn(
+        self, txn: LoggingTransaction, user_id: str
+    ) -> Tuple[int, List[JsonDict]]:
         now_stream_id = self._device_list_id_gen.get_current_token()
 
         devices = self._get_e2e_device_keys_txn(
@@ -514,17 +533,18 @@ class DeviceWorkerStore(SQLBaseStore):
 
         return now_stream_id, []
 
-    def get_users_whose_devices_changed(self, from_key, user_ids):
+    async def get_users_whose_devices_changed(
+        self, from_key: str, user_ids: Iterable[str]
+    ) -> Set[str]:
         """Get set of users whose devices have changed since `from_key` that
         are in the given list of user_ids.
 
         Args:
-            from_key (str): The device lists stream token
-            user_ids (Iterable[str])
+            from_key: The device lists stream token
+            user_ids: The user IDs to query for devices.
 
         Returns:
-            Deferred[set[str]]: The set of user_ids whose devices have changed
-            since `from_key`
+            The set of user_ids whose devices have changed since `from_key`
         """
         from_key = int(from_key)
 
@@ -535,7 +555,7 @@ class DeviceWorkerStore(SQLBaseStore):
         )
 
         if not to_check:
-            return defer.succeed(set())
+            return set()
 
         def _get_users_whose_devices_changed_txn(txn):
             changes = set()
@@ -555,18 +575,22 @@ class DeviceWorkerStore(SQLBaseStore):
 
             return changes
 
-        return self.db_pool.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_users_whose_devices_changed", _get_users_whose_devices_changed_txn
         )
 
-    @defer.inlineCallbacks
-    def get_users_whose_signatures_changed(self, user_id, from_key):
+    async def get_users_whose_signatures_changed(
+        self, user_id: str, from_key: str
+    ) -> Set[str]:
         """Get the users who have new cross-signing signatures made by `user_id` since
         `from_key`.
 
         Args:
-            user_id (str): the user who made the signatures
-            from_key (str): The device lists stream token
+            user_id: the user who made the signatures
+            from_key: The device lists stream token
+
+        Returns:
+            A set of user IDs with updated signatures.
         """
         from_key = int(from_key)
         if self._user_signature_stream_cache.has_entity_changed(user_id, from_key):
@@ -574,7 +598,7 @@ class DeviceWorkerStore(SQLBaseStore):
                 SELECT DISTINCT user_ids FROM user_signature_stream
                 WHERE from_user_id = ? AND stream_id > ?
             """
-            rows = yield self.db_pool.execute(
+            rows = await self.db_pool.execute(
                 "get_users_whose_signatures_changed", None, sql, user_id, from_key
             )
             return {user for row in rows for user in db_to_json(row[0])}
@@ -600,7 +624,7 @@ class DeviceWorkerStore(SQLBaseStore):
             between the requested tokens due to the limit.
 
             The token returned can be used in a subsequent call to this
-            function to get further updatees.
+            function to get further updates.
 
             The updates are a list of 2-tuples of stream ID and the row data
         """
@@ -637,7 +661,7 @@ class DeviceWorkerStore(SQLBaseStore):
         )
 
     @cached(max_entries=10000)
-    def get_device_list_last_stream_id_for_remote(self, user_id):
+    def get_device_list_last_stream_id_for_remote(self, user_id: str):
         """Get the last stream_id we got for a user. May be None if we haven't
         got any information for them.
         """
@@ -654,7 +678,7 @@ class DeviceWorkerStore(SQLBaseStore):
         list_name="user_ids",
         inlineCallbacks=True,
     )
-    def get_device_list_last_stream_id_for_remotes(self, user_ids):
+    def get_device_list_last_stream_id_for_remotes(self, user_ids: Iterable[str]):
         rows = yield self.db_pool.simple_select_many_batch(
             table="device_lists_remote_extremeties",
             column="user_id",
@@ -668,8 +692,7 @@ class DeviceWorkerStore(SQLBaseStore):
 
         return results
 
-    @defer.inlineCallbacks
-    def get_user_ids_requiring_device_list_resync(
+    async def get_user_ids_requiring_device_list_resync(
         self, user_ids: Optional[Collection[str]] = None,
     ) -> Set[str]:
         """Given a list of remote users return the list of users that we
@@ -680,7 +703,7 @@ class DeviceWorkerStore(SQLBaseStore):
             The IDs of users whose device lists need resync.
         """
         if user_ids:
-            rows = yield self.db_pool.simple_select_many_batch(
+            rows = await self.db_pool.simple_select_many_batch(
                 table="device_lists_remote_resync",
                 column="user_id",
                 iterable=user_ids,
@@ -688,7 +711,7 @@ class DeviceWorkerStore(SQLBaseStore):
                 desc="get_user_ids_requiring_device_list_resync_with_iterable",
             )
         else:
-            rows = yield self.db_pool.simple_select_list(
+            rows = await self.db_pool.simple_select_list(
                 table="device_lists_remote_resync",
                 keyvalues=None,
                 retcols=("user_id",),
@@ -709,7 +732,7 @@ class DeviceWorkerStore(SQLBaseStore):
             desc="make_remote_user_device_cache_as_stale",
         )
 
-    def mark_remote_user_device_list_as_unsubscribed(self, user_id):
+    def mark_remote_user_device_list_as_unsubscribed(self, user_id: str):
         """Mark that we no longer track device lists for remote user.
         """
 
@@ -940,16 +963,15 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
             "drop_device_lists_outbound_last_success_non_unique_idx",
         )
 
-    @defer.inlineCallbacks
-    def _drop_device_list_streams_non_unique_indexes(self, progress, batch_size):
+    async def _drop_device_list_streams_non_unique_indexes(self, progress, batch_size):
         def f(conn):
             txn = conn.cursor()
             txn.execute("DROP INDEX IF EXISTS device_lists_remote_cache_id")
             txn.execute("DROP INDEX IF EXISTS device_lists_remote_extremeties_id")
             txn.close()
 
-        yield self.db_pool.runWithConnection(f)
-        yield self.db_pool.updates._end_background_update(
+        await self.db_pool.runWithConnection(f)
+        await self.db_pool.updates._end_background_update(
             DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES
         )
         return 1
@@ -1029,18 +1051,20 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
 
         self._clock.looping_call(self._prune_old_outbound_device_pokes, 60 * 60 * 1000)
 
-    @defer.inlineCallbacks
-    def store_device(self, user_id, device_id, initial_device_display_name):
+    async def store_device(
+        self, user_id: str, device_id: str, initial_device_display_name: str
+    ) -> bool:
         """Ensure the given device is known; add it to the store if not
 
         Args:
-            user_id (str): id of user associated with the device
-            device_id (str): id of device
-            initial_device_display_name (str): initial displayname of the
-               device. Ignored if device exists.
+            user_id: id of user associated with the device
+            device_id: id of device
+            initial_device_display_name: initial displayname of the device.
+                Ignored if device exists.
+
         Returns:
-            defer.Deferred: boolean whether the device was inserted or an
-                existing device existed with that ID.
+            Whether the device was inserted or an existing device existed with that ID.
+
         Raises:
             StoreError: if the device is already in use
         """
@@ -1049,7 +1073,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             return False
 
         try:
-            inserted = yield self.db_pool.simple_insert(
+            inserted = await self.db_pool.simple_insert(
                 "devices",
                 values={
                     "user_id": user_id,
@@ -1063,7 +1087,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             if not inserted:
                 # if the device already exists, check if it's a real device, or
                 # if the device ID is reserved by something else
-                hidden = yield self.db_pool.simple_select_one_onecol(
+                hidden = await self.db_pool.simple_select_one_onecol(
                     "devices",
                     keyvalues={"user_id": user_id, "device_id": device_id},
                     retcol="hidden",
@@ -1088,17 +1112,14 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             )
             raise StoreError(500, "Problem storing device.")
 
-    @defer.inlineCallbacks
-    def delete_device(self, user_id, device_id):
+    async def delete_device(self, user_id: str, device_id: str) -> None:
         """Delete a device.
 
         Args:
-            user_id (str): The ID of the user which owns the device
-            device_id (str): The ID of the device to delete
-        Returns:
-            defer.Deferred
+            user_id: The ID of the user which owns the device
+            device_id: The ID of the device to delete
         """
-        yield self.db_pool.simple_delete_one(
+        await self.db_pool.simple_delete_one(
             table="devices",
             keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
             desc="delete_device",
@@ -1106,17 +1127,14 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
 
         self.device_id_exists_cache.invalidate((user_id, device_id))
 
-    @defer.inlineCallbacks
-    def delete_devices(self, user_id, device_ids):
+    async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
         """Deletes several devices.
 
         Args:
-            user_id (str): The ID of the user which owns the devices
-            device_ids (list): The IDs of the devices to delete
-        Returns:
-            defer.Deferred
+            user_id: The ID of the user which owns the devices
+            device_ids: The IDs of the devices to delete
         """
-        yield self.db_pool.simple_delete_many(
+        await self.db_pool.simple_delete_many(
             table="devices",
             column="device_id",
             iterable=device_ids,
@@ -1126,26 +1144,25 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         for device_id in device_ids:
             self.device_id_exists_cache.invalidate((user_id, device_id))
 
-    def update_device(self, user_id, device_id, new_display_name=None):
+    async def update_device(
+        self, user_id: str, device_id: str, new_display_name: Optional[str] = None
+    ) -> None:
         """Update a device. Only updates the device if it is not marked as
         hidden.
 
         Args:
-            user_id (str): The ID of the user which owns the device
-            device_id (str): The ID of the device to update
-            new_display_name (str|None): new displayname for device; None
-               to leave unchanged
+            user_id: The ID of the user which owns the device
+            device_id: The ID of the device to update
+            new_display_name: new displayname for device; None to leave unchanged
         Raises:
             StoreError: if the device is not found
-        Returns:
-            defer.Deferred
         """
         updates = {}
         if new_display_name is not None:
             updates["display_name"] = new_display_name
         if not updates:
-            return defer.succeed(None)
-        return self.db_pool.simple_update_one(
+            return None
+        await self.db_pool.simple_update_one(
             table="devices",
             keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
             updatevalues=updates,
@@ -1153,7 +1170,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         )
 
     def update_remote_device_list_cache_entry(
-        self, user_id, device_id, content, stream_id
+        self, user_id: str, device_id: str, content: JsonDict, stream_id: int
     ):
         """Updates a single device in the cache of a remote user's devicelist.
 
@@ -1161,10 +1178,10 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         device list.
 
         Args:
-            user_id (str): User to update device list for
-            device_id (str): ID of decivice being updated
-            content (dict): new data on this device
-            stream_id (int): the version of the device list
+            user_id: User to update device list for
+            device_id: ID of the device being updated
+            content: new data on this device
+            stream_id: the version of the device list
 
         Returns:
             Deferred[None]
@@ -1179,8 +1196,13 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         )
 
     def _update_remote_device_list_cache_entry_txn(
-        self, txn, user_id, device_id, content, stream_id
-    ):
+        self,
+        txn: LoggingTransaction,
+        user_id: str,
+        device_id: str,
+        content: JsonDict,
+        stream_id: int,
+    ) -> None:
         if content.get("deleted"):
             self.db_pool.simple_delete_txn(
                 txn,
@@ -1194,7 +1216,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
                 txn,
                 table="device_lists_remote_cache",
                 keyvalues={"user_id": user_id, "device_id": device_id},
-                values={"content": json.dumps(content)},
+                values={"content": json_encoder.encode(content)},
                 # we don't need to lock, because we assume we are the only thread
                 # updating this user's devices.
                 lock=False,
@@ -1216,16 +1238,18 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             lock=False,
         )
 
-    def update_remote_device_list_cache(self, user_id, devices, stream_id):
+    def update_remote_device_list_cache(
+        self, user_id: str, devices: List[dict], stream_id: int
+    ):
         """Replace the entire cache of the remote user's devices.
 
         Note: assumes that we are the only thread that can be updating this user's
         device list.
 
         Args:
-            user_id (str): User to update device list for
-            devices (list[dict]): list of device objects supplied over federation
-            stream_id (int): the version of the device list
+            user_id: User to update device list for
+            devices: list of device objects supplied over federation
+            stream_id: the version of the device list
 
         Returns:
             Deferred[None]
@@ -1238,7 +1262,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             stream_id,
         )
 
-    def _update_remote_device_list_cache_txn(self, txn, user_id, devices, stream_id):
+    def _update_remote_device_list_cache_txn(
+        self, txn: LoggingTransaction, user_id: str, devices: List[dict], stream_id: int
+    ):
         self.db_pool.simple_delete_txn(
             txn, table="device_lists_remote_cache", keyvalues={"user_id": user_id}
         )
@@ -1250,7 +1276,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
                 {
                     "user_id": user_id,
                     "device_id": content["device_id"],
-                    "content": json.dumps(content),
+                    "content": json_encoder.encode(content),
                 }
                 for content in devices
             ],
@@ -1279,8 +1305,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             txn, table="device_lists_remote_resync", keyvalues={"user_id": user_id},
         )
 
-    @defer.inlineCallbacks
-    def add_device_change_to_streams(self, user_id, device_ids, hosts):
+    async def add_device_change_to_streams(
+        self, user_id: str, device_ids: Collection[str], hosts: List[str]
+    ):
         """Persist that a user's devices have been updated, and which hosts
         (if any) should be poked.
         """
@@ -1288,7 +1315,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             return
 
         with self._device_list_id_gen.get_next_mult(len(device_ids)) as stream_ids:
-            yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 "add_device_change_to_stream",
                 self._add_device_change_to_stream_txn,
                 user_id,
@@ -1303,7 +1330,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         with self._device_list_id_gen.get_next_mult(
             len(hosts) * len(device_ids)
         ) as stream_ids:
-            yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 "add_device_outbound_poke_to_stream",
                 self._add_device_outbound_poke_to_stream_txn,
                 user_id,
@@ -1348,7 +1375,13 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         )
 
     def _add_device_outbound_poke_to_stream_txn(
-        self, txn, user_id, device_ids, hosts, stream_ids, context,
+        self,
+        txn: LoggingTransaction,
+        user_id: str,
+        device_ids: Collection[str],
+        hosts: List[str],
+        stream_ids: List[int],
+        context: Dict[str, str],
     ):
         for host in hosts:
             txn.call_after(
@@ -1371,7 +1404,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
                     "device_id": device_id,
                     "sent": False,
                     "ts": now,
-                    "opentracing_context": json.dumps(context)
+                    "opentracing_context": json_encoder.encode(context)
                     if whitelisted_homeserver(destination)
                     else "{}",
                 }
@@ -1380,7 +1413,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             ],
         )
 
-    def _prune_old_outbound_device_pokes(self, prune_age=24 * 60 * 60 * 1000):
+    def _prune_old_outbound_device_pokes(self, prune_age: int = 24 * 60 * 60 * 1000):
         """Delete old entries out of the device_lists_outbound_pokes to ensure
         that we don't fill up due to dead servers.
 
diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py
index 7819bfcbb3..037e02603c 100644
--- a/synapse/storage/databases/main/directory.py
+++ b/synapse/storage/databases/main/directory.py
@@ -14,30 +14,29 @@
 # limitations under the License.
 
 from collections import namedtuple
-from typing import Optional
-
-from twisted.internet import defer
+from typing import Iterable, Optional
 
 from synapse.api.errors import SynapseError
 from synapse.storage._base import SQLBaseStore
+from synapse.types import RoomAlias
 from synapse.util.caches.descriptors import cached
 
 RoomAliasMapping = namedtuple("RoomAliasMapping", ("room_id", "room_alias", "servers"))
 
 
 class DirectoryWorkerStore(SQLBaseStore):
-    @defer.inlineCallbacks
-    def get_association_from_room_alias(self, room_alias):
-        """ Get's the room_id and server list for a given room_alias
+    async def get_association_from_room_alias(
+        self, room_alias: RoomAlias
+    ) -> Optional[RoomAliasMapping]:
+        """Gets the room_id and server list for a given room_alias
 
         Args:
-            room_alias (RoomAlias)
+            room_alias: The alias to translate to an ID.
 
         Returns:
-            Deferred: results in namedtuple with keys "room_id" and
-            "servers" or None if no association can be found
+            The room alias mapping or None if no association can be found.
         """
-        room_id = yield self.db_pool.simple_select_one_onecol(
+        room_id = await self.db_pool.simple_select_one_onecol(
             "room_aliases",
             {"room_alias": room_alias.to_string()},
             "room_id",
@@ -48,7 +47,7 @@ class DirectoryWorkerStore(SQLBaseStore):
         if not room_id:
             return None
 
-        servers = yield self.db_pool.simple_select_onecol(
+        servers = await self.db_pool.simple_select_onecol(
             "room_alias_servers",
             {"room_alias": room_alias.to_string()},
             "server",
@@ -79,18 +78,20 @@ class DirectoryWorkerStore(SQLBaseStore):
 
 
 class DirectoryStore(DirectoryWorkerStore):
-    @defer.inlineCallbacks
-    def create_room_alias_association(self, room_alias, room_id, servers, creator=None):
+    async def create_room_alias_association(
+        self,
+        room_alias: RoomAlias,
+        room_id: str,
+        servers: Iterable[str],
+        creator: Optional[str] = None,
+    ) -> None:
         """ Creates an association between a room alias and room_id/servers
 
         Args:
-            room_alias (RoomAlias)
-            room_id (str)
-            servers (list)
-            creator (str): Optional user_id of creator.
-
-        Returns:
-            Deferred
+            room_alias: The alias to create.
+            room_id: The target of the alias.
+            servers: A list of servers through which it may be possible to join the room.
+            creator: Optional user_id of creator.
         """
 
         def alias_txn(txn):
@@ -118,24 +119,22 @@ class DirectoryStore(DirectoryWorkerStore):
             )
 
         try:
-            ret = yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 "create_room_alias_association", alias_txn
             )
         except self.database_engine.module.IntegrityError:
             raise SynapseError(
                 409, "Room alias %s already exists" % room_alias.to_string()
             )
-        return ret
 
-    @defer.inlineCallbacks
-    def delete_room_alias(self, room_alias):
-        room_id = yield self.db_pool.runInteraction(
+    async def delete_room_alias(self, room_alias: RoomAlias) -> str:
+        room_id = await self.db_pool.runInteraction(
             "delete_room_alias", self._delete_room_alias_txn, room_alias
         )
 
         return room_id
 
-    def _delete_room_alias_txn(self, txn, room_alias):
+    def _delete_room_alias_txn(self, txn, room_alias: RoomAlias) -> str:
         txn.execute(
             "SELECT room_id FROM room_aliases WHERE room_alias = ?",
             (room_alias.to_string(),),
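
The directory.py change above converts a raw database IntegrityError on a duplicate alias into `SynapseError(409, ...)`. A hedged sketch of the caller-side contract (the wrapper function is illustrative, not part of the commit):

    from synapse.api.errors import SynapseError

    async def try_create_alias(store, room_alias, room_id, servers):
        try:
            await store.create_room_alias_association(room_alias, room_id, servers)
        except SynapseError as e:
            if e.code == 409:
                return False  # alias already taken
            raise
        return True
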
diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py
index 90152edc3c..2eeb9f97dc 100644
--- a/synapse/storage/databases/main/e2e_room_keys.py
+++ b/synapse/storage/databases/main/e2e_room_keys.py
@@ -14,18 +14,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from canonicaljson import json
-
-from twisted.internet import defer
-
 from synapse.api.errors import StoreError
 from synapse.logging.opentracing import log_kv, trace
 from synapse.storage._base import SQLBaseStore, db_to_json
+from synapse.util import json_encoder
 
 
 class EndToEndRoomKeyStore(SQLBaseStore):
-    @defer.inlineCallbacks
-    def update_e2e_room_key(self, user_id, version, room_id, session_id, room_key):
+    async def update_e2e_room_key(
+        self, user_id, version, room_id, session_id, room_key
+    ):
         """Replaces the encrypted E2E room key for a given session in a given backup
 
         Args:
@@ -38,7 +36,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             StoreError
         """
 
-        yield self.db_pool.simple_update_one(
+        await self.db_pool.simple_update_one(
             table="e2e_room_keys",
             keyvalues={
                 "user_id": user_id,
@@ -50,13 +48,12 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 "first_message_index": room_key["first_message_index"],
                 "forwarded_count": room_key["forwarded_count"],
                 "is_verified": room_key["is_verified"],
-                "session_data": json.dumps(room_key["session_data"]),
+                "session_data": json_encoder.encode(room_key["session_data"]),
             },
             desc="update_e2e_room_key",
         )
 
-    @defer.inlineCallbacks
-    def add_e2e_room_keys(self, user_id, version, room_keys):
+    async def add_e2e_room_keys(self, user_id, version, room_keys):
         """Bulk add room keys to a given backup.
 
         Args:
@@ -77,7 +74,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                     "first_message_index": room_key["first_message_index"],
                     "forwarded_count": room_key["forwarded_count"],
                     "is_verified": room_key["is_verified"],
-                    "session_data": json.dumps(room_key["session_data"]),
+                    "session_data": json_encoder.encode(room_key["session_data"]),
                 }
             )
             log_kv(
@@ -89,13 +86,12 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 }
             )
 
-        yield self.db_pool.simple_insert_many(
+        await self.db_pool.simple_insert_many(
             table="e2e_room_keys", values=values, desc="add_e2e_room_keys"
         )
 
     @trace
-    @defer.inlineCallbacks
-    def get_e2e_room_keys(self, user_id, version, room_id=None, session_id=None):
+    async def get_e2e_room_keys(self, user_id, version, room_id=None, session_id=None):
         """Bulk get the E2E room keys for a given backup, optionally filtered to a given
         room, or a given session.
 
@@ -110,7 +106,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 the backup (or for the specified room)
 
         Returns:
-            A deferred list of dicts giving the session_data and message metadata for
+            A list of dicts giving the session_data and message metadata for
             these room keys.
         """
 
@@ -125,7 +121,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             if session_id:
                 keyvalues["session_id"] = session_id
 
-        rows = yield self.db_pool.simple_select_list(
+        rows = await self.db_pool.simple_select_list(
             table="e2e_room_keys",
             keyvalues=keyvalues,
             retcols=(
@@ -243,8 +239,9 @@ class EndToEndRoomKeyStore(SQLBaseStore):
         )
 
     @trace
-    @defer.inlineCallbacks
-    def delete_e2e_room_keys(self, user_id, version, room_id=None, session_id=None):
+    async def delete_e2e_room_keys(
+        self, user_id, version, room_id=None, session_id=None
+    ):
         """Bulk delete the E2E room keys for a given backup, optionally filtered to a given
         room or a given session.
 
@@ -259,7 +256,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 the backup (or for the specified room)
 
         Returns:
-            A deferred of the deletion transaction
+            The deletion transaction
         """
 
         keyvalues = {"user_id": user_id, "version": int(version)}
@@ -268,7 +265,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             if session_id:
                 keyvalues["session_id"] = session_id
 
-        yield self.db_pool.simple_delete(
+        await self.db_pool.simple_delete(
             table="e2e_room_keys", keyvalues=keyvalues, desc="delete_e2e_room_keys"
         )
 
@@ -360,7 +357,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                     "user_id": user_id,
                     "version": new_version,
                     "algorithm": info["algorithm"],
-                    "auth_data": json.dumps(info["auth_data"]),
+                    "auth_data": json_encoder.encode(info["auth_data"]),
                 },
             )
 
@@ -387,7 +384,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
         updatevalues = {}
 
         if info is not None and "auth_data" in info:
-            updatevalues["auth_data"] = json.dumps(info["auth_data"])
+            updatevalues["auth_data"] = json_encoder.encode(info["auth_data"])
         if version_etag is not None:
             updatevalues["etag"] = version_etag
 
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index f90cd80561..a1291b06ff 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -14,24 +14,23 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Dict, List, Tuple
+from typing import Dict, Iterable, List, Optional, Tuple
 
-from canonicaljson import encode_canonical_json, json
+from canonicaljson import encode_canonical_json
 
 from twisted.enterprise.adbapi import Connection
-from twisted.internet import defer
 
 from synapse.logging.opentracing import log_kv, set_tag, trace
 from synapse.storage._base import SQLBaseStore, db_to_json
 from synapse.storage.database import make_in_list_sql_clause
+from synapse.util import json_encoder
 from synapse.util.caches.descriptors import cached, cachedList
 from synapse.util.iterutils import batch_iter
 
 
 class EndToEndKeyWorkerStore(SQLBaseStore):
     @trace
-    @defer.inlineCallbacks
-    def get_e2e_device_keys(
+    async def get_e2e_device_keys(
         self, query_list, include_all_devices=False, include_deleted_devices=False
     ):
         """Fetch a list of device keys.
@@ -51,7 +50,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
         if not query_list:
             return {}
 
-        results = yield self.db_pool.runInteraction(
+        results = await self.db_pool.runInteraction(
             "get_e2e_device_keys",
             self._get_e2e_device_keys_txn,
             query_list,
@@ -174,8 +173,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
         log_kv(result)
         return result
 
-    @defer.inlineCallbacks
-    def get_e2e_one_time_keys(self, user_id, device_id, key_ids):
+    async def get_e2e_one_time_keys(
+        self, user_id: str, device_id: str, key_ids: List[str]
+    ) -> Dict[Tuple[str, str], str]:
         """Retrieve a number of one-time keys for a user
 
         Args:
@@ -185,11 +185,10 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
                 retrieve
 
         Returns:
-            deferred resolving to Dict[(str, str), str]: map from (algorithm,
-            key_id) to json string for key
+            A map from (algorithm, key_id) to json string for key
         """
 
-        rows = yield self.db_pool.simple_select_many_batch(
+        rows = await self.db_pool.simple_select_many_batch(
             table="e2e_one_time_keys_json",
             column="key_id",
             iterable=key_ids,
@@ -201,17 +200,21 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
         log_kv({"message": "Fetched one time keys for user", "one_time_keys": result})
         return result
 
-    @defer.inlineCallbacks
-    def add_e2e_one_time_keys(self, user_id, device_id, time_now, new_keys):
+    async def add_e2e_one_time_keys(
+        self,
+        user_id: str,
+        device_id: str,
+        time_now: int,
+        new_keys: Iterable[Tuple[str, str, str]],
+    ) -> None:
         """Insert some new one time keys for a device. Errors if any of the
         keys already exist.
 
         Args:
-            user_id(str): id of user to get keys for
-            device_id(str): id of device to get keys for
-            time_now(long): insertion time to record (ms since epoch)
-            new_keys(iterable[(str, str, str)]: keys to add - each a tuple of
-                (algorithm, key_id, key json)
+            user_id: id of user to get keys for
+            device_id: id of device to get keys for
+            time_now: insertion time to record (ms since epoch)
+            new_keys: keys to add - each a tuple of (algorithm, key_id, key json)
         """
 
         def _add_e2e_one_time_keys(txn):
@@ -241,7 +244,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
                 txn, self.count_e2e_one_time_keys, (user_id, device_id)
             )
 
-        yield self.db_pool.runInteraction(
+        await self.db_pool.runInteraction(
             "add_e2e_one_time_keys_insert", _add_e2e_one_time_keys
         )
 
@@ -308,22 +311,23 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
 
     # FIXME: delete fallbacks when user logs out
 
-    @defer.inlineCallbacks
-    def get_e2e_cross_signing_key(self, user_id, key_type, from_user_id=None):
+    async def get_e2e_cross_signing_key(
+        self, user_id: str, key_type: str, from_user_id: Optional[str] = None
+    ) -> Optional[dict]:
         """Returns a user's cross-signing key.
 
         Args:
-            user_id (str): the user whose key is being requested
-            key_type (str): the type of key that is being requested: either 'master'
+            user_id: the user whose key is being requested
+            key_type: the type of key that is being requested: either 'master'
                 for a master key, 'self_signing' for a self-signing key, or
                 'user_signing' for a user-signing key
-            from_user_id (str): if specified, signatures made by this user on
+            from_user_id: if specified, signatures made by this user on
                 the self-signing key will be included in the result
 
         Returns:
             dict of the key data or None if not found
         """
-        res = yield self.get_e2e_cross_signing_keys_bulk([user_id], from_user_id)
+        res = await self.get_e2e_cross_signing_keys_bulk([user_id], from_user_id)
         user_keys = res.get(user_id)
         if not user_keys:
             return None
@@ -489,28 +493,26 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
 
         return keys
 
-    @defer.inlineCallbacks
-    def get_e2e_cross_signing_keys_bulk(
-        self, user_ids: List[str], from_user_id: str = None
-    ) -> defer.Deferred:
+    async def get_e2e_cross_signing_keys_bulk(
+        self, user_ids: List[str], from_user_id: Optional[str] = None
+    ) -> Dict[str, Dict[str, dict]]:
         """Returns the cross-signing keys for a set of users.
 
         Args:
-            user_ids (list[str]): the users whose keys are being requested
-            from_user_id (str): if specified, signatures made by this user on
+            user_ids: the users whose keys are being requested
+            from_user_id: if specified, signatures made by this user on
                 the self-signing keys will be included in the result
 
         Returns:
-            Deferred[dict[str, dict[str, dict]]]: map of user ID to key type to
-                key data.  If a user's cross-signing keys were not found, either
-                their user ID will not be in the dict, or their user ID will map
-                to None.
+            A map of user ID to key type to key data.  If a user's cross-signing
+            keys were not found, either their user ID will not be in the dict,
+            or their user ID will map to None.
         """
 
-        result = yield self._get_bare_e2e_cross_signing_keys_bulk(user_ids)
+        result = await self._get_bare_e2e_cross_signing_keys_bulk(user_ids)
 
         if from_user_id:
-            result = yield self.db_pool.runInteraction(
+            result = await self.db_pool.runInteraction(
                 "get_e2e_cross_signing_signatures",
                 self._get_e2e_cross_signing_signatures_txn,
                 result,
@@ -785,7 +787,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
                 values={
                     "user_id": user_id,
                     "keytype": key_type,
-                    "keydata": json.dumps(key),
+                    "keydata": json_encoder.encode(key),
                     "stream_id": stream_id,
                 },
             )
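
Most hunks in this file apply the same mechanical transformation: drop `@defer.inlineCallbacks`, mark the method `async`, and turn each `yield` on a Deferred into an `await` (Twisted Deferreds implement `__await__`, so the database helpers can be awaited directly). A minimal before/after sketch with a hypothetical store method and table name:

    from twisted.internet import defer

    class ExampleStore:
        """Hypothetical store showing the old and new spellings."""

        # Before: a generator-based coroutine driven by inlineCallbacks.
        @defer.inlineCallbacks
        def get_value_old(self, key):
            row = yield self.db_pool.simple_select_one_onecol(
                table="example_kv", keyvalues={"key": key}, retcol="value"
            )
            return row

        # After: a native coroutine; callers now await it directly.
        async def get_value(self, key):
            return await self.db_pool.simple_select_one_onecol(
                table="example_kv", keyvalues={"key": key}, retcol="value"
            )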
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index eddb32b4d3..484875f989 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -15,9 +15,7 @@
 import itertools
 import logging
 from queue import Empty, PriorityQueue
-from typing import Dict, List, Optional, Set, Tuple
-
-from twisted.internet import defer
+from typing import Dict, Iterable, List, Optional, Set, Tuple
 
 from synapse.api.errors import StoreError
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -286,17 +284,13 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
 
         return dict(txn)
 
-    @defer.inlineCallbacks
-    def get_max_depth_of(self, event_ids):
+    async def get_max_depth_of(self, event_ids: List[str]) -> int:
         """Returns the max depth of a set of event IDs
 
         Args:
-            event_ids (list[str])
-
-        Returns
-            Deferred[int]
+            event_ids: The event IDs to calculate the max depth of.
         """
-        rows = yield self.db_pool.simple_select_many_batch(
+        rows = await self.db_pool.simple_select_many_batch(
             table="events",
             column="event_id",
             iterable=event_ids,
@@ -550,9 +544,8 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
 
         return event_results
 
-    @defer.inlineCallbacks
-    def get_missing_events(self, room_id, earliest_events, latest_events, limit):
-        ids = yield self.db_pool.runInteraction(
+    async def get_missing_events(self, room_id, earliest_events, latest_events, limit):
+        ids = await self.db_pool.runInteraction(
             "get_missing_events",
             self._get_missing_events,
             room_id,
@@ -560,7 +553,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             latest_events,
             limit,
         )
-        events = yield self.get_events_as_list(ids)
+        events = await self.get_events_as_list(ids)
         return events
 
     def _get_missing_events(self, txn, room_id, earliest_events, latest_events, limit):
@@ -595,17 +588,13 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         event_results.reverse()
         return event_results
 
-    @defer.inlineCallbacks
-    def get_successor_events(self, event_ids):
+    async def get_successor_events(self, event_ids: Iterable[str]) -> List[str]:
         """Fetch all events that have the given events as a prev event
 
         Args:
-            event_ids (iterable[str])
-
-        Returns:
-            Deferred[list[str]]
+            event_ids: The events to use as the previous events.
         """
-        rows = yield self.db_pool.simple_select_many_batch(
+        rows = await self.db_pool.simple_select_many_batch(
             table="event_edges",
             column="prev_event_id",
             iterable=event_ids,
@@ -674,8 +663,7 @@ class EventFederationStore(EventFederationWorkerStore):
         txn.execute(query, (room_id,))
         txn.call_after(self.get_latest_event_ids_in_room.invalidate, (room_id,))
 
-    @defer.inlineCallbacks
-    def _background_delete_non_state_event_auth(self, progress, batch_size):
+    async def _background_delete_non_state_event_auth(self, progress, batch_size):
         def delete_event_auth(txn):
             target_min_stream_id = progress.get("target_min_stream_id_inclusive")
             max_stream_id = progress.get("max_stream_id_exclusive")
@@ -714,12 +702,12 @@ class EventFederationStore(EventFederationWorkerStore):
 
             return min_stream_id >= target_min_stream_id
 
-        result = yield self.db_pool.runInteraction(
+        result = await self.db_pool.runInteraction(
             self.EVENT_AUTH_STATE_ONLY, delete_event_auth
         )
 
         if not result:
-            yield self.db_pool.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 self.EVENT_AUTH_STATE_ONLY
             )
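
`_background_delete_non_state_event_auth` above keeps the usual shape of a Synapse background update: run one bounded batch inside `runInteraction`, and deregister the update via `updates._end_background_update(...)` once the batch function reports there is nothing left to do. A condensed, hypothetical sketch of that control flow (table and update names are invented):

    async def _background_example_update(self, progress, batch_size):
        def do_batch(txn):
            # Process at most `batch_size` rows of a hypothetical table and
            # report whether any work remains.
            txn.execute(
                "DELETE FROM example_table WHERE id IN "
                "(SELECT id FROM example_table LIMIT ?)",
                (batch_size,),
            )
            return txn.rowcount == batch_size  # a full batch implies more work

        has_more = await self.db_pool.runInteraction("example_update", do_batch)
        if not has_more:
            await self.db_pool.updates._end_background_update("example_update")
        return batch_size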
 
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index b8cefb4d5e..7c246d3e4c 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -17,11 +17,10 @@
 import logging
 from typing import List
 
-from canonicaljson import json
-
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import LoggingTransaction, SQLBaseStore, db_to_json
 from synapse.storage.database import DatabasePool
+from synapse.util import json_encoder
 from synapse.util.caches.descriptors import cachedInlineCallbacks
 
 logger = logging.getLogger(__name__)
@@ -50,7 +49,7 @@ def _serialize_action(actions, is_highlight):
     else:
         if actions == DEFAULT_NOTIF_ACTION:
             return ""
-    return json.dumps(actions)
+    return json_encoder.encode(actions)
 
 
 def _deserialize_action(actions, is_highlight):
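
`_serialize_action` stores the overwhelmingly common default action list as an empty string rather than as JSON, trading an equality check for a much smaller `event_push_actions` table. A standalone round-trip sketch; the `DEFAULT_NOTIF_ACTION` value is an assumption for illustration, and the real helpers' highlight special-casing is omitted:

    import json

    DEFAULT_NOTIF_ACTION = ["notify", {"set_tweak": "highlight", "value": False}]

    def serialize_action(actions):
        # Collapse the default to a zero-byte sentinel.
        return "" if actions == DEFAULT_NOTIF_ACTION else json.dumps(actions)

    def deserialize_action(data):
        return json.loads(data) if data else list(DEFAULT_NOTIF_ACTION)

    assert deserialize_action(serialize_action(DEFAULT_NOTIF_ACTION)) == DEFAULT_NOTIF_ACTION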
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 4d8a24ce4b..1a68bf32cb 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -53,47 +53,6 @@ event_counter = Counter(
     ["type", "origin_type", "origin_entity"],
 )
 
-STATE_EVENT_TYPES_TO_MARK_UNREAD = {
-    EventTypes.Topic,
-    EventTypes.Name,
-    EventTypes.RoomAvatar,
-    EventTypes.Tombstone,
-}
-
-
-def should_count_as_unread(event: EventBase, context: EventContext) -> bool:
-    # Exclude rejected and soft-failed events.
-    if context.rejected or event.internal_metadata.is_soft_failed():
-        return False
-
-    # Exclude notices.
-    if (
-        not event.is_state()
-        and event.type == EventTypes.Message
-        and event.content.get("msgtype") == "m.notice"
-    ):
-        return False
-
-    # Exclude edits.
-    relates_to = event.content.get("m.relates_to", {})
-    if relates_to.get("rel_type") == RelationTypes.REPLACE:
-        return False
-
-    # Mark events that have a non-empty string body as unread.
-    body = event.content.get("body")
-    if isinstance(body, str) and body:
-        return True
-
-    # Mark some state events as unread.
-    if event.is_state() and event.type in STATE_EVENT_TYPES_TO_MARK_UNREAD:
-        return True
-
-    # Mark encrypted events as unread.
-    if not event.is_state() and event.type == EventTypes.Encrypted:
-        return True
-
-    return False
-
 
 def encode_json(json_object):
     """
@@ -239,10 +198,6 @@ class PersistEventsStore:
 
                 event_counter.labels(event.type, origin_type, origin_entity).inc()
 
-                self.store.get_unread_message_count_for_user.invalidate_many(
-                    (event.room_id,),
-                )
-
             for room_id, new_state in current_state_for_room.items():
                 self.store.get_current_state_ids.prefill((room_id,), new_state)
 
@@ -864,9 +819,8 @@ class PersistEventsStore:
                     "contains_url": (
                         "url" in event.content and isinstance(event.content["url"], str)
                     ),
-                    "count_as_unread": should_count_as_unread(event, context),
                 }
-                for event, context in events_and_contexts
+                for event, _ in events_and_contexts
             ],
         )
 
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index a7b7393f6e..755b7a2a85 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -41,15 +41,9 @@ from synapse.replication.tcp.streams import BackfillStream
 from synapse.replication.tcp.streams.events import EventsStream
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.database import DatabasePool
-from synapse.storage.types import Cursor
 from synapse.storage.util.id_generators import StreamIdGenerator
 from synapse.types import get_domain_from_id
-from synapse.util.caches.descriptors import (
-    Cache,
-    _CacheContext,
-    cached,
-    cachedInlineCallbacks,
-)
+from synapse.util.caches.descriptors import Cache, cached, cachedInlineCallbacks
 from synapse.util.iterutils import batch_iter
 from synapse.util.metrics import Measure
 
@@ -1364,84 +1358,6 @@ class EventsWorkerStore(SQLBaseStore):
             desc="get_next_event_to_expire", func=get_next_event_to_expire_txn
         )
 
-    @cached(tree=True, cache_context=True)
-    async def get_unread_message_count_for_user(
-        self, room_id: str, user_id: str, cache_context: _CacheContext,
-    ) -> int:
-        """Retrieve the count of unread messages for the given room and user.
-
-        Args:
-            room_id: The ID of the room to count unread messages in.
-            user_id: The ID of the user to count unread messages for.
-
-        Returns:
-            The number of unread messages for the given user in the given room.
-        """
-        with Measure(self._clock, "get_unread_message_count_for_user"):
-            last_read_event_id = await self.get_last_receipt_event_id_for_user(
-                user_id=user_id,
-                room_id=room_id,
-                receipt_type="m.read",
-                on_invalidate=cache_context.invalidate,
-            )
-
-            return await self.db_pool.runInteraction(
-                "get_unread_message_count_for_user",
-                self._get_unread_message_count_for_user_txn,
-                user_id,
-                room_id,
-                last_read_event_id,
-            )
-
-    def _get_unread_message_count_for_user_txn(
-        self,
-        txn: Cursor,
-        user_id: str,
-        room_id: str,
-        last_read_event_id: Optional[str],
-    ) -> int:
-        if last_read_event_id:
-            # Get the stream ordering for the last read event.
-            stream_ordering = self.db_pool.simple_select_one_onecol_txn(
-                txn=txn,
-                table="events",
-                keyvalues={"room_id": room_id, "event_id": last_read_event_id},
-                retcol="stream_ordering",
-            )
-        else:
-            # If there's no read receipt for that room, it probably means the user hasn't
-            # opened it yet, in which case use the stream ID of their join event.
-            # We can't just set it to 0 otherwise messages from other local users from
-            # before this user joined will be counted as well.
-            txn.execute(
-                """
-                SELECT stream_ordering FROM local_current_membership
-                LEFT JOIN events USING (event_id, room_id)
-                WHERE membership = 'join'
-                    AND user_id = ?
-                    AND room_id = ?
-                """,
-                (user_id, room_id),
-            )
-            row = txn.fetchone()
-
-            if row is None:
-                return 0
-
-            stream_ordering = row[0]
-
-        # Count the messages that qualify as unread after the stream ordering we've just
-        # retrieved.
-        sql = """
-            SELECT COUNT(*) FROM events
-            WHERE sender != ? AND room_id = ? AND stream_ordering > ? AND count_as_unread
-        """
-
-        txn.execute(sql, (user_id, room_id, stream_ordering))
-        row = txn.fetchone()
-
-        return row[0] if row else 0
-
 
 AllNewEventsResult = namedtuple(
     "AllNewEventsResult",
diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py
index cae6bda80e..45a1760170 100644
--- a/synapse/storage/databases/main/filtering.py
+++ b/synapse/storage/databases/main/filtering.py
@@ -17,12 +17,12 @@ from canonicaljson import encode_canonical_json
 
 from synapse.api.errors import Codes, SynapseError
 from synapse.storage._base import SQLBaseStore, db_to_json
-from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.util.caches.descriptors import cached
 
 
 class FilteringStore(SQLBaseStore):
-    @cachedInlineCallbacks(num_args=2)
-    def get_user_filter(self, user_localpart, filter_id):
+    @cached(num_args=2)
+    async def get_user_filter(self, user_localpart, filter_id):
         # filter_id is BIGINT UNSIGNED, so if it isn't a number, fail
         # with a coherent error message rather than 500 M_UNKNOWN.
         try:
@@ -30,7 +30,7 @@ class FilteringStore(SQLBaseStore):
         except ValueError:
             raise SynapseError(400, "Invalid filter ID", Codes.INVALID_PARAM)
 
-        def_json = yield self.db_pool.simple_select_one_onecol(
+        def_json = await self.db_pool.simple_select_one_onecol(
             table="user_filters",
             keyvalues={"user_id": user_localpart, "filter_id": filter_id},
             retcol="filter_json",
diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py
index a98181f445..380db3a3f3 100644
--- a/synapse/storage/databases/main/group_server.py
+++ b/synapse/storage/databases/main/group_server.py
@@ -14,14 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import List, Tuple
-
-from canonicaljson import json
-
-from twisted.internet import defer
+from typing import List, Optional, Tuple
 
 from synapse.api.errors import SynapseError
 from synapse.storage._base import SQLBaseStore, db_to_json
+from synapse.types import JsonDict
+from synapse.util import json_encoder
 
 # The category ID for the "default" category. We don't store as null in the
 # database to avoid the fun of null != null
@@ -211,9 +209,8 @@ class GroupServerWorkerStore(SQLBaseStore):
             "get_rooms_for_summary", _get_rooms_for_summary_txn
         )
 
-    @defer.inlineCallbacks
-    def get_group_categories(self, group_id):
-        rows = yield self.db_pool.simple_select_list(
+    async def get_group_categories(self, group_id):
+        rows = await self.db_pool.simple_select_list(
             table="group_room_categories",
             keyvalues={"group_id": group_id},
             retcols=("category_id", "is_public", "profile"),
@@ -228,9 +225,8 @@ class GroupServerWorkerStore(SQLBaseStore):
             for row in rows
         }
 
-    @defer.inlineCallbacks
-    def get_group_category(self, group_id, category_id):
-        category = yield self.db_pool.simple_select_one(
+    async def get_group_category(self, group_id, category_id):
+        category = await self.db_pool.simple_select_one(
             table="group_room_categories",
             keyvalues={"group_id": group_id, "category_id": category_id},
             retcols=("is_public", "profile"),
@@ -241,9 +237,8 @@ class GroupServerWorkerStore(SQLBaseStore):
 
         return category
 
-    @defer.inlineCallbacks
-    def get_group_roles(self, group_id):
-        rows = yield self.db_pool.simple_select_list(
+    async def get_group_roles(self, group_id):
+        rows = await self.db_pool.simple_select_list(
             table="group_roles",
             keyvalues={"group_id": group_id},
             retcols=("role_id", "is_public", "profile"),
@@ -258,9 +253,8 @@ class GroupServerWorkerStore(SQLBaseStore):
             for row in rows
         }
 
-    @defer.inlineCallbacks
-    def get_group_role(self, group_id, role_id):
-        role = yield self.db_pool.simple_select_one(
+    async def get_group_role(self, group_id, role_id):
+        role = await self.db_pool.simple_select_one(
             table="group_roles",
             keyvalues={"group_id": group_id, "role_id": role_id},
             retcols=("is_public", "profile"),
@@ -449,12 +443,11 @@ class GroupServerWorkerStore(SQLBaseStore):
             "get_attestations_need_renewals", _get_attestations_need_renewals_txn
         )
 
-    @defer.inlineCallbacks
-    def get_remote_attestation(self, group_id, user_id):
+    async def get_remote_attestation(self, group_id, user_id):
         """Get the attestation that proves the remote agrees that the user is
         in the group.
         """
-        row = yield self.db_pool.simple_select_one(
+        row = await self.db_pool.simple_select_one(
             table="group_attestations_remote",
             keyvalues={"group_id": group_id, "user_id": user_id},
             retcols=("valid_until_ms", "attestation_json"),
@@ -500,13 +493,13 @@ class GroupServerWorkerStore(SQLBaseStore):
             "get_all_groups_for_user", _get_all_groups_for_user_txn
         )
 
-    def get_groups_changes_for_user(self, user_id, from_token, to_token):
+    async def get_groups_changes_for_user(self, user_id, from_token, to_token):
         from_token = int(from_token)
         has_changed = self._group_updates_stream_cache.has_entity_changed(
             user_id, from_token
         )
         if not has_changed:
-            return defer.succeed([])
+            return []
 
         def _get_groups_changes_for_user_txn(txn):
             sql = """
@@ -526,7 +519,7 @@ class GroupServerWorkerStore(SQLBaseStore):
                 for group_id, membership, gtype, content_json in txn
             ]
 
-        return self.db_pool.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_groups_changes_for_user", _get_groups_changes_for_user_txn
         )
 
@@ -752,7 +745,7 @@ class GroupServerStore(GroupServerWorkerStore):
         if profile is None:
             insertion_values["profile"] = "{}"
         else:
-            update_values["profile"] = json.dumps(profile)
+            update_values["profile"] = json_encoder.encode(profile)
 
         if is_public is None:
             insertion_values["is_public"] = True
@@ -783,7 +776,7 @@ class GroupServerStore(GroupServerWorkerStore):
         if profile is None:
             insertion_values["profile"] = "{}"
         else:
-            update_values["profile"] = json.dumps(profile)
+            update_values["profile"] = json_encoder.encode(profile)
 
         if is_public is None:
             insertion_values["is_public"] = True
@@ -1007,7 +1000,7 @@ class GroupServerStore(GroupServerWorkerStore):
                         "group_id": group_id,
                         "user_id": user_id,
                         "valid_until_ms": remote_attestation["valid_until_ms"],
-                        "attestation_json": json.dumps(remote_attestation),
+                        "attestation_json": json_encoder.encode(remote_attestation),
                     },
                 )
 
@@ -1088,31 +1081,31 @@ class GroupServerStore(GroupServerWorkerStore):
             desc="update_group_publicity",
         )
 
-    @defer.inlineCallbacks
-    def register_user_group_membership(
+    async def register_user_group_membership(
         self,
-        group_id,
-        user_id,
-        membership,
-        is_admin=False,
-        content={},
-        local_attestation=None,
-        remote_attestation=None,
-        is_publicised=False,
-    ):
+        group_id: str,
+        user_id: str,
+        membership: str,
+        is_admin: bool = False,
+        content: JsonDict = {},
+        local_attestation: Optional[dict] = None,
+        remote_attestation: Optional[dict] = None,
+        is_publicised: bool = False,
+    ) -> int:
         """Registers that a local user is a member of a (local or remote) group.
 
         Args:
-            group_id (str)
-            user_id (str)
-            membership (str)
-            is_admin (bool)
-            content (dict): Content of the membership, e.g. includes the inviter
+            group_id: The group the member is being added to.
+            user_id: The user ID to add to the group.
+            membership: The type of group membership.
+            is_admin: Whether the user should be added as a group admin.
+            content: Content of the membership, e.g. includes the inviter
                 if the user has been invited.
-            local_attestation (dict): If remote group then store the fact that we
+            local_attestation: If remote group then store the fact that we
                 have given out an attestation, else None.
-            remote_attestation (dict): If remote group then store the remote
+            remote_attestation: If remote group then store the remote
                 attestation from the group, else None.
+            is_publicised: Whether this should be publicised.
         """
 
         def _register_user_group_membership_txn(txn, next_id):
@@ -1131,7 +1124,7 @@ class GroupServerStore(GroupServerWorkerStore):
                     "is_admin": is_admin,
                     "membership": membership,
                     "is_publicised": is_publicised,
-                    "content": json.dumps(content),
+                    "content": json_encoder.encode(content),
                 },
             )
 
@@ -1143,7 +1136,7 @@ class GroupServerStore(GroupServerWorkerStore):
                     "group_id": group_id,
                     "user_id": user_id,
                     "type": "membership",
-                    "content": json.dumps(
+                    "content": json_encoder.encode(
                         {"membership": membership, "content": content}
                     ),
                 },
@@ -1171,7 +1164,7 @@ class GroupServerStore(GroupServerWorkerStore):
                             "group_id": group_id,
                             "user_id": user_id,
                             "valid_until_ms": remote_attestation["valid_until_ms"],
-                            "attestation_json": json.dumps(remote_attestation),
+                            "attestation_json": json_encoder.encode(remote_attestation),
                         },
                     )
             else:
@@ -1189,18 +1182,17 @@ class GroupServerStore(GroupServerWorkerStore):
             return next_id
 
         with self._group_updates_id_gen.get_next() as next_id:
-            res = yield self.db_pool.runInteraction(
+            res = await self.db_pool.runInteraction(
                 "register_user_group_membership",
                 _register_user_group_membership_txn,
                 next_id,
             )
         return res
 
-    @defer.inlineCallbacks
-    def create_group(
+    async def create_group(
         self, group_id, user_id, name, avatar_url, short_description, long_description
-    ):
-        yield self.db_pool.simple_insert(
+    ) -> None:
+        await self.db_pool.simple_insert(
             table="groups",
             values={
                 "group_id": group_id,
@@ -1213,9 +1205,8 @@ class GroupServerStore(GroupServerWorkerStore):
             desc="create_group",
         )
 
-    @defer.inlineCallbacks
-    def update_group_profile(self, group_id, profile):
-        yield self.db_pool.simple_update_one(
+    async def update_group_profile(self, group_id, profile):
+        await self.db_pool.simple_update_one(
             table="groups",
             keyvalues={"group_id": group_id},
             updatevalues=profile,
@@ -1240,7 +1231,7 @@ class GroupServerStore(GroupServerWorkerStore):
             keyvalues={"group_id": group_id, "user_id": user_id},
             updatevalues={
                 "valid_until_ms": attestation["valid_until_ms"],
-                "attestation_json": json.dumps(attestation),
+                "attestation_json": json_encoder.encode(attestation),
             },
             desc="update_remote_attestion",
         )
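
One wrinkle in the new `register_user_group_membership` signature is `content: JsonDict = {}`: the default dict is created once at definition time and shared by every call, which is safe only while no caller mutates it. The defensive spelling, shown on a hypothetical function:

    from typing import Optional

    def register(content: Optional[dict] = None) -> dict:
        # Allocate a fresh dict per call instead of sharing one instance.
        if content is None:
            content = {}
        return content

    assert register() is not register()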
diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py
index baa7a5092a..686052bd83 100644
--- a/synapse/storage/databases/main/metrics.py
+++ b/synapse/storage/databases/main/metrics.py
@@ -15,8 +15,6 @@
 import typing
 from collections import Counter
 
-from twisted.internet import defer
-
 from synapse.metrics import BucketCollector
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore
@@ -69,8 +67,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
         res = await self.db_pool.runInteraction("read_forward_extremities", fetch)
         self._current_forward_extremities_amount = Counter([x[0] for x in res])
 
-    @defer.inlineCallbacks
-    def count_daily_messages(self):
+    async def count_daily_messages(self):
         """
         Returns an estimate of the number of messages sent in the last day.
 
@@ -88,11 +85,9 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
             (count,) = txn.fetchone()
             return count
 
-        ret = yield self.db_pool.runInteraction("count_messages", _count_messages)
-        return ret
+        return await self.db_pool.runInteraction("count_messages", _count_messages)
 
-    @defer.inlineCallbacks
-    def count_daily_sent_messages(self):
+    async def count_daily_sent_messages(self):
         def _count_messages(txn):
             # This is good enough as if you have silly characters in your own
             # hostname then thats your own fault.
@@ -109,13 +104,11 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
             (count,) = txn.fetchone()
             return count
 
-        ret = yield self.db_pool.runInteraction(
+        return await self.db_pool.runInteraction(
             "count_daily_sent_messages", _count_messages
         )
-        return ret
 
-    @defer.inlineCallbacks
-    def count_daily_active_rooms(self):
+    async def count_daily_active_rooms(self):
         def _count(txn):
             sql = """
                 SELECT COALESCE(COUNT(DISTINCT room_id), 0) FROM events
@@ -126,5 +119,4 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
             (count,) = txn.fetchone()
             return count
 
-        ret = yield self.db_pool.runInteraction("count_daily_active_rooms", _count)
-        return ret
+        return await self.db_pool.runInteraction("count_daily_active_rooms", _count)
diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
index 02b01d9619..e71cdd2cb4 100644
--- a/synapse/storage/databases/main/monthly_active_users.py
+++ b/synapse/storage/databases/main/monthly_active_users.py
@@ -15,8 +15,6 @@
 import logging
 from typing import List
 
-from twisted.internet import defer
-
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.database import DatabasePool, make_in_list_sql_clause
 from synapse.util.caches.descriptors import cached
@@ -252,16 +250,12 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
             "reap_monthly_active_users", _reap_users, reserved_users
         )
 
-    @defer.inlineCallbacks
-    def upsert_monthly_active_user(self, user_id):
+    async def upsert_monthly_active_user(self, user_id: str) -> None:
         """Updates or inserts the user into the monthly active user table, which
         is used to track the current MAU usage of the server
 
         Args:
-            user_id (str): user to add/update
-
-        Returns:
-            Deferred
+            user_id: user to add/update
         """
         # Support user never to be included in MAU stats. Note I can't easily call this
         # from upsert_monthly_active_user_txn because then I need a _txn form of
@@ -271,11 +265,11 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
         # _initialise_reserved_users reasoning that it would be very strange to
         #  include a support user in this context.
 
-        is_support = yield self.is_support_user(user_id)
+        is_support = await self.is_support_user(user_id)
         if is_support:
             return
 
-        yield self.db_pool.runInteraction(
+        await self.db_pool.runInteraction(
             "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id
         )
 
@@ -322,8 +316,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
 
         return is_insert
 
-    @defer.inlineCallbacks
-    def populate_monthly_active_users(self, user_id):
+    async def populate_monthly_active_users(self, user_id):
         """Checks on the state of monthly active user limits and optionally
         add the user to the monthly active tables
 
@@ -332,14 +325,14 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
         """
         if self._limit_usage_by_mau or self._mau_stats_only:
             # Trial users and guests should not be included as part of MAU group
-            is_guest = yield self.is_guest(user_id)
+            is_guest = await self.is_guest(user_id)
             if is_guest:
                 return
-            is_trial = yield self.is_trial_user(user_id)
+            is_trial = await self.is_trial_user(user_id)
             if is_trial:
                 return
 
-            last_seen_timestamp = yield self.user_last_seen_monthly_active(user_id)
+            last_seen_timestamp = await self.user_last_seen_monthly_active(user_id)
             now = self.hs.get_clock().time_msec()
 
             # We want to reduce to the total number of db writes, and are happy
@@ -352,10 +345,10 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
                 # False, there is no point in checking get_monthly_active_count - it
                 # adds no value and will break the logic if max_mau_value is exceeded.
                 if not self._limit_usage_by_mau:
-                    yield self.upsert_monthly_active_user(user_id)
+                    await self.upsert_monthly_active_user(user_id)
                 else:
-                    count = yield self.get_monthly_active_count()
+                    count = await self.get_monthly_active_count()
                     if count < self._max_mau_value:
-                        yield self.upsert_monthly_active_user(user_id)
+                        await self.upsert_monthly_active_user(user_id)
             elif now - last_seen_timestamp > LAST_SEEN_GRANULARITY:
-                yield self.upsert_monthly_active_user(user_id)
+                await self.upsert_monthly_active_user(user_id)
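
`populate_monthly_active_users` avoids a database write on most requests: guests and trial users are skipped outright, and an already-tracked user is only refreshed once their timestamp is older than `LAST_SEEN_GRANULARITY`. The write decision, condensed into a sketch (the one-hour granularity is an assumed value):

    LAST_SEEN_GRANULARITY = 60 * 60 * 1000  # ms; assumed value

    def needs_mau_write(last_seen_ts, now_ms):
        # Never seen this month, or stale enough to be worth refreshing.
        return last_seen_ts is None or now_ms - last_seen_ts > LAST_SEEN_GRANULARITY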
diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py
index 99e66dc6e9..59ba12820a 100644
--- a/synapse/storage/databases/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -15,8 +15,6 @@
 
 from typing import List, Tuple
 
-from twisted.internet import defer
-
 from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
 from synapse.storage.presence import UserPresenceState
 from synapse.util.caches.descriptors import cached, cachedList
@@ -24,14 +22,13 @@ from synapse.util.iterutils import batch_iter
 
 
 class PresenceStore(SQLBaseStore):
-    @defer.inlineCallbacks
-    def update_presence(self, presence_states):
+    async def update_presence(self, presence_states):
         stream_ordering_manager = self._presence_id_gen.get_next_mult(
             len(presence_states)
         )
 
         with stream_ordering_manager as stream_orderings:
-            yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 "update_presence",
                 self._update_presence_txn,
                 stream_orderings,
diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py
index 4a4f2cb385..b8261357d4 100644
--- a/synapse/storage/databases/main/profile.py
+++ b/synapse/storage/databases/main/profile.py
@@ -13,18 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
-
 from synapse.api.errors import StoreError
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.databases.main.roommember import ProfileInfo
 
 
 class ProfileWorkerStore(SQLBaseStore):
-    @defer.inlineCallbacks
-    def get_profileinfo(self, user_localpart):
+    async def get_profileinfo(self, user_localpart):
         try:
-            profile = yield self.db_pool.simple_select_one(
+            profile = await self.db_pool.simple_select_one(
                 table="profiles",
                 keyvalues={"user_id": user_localpart},
                 retcols=("displayname", "avatar_url"),
@@ -118,14 +115,13 @@ class ProfileStore(ProfileWorkerStore):
             desc="update_remote_profile_cache",
         )
 
-    @defer.inlineCallbacks
-    def maybe_delete_remote_profile_cache(self, user_id):
+    async def maybe_delete_remote_profile_cache(self, user_id):
         """Check if we still care about the remote user's profile, and if we
         don't then remove their profile from the cache
         """
-        subscribed = yield self.is_subscribed_remote_profile_for_user(user_id)
+        subscribed = await self.is_subscribed_remote_profile_for_user(user_id)
         if not subscribed:
-            yield self.db_pool.simple_delete(
+            await self.db_pool.simple_delete(
                 table="remote_profile_cache",
                 keyvalues={"user_id": user_id},
                 desc="delete_remote_profile_cache",
@@ -151,11 +147,10 @@ class ProfileStore(ProfileWorkerStore):
             _get_remote_profile_cache_entries_that_expire_txn,
         )
 
-    @defer.inlineCallbacks
-    def is_subscribed_remote_profile_for_user(self, user_id):
+    async def is_subscribed_remote_profile_for_user(self, user_id):
         """Check whether we are interested in a remote user's profile.
         """
-        res = yield self.db_pool.simple_select_one_onecol(
+        res = await self.db_pool.simple_select_one_onecol(
             table="group_users",
             keyvalues={"user_id": user_id},
             retcol="user_id",
@@ -166,7 +161,7 @@ class ProfileStore(ProfileWorkerStore):
         if res:
             return True
 
-        res = yield self.db_pool.simple_select_one_onecol(
+        res = await self.db_pool.simple_select_one_onecol(
             table="group_invites",
             keyvalues={"user_id": user_id},
             retcol="user_id",
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index 97cc12931d..6562db5c2b 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -18,8 +18,6 @@ import abc
 import logging
 from typing import List, Tuple, Union
 
-from canonicaljson import json
-
 from twisted.internet import defer
 
 from synapse.push.baserules import list_with_base_rules
@@ -33,13 +31,14 @@ from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
 from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
 from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
 from synapse.storage.util.id_generators import ChainedIdGenerator
+from synapse.util import json_encoder
 from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 logger = logging.getLogger(__name__)
 
 
-def _load_rules(rawrules, enabled_map):
+def _load_rules(rawrules, enabled_map, use_new_defaults=False):
     ruleslist = []
     for rawrule in rawrules:
         rule = dict(rawrule)
@@ -49,7 +48,7 @@ def _load_rules(rawrules, enabled_map):
         ruleslist.append(rule)
 
     # We're going to be mutating this a lot, so do a deep copy
-    rules = list(list_with_base_rules(ruleslist))
+    rules = list(list_with_base_rules(ruleslist, use_new_defaults))
 
     for i, rule in enumerate(rules):
         rule_id = rule["rule_id"]
@@ -105,6 +104,8 @@ class PushRulesWorkerStore(
             prefilled_cache=push_rules_prefill,
         )
 
+        self._users_new_default_push_rules = hs.config.users_new_default_push_rules
+
     @abc.abstractmethod
     def get_max_push_rules_stream_id(self):
         """Get the position of the push rules stream.
@@ -134,7 +135,9 @@ class PushRulesWorkerStore(
 
         enabled_map = yield self.get_push_rules_enabled_for_user(user_id)
 
-        rules = _load_rules(rows, enabled_map)
+        use_new_defaults = user_id in self._users_new_default_push_rules
+
+        rules = _load_rules(rows, enabled_map, use_new_defaults)
 
         return rules
 
@@ -194,7 +197,11 @@ class PushRulesWorkerStore(
         enabled_map_by_user = yield self.bulk_get_push_rules_enabled(user_ids)
 
         for user_id, rules in results.items():
-            results[user_id] = _load_rules(rules, enabled_map_by_user.get(user_id, {}))
+            use_new_defaults = user_id in self._users_new_default_push_rules
+
+            results[user_id] = _load_rules(
+                rules, enabled_map_by_user.get(user_id, {}), use_new_defaults,
+            )
 
         return results
 
@@ -249,81 +256,6 @@ class PushRulesWorkerStore(
             ):
                 yield self.copy_push_rule_from_room_to_room(new_room_id, user_id, rule)
 
-    @defer.inlineCallbacks
-    def bulk_get_push_rules_for_room(self, event, context):
-        state_group = context.state_group
-        if not state_group:
-            # If state_group is None it means it has yet to be assigned a
-            # state group, i.e. we need to make sure that calls with a state_group
-            # of None don't hit previous cached calls with a None state_group.
-            # To do this we set the state_group to a new object as object() != object()
-            state_group = object()
-
-        current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
-        result = yield self._bulk_get_push_rules_for_room(
-            event.room_id, state_group, current_state_ids, event=event
-        )
-        return result
-
-    @cachedInlineCallbacks(num_args=2, cache_context=True)
-    def _bulk_get_push_rules_for_room(
-        self, room_id, state_group, current_state_ids, cache_context, event=None
-    ):
-        # We don't use `state_group`, its there so that we can cache based
-        # on it. However, its important that its never None, since two current_state's
-        # with a state_group of None are likely to be different.
-        # See bulk_get_push_rules_for_room for how we work around this.
-        assert state_group is not None
-
-        # We also will want to generate notifs for other people in the room so
-        # their unread countss are correct in the event stream, but to avoid
-        # generating them for bot / AS users etc, we only do so for people who've
-        # sent a read receipt into the room.
-
-        users_in_room = yield self._get_joined_users_from_context(
-            room_id,
-            state_group,
-            current_state_ids,
-            on_invalidate=cache_context.invalidate,
-            event=event,
-        )
-
-        # We ignore app service users for now. This is so that we don't fill
-        # up the `get_if_users_have_pushers` cache with AS entries that we
-        # know don't have pushers, nor even read receipts.
-        local_users_in_room = {
-            u
-            for u in users_in_room
-            if self.hs.is_mine_id(u)
-            and not self.get_if_app_services_interested_in_user(u)
-        }
-
-        # users in the room who have pushers need to get push rules run because
-        # that's how their pushers work
-        if_users_with_pushers = yield self.get_if_users_have_pushers(
-            local_users_in_room, on_invalidate=cache_context.invalidate
-        )
-        user_ids = {
-            uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher
-        }
-
-        users_with_receipts = yield self.get_users_with_read_receipts_in_room(
-            room_id, on_invalidate=cache_context.invalidate
-        )
-
-        # any users with pushers must be ours: they have pushers
-        for uid in users_with_receipts:
-            if uid in local_users_in_room:
-                user_ids.add(uid)
-
-        rules_by_user = yield self.bulk_get_push_rules(
-            user_ids, on_invalidate=cache_context.invalidate
-        )
-
-        rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None}
-
-        return rules_by_user
-
     @cachedList(
         cached_method_name="get_push_rules_enabled_for_user",
         list_name="user_ids",
@@ -411,8 +343,8 @@ class PushRuleStore(PushRulesWorkerStore):
         before=None,
         after=None,
     ):
-        conditions_json = json.dumps(conditions)
-        actions_json = json.dumps(actions)
+        conditions_json = json_encoder.encode(conditions)
+        actions_json = json_encoder.encode(actions)
         with self._push_rules_stream_id_gen.get_next() as ids:
             stream_id, event_stream_ordering = ids
             if before or after:
@@ -681,7 +613,7 @@ class PushRuleStore(PushRulesWorkerStore):
 
     @defer.inlineCallbacks
     def set_push_rule_actions(self, user_id, rule_id, actions, is_default_rule):
-        actions_json = json.dumps(actions)
+        actions_json = json_encoder.encode(actions)
 
         def set_push_rule_actions_txn(txn, stream_id, event_stream_ordering):
             if is_default_rule:
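
The new `use_new_defaults` argument threads a per-user experiment flag from `hs.config.users_new_default_push_rules` down to `_load_rules`, which selects the base-rule set merged with the user's own rules. The selection, condensed with hypothetical rule sets (the real `list_with_base_rules` interleaves base rules around the user's rules by priority class):

    OLD_BASE_RULES = [{"rule_id": ".m.rule.old_example"}]  # hypothetical
    NEW_BASE_RULES = [{"rule_id": ".m.rule.new_example"}]  # hypothetical

    def load_rules(user_rules, user_id, users_on_new_defaults):
        # Pick the base-rule set per user, then layer the user's rules on top.
        base = NEW_BASE_RULES if user_id in users_on_new_defaults else OLD_BASE_RULES
        return list(base) + list(user_rules)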
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 6255977c92..1920a8a152 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -18,13 +18,12 @@ import abc
 import logging
 from typing import List, Tuple
 
-from canonicaljson import json
-
 from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.database import DatabasePool
 from synapse.storage.util.id_generators import StreamIdGenerator
+from synapse.util import json_encoder
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
 from synapse.util.caches.stream_change_cache import StreamChangeCache
@@ -459,7 +458,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
             values={
                 "stream_id": stream_id,
                 "event_id": event_id,
-                "data": json.dumps(data),
+                "data": json_encoder.encode(data),
             },
             # receipts_linearized has a unique constraint on
             # (user_id, room_id, receipt_type), so no need to lock
@@ -585,7 +584,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
                 "room_id": room_id,
                 "receipt_type": receipt_type,
                 "user_id": user_id,
-                "event_ids": json.dumps(event_ids),
-                "data": json.dumps(data),
+                "event_ids": json_encoder.encode(event_ids),
+                "data": json_encoder.encode(data),
             },
         )
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index f618629e09..402ae25571 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -17,9 +17,8 @@
 
 import logging
 import re
-from typing import Optional
+from typing import Dict, List, Optional
 
-from twisted.internet import defer
 from twisted.internet.defer import Deferred
 
 from synapse.api.constants import UserTypes
@@ -30,7 +29,7 @@ from synapse.storage.database import DatabasePool
 from synapse.storage.types import Cursor
 from synapse.storage.util.sequence import build_sequence_generator
 from synapse.types import UserID
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
+from synapse.util.caches.descriptors import cached
 
 THIRTY_MINUTES_IN_MS = 30 * 60 * 1000
 
@@ -69,19 +68,15 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="get_user_by_id",
         )
 
-    @defer.inlineCallbacks
-    def is_trial_user(self, user_id):
+    async def is_trial_user(self, user_id: str) -> bool:
         """Checks if user is in the "trial" period, i.e. within the first
         N days of registration defined by `mau_trial_days` config
 
         Args:
-            user_id (str)
-
-        Returns:
-            Deferred[bool]
+            user_id: The user to check for trial status.
         """
 
-        info = yield self.get_user_by_id(user_id)
+        info = await self.get_user_by_id(user_id)
         if not info:
             return False
 
@@ -105,41 +100,42 @@ class RegistrationWorkerStore(SQLBaseStore):
             "get_user_by_access_token", self._query_for_auth, token
         )
 
-    @cachedInlineCallbacks()
-    def get_expiration_ts_for_user(self, user_id):
+    @cached()
+    async def get_expiration_ts_for_user(self, user_id: str) -> Optional[int]:
         """Get the expiration timestamp for the account bearing a given user ID.
 
         Args:
-            user_id (str): The ID of the user.
+            user_id: The ID of the user.
         Returns:
-            defer.Deferred: None, if the account has no expiration timestamp,
-                otherwise int representation of the timestamp (as a number of
-                milliseconds since epoch).
+            None, if the account has no expiration timestamp, otherwise int
+            representation of the timestamp (as a number of milliseconds since epoch).
         """
-        res = yield self.db_pool.simple_select_one_onecol(
+        return await self.db_pool.simple_select_one_onecol(
             table="account_validity",
             keyvalues={"user_id": user_id},
             retcol="expiration_ts_ms",
             allow_none=True,
             desc="get_expiration_ts_for_user",
         )
-        return res
 
-    @defer.inlineCallbacks
-    def set_account_validity_for_user(
-        self, user_id, expiration_ts, email_sent, renewal_token=None
-    ):
+    async def set_account_validity_for_user(
+        self,
+        user_id: str,
+        expiration_ts: int,
+        email_sent: bool,
+        renewal_token: Optional[str] = None,
+    ) -> None:
         """Updates the account validity properties of the given account, with the
         given values.
 
         Args:
-            user_id (str): ID of the account to update properties for.
-            expiration_ts (int): New expiration date, as a timestamp in milliseconds
+            user_id: ID of the account to update properties for.
+            expiration_ts: New expiration date, as a timestamp in milliseconds
                 since epoch.
-            email_sent (bool): True means a renewal email has been sent for this
-                account and there's no need to send another one for the current validity
+            email_sent: True means a renewal email has been sent for this account
+                and there's no need to send another one for the current validity
                 period.
-            renewal_token (str): Renewal token the user can use to extend the validity
+            renewal_token: Renewal token the user can use to extend the validity
                 of their account. Defaults to no token.
         """
 
@@ -158,75 +154,69 @@ class RegistrationWorkerStore(SQLBaseStore):
                 txn, self.get_expiration_ts_for_user, (user_id,)
             )
 
-        yield self.db_pool.runInteraction(
+        await self.db_pool.runInteraction(
             "set_account_validity_for_user", set_account_validity_for_user_txn
         )
 
-    @defer.inlineCallbacks
-    def set_renewal_token_for_user(self, user_id, renewal_token):
+    async def set_renewal_token_for_user(
+        self, user_id: str, renewal_token: str
+    ) -> None:
         """Defines a renewal token for a given user.
 
         Args:
-            user_id (str): ID of the user to set the renewal token for.
-            renewal_token (str): Random unique string that will be used to renew the
+            user_id: ID of the user to set the renewal token for.
+            renewal_token: Random unique string that will be used to renew the
                 user's account.
 
         Raises:
             StoreError: The provided token is already set for another user.
         """
-        yield self.db_pool.simple_update_one(
+        await self.db_pool.simple_update_one(
             table="account_validity",
             keyvalues={"user_id": user_id},
             updatevalues={"renewal_token": renewal_token},
             desc="set_renewal_token_for_user",
         )
 
-    @defer.inlineCallbacks
-    def get_user_from_renewal_token(self, renewal_token):
+    async def get_user_from_renewal_token(self, renewal_token: str) -> str:
         """Get a user ID from a renewal token.
 
         Args:
-            renewal_token (str): The renewal token to perform the lookup with.
+            renewal_token: The renewal token to perform the lookup with.
 
         Returns:
-            defer.Deferred[str]: The ID of the user to which the token belongs.
+            The ID of the user to which the token belongs.
         """
-        res = yield self.db_pool.simple_select_one_onecol(
+        return await self.db_pool.simple_select_one_onecol(
             table="account_validity",
             keyvalues={"renewal_token": renewal_token},
             retcol="user_id",
             desc="get_user_from_renewal_token",
         )
 
-        return res
-
-    @defer.inlineCallbacks
-    def get_renewal_token_for_user(self, user_id):
+    async def get_renewal_token_for_user(self, user_id: str) -> str:
         """Get the renewal token associated with a given user ID.
 
         Args:
-            user_id (str): The user ID to lookup a token for.
+            user_id: The user ID to lookup a token for.
 
         Returns:
-            defer.Deferred[str]: The renewal token associated with this user ID.
+            The renewal token associated with this user ID.
         """
-        res = yield self.db_pool.simple_select_one_onecol(
+        return await self.db_pool.simple_select_one_onecol(
             table="account_validity",
             keyvalues={"user_id": user_id},
             retcol="renewal_token",
             desc="get_renewal_token_for_user",
         )
 
-        return res
-
-    @defer.inlineCallbacks
-    def get_users_expiring_soon(self):
+    async def get_users_expiring_soon(self) -> List[Dict[str, int]]:
         """Selects users whose account will expire in the [now, now + renew_at] time
         window (see configuration for account_validity for information on what renew_at
         refers to).
 
         Returns:
-            Deferred: Resolves to a list[dict[user_id (str), expiration_ts_ms (int)]]
+            A list of dictionaries mapping user ID to expiration time (in milliseconds).
         """
 
         def select_users_txn(txn, now_ms, renew_at):
@@ -238,53 +228,49 @@ class RegistrationWorkerStore(SQLBaseStore):
             txn.execute(sql, values)
             return self.db_pool.cursor_to_dict(txn)
 
-        res = yield self.db_pool.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_users_expiring_soon",
             select_users_txn,
             self.clock.time_msec(),
             self.config.account_validity.renew_at,
         )
 
-        return res
-
-    @defer.inlineCallbacks
-    def set_renewal_mail_status(self, user_id, email_sent):
+    async def set_renewal_mail_status(self, user_id: str, email_sent: bool) -> None:
         """Sets or unsets the flag that indicates whether a renewal email has been sent
         to the user (and the user hasn't renewed their account yet).
 
         Args:
-            user_id (str): ID of the user to set/unset the flag for.
-            email_sent (bool): Flag which indicates whether a renewal email has been sent
+            user_id: ID of the user to set/unset the flag for.
+            email_sent: Flag which indicates whether a renewal email has been sent
                 to this user.
         """
-        yield self.db_pool.simple_update_one(
+        await self.db_pool.simple_update_one(
             table="account_validity",
             keyvalues={"user_id": user_id},
             updatevalues={"email_sent": email_sent},
             desc="set_renewal_mail_status",
         )
 
-    @defer.inlineCallbacks
-    def delete_account_validity_for_user(self, user_id):
+    async def delete_account_validity_for_user(self, user_id: str) -> None:
         """Deletes the entry for the given user in the account validity table, removing
         their expiration date and renewal token.
 
         Args:
-            user_id (str): ID of the user to remove from the account validity table.
+            user_id: ID of the user to remove from the account validity table.
         """
-        yield self.db_pool.simple_delete_one(
+        await self.db_pool.simple_delete_one(
             table="account_validity",
             keyvalues={"user_id": user_id},
             desc="delete_account_validity_for_user",
         )
 
-    async def is_server_admin(self, user):
+    async def is_server_admin(self, user: UserID) -> bool:
         """Determines if a user is an admin of this homeserver.
 
         Args:
-            user (UserID): user ID of the user to test
+            user: user ID of the user to test
 
-        Returns (bool):
+        Returns:
             true iff the user is a server admin, false otherwise.
         """
         res = await self.db_pool.simple_select_one_onecol(
@@ -332,32 +318,31 @@ class RegistrationWorkerStore(SQLBaseStore):
 
         return None
 
-    @cachedInlineCallbacks()
-    def is_real_user(self, user_id):
+    @cached()
+    async def is_real_user(self, user_id: str) -> bool:
         """Determines if the user is a real user, ie does not have a 'user_type'.
 
         Args:
-            user_id (str): user id to test
+            user_id: user id to test
 
         Returns:
-            Deferred[bool]: True if user 'user_type' is null or empty string
+            True if user 'user_type' is null or empty string
         """
-        res = yield self.db_pool.runInteraction(
+        return await self.db_pool.runInteraction(
             "is_real_user", self.is_real_user_txn, user_id
         )
-        return res
 
     @cached()
-    def is_support_user(self, user_id):
+    async def is_support_user(self, user_id: str) -> bool:
         """Determines if the user is of type UserTypes.SUPPORT
 
         Args:
-            user_id (str): user id to test
+            user_id: user id to test
 
         Returns:
-            Deferred[bool]: True if user is of type UserTypes.SUPPORT
+            True if user is of type UserTypes.SUPPORT
         """
-        return self.db_pool.runInteraction(
+        return await self.db_pool.runInteraction(
             "is_support_user", self.is_support_user_txn, user_id
         )
 
@@ -413,8 +398,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="get_user_by_external_id",
         )
 
-    @defer.inlineCallbacks
-    def count_all_users(self):
+    async def count_all_users(self):
         """Counts all users registered on the homeserver."""
 
         def _count_users(txn):
@@ -424,8 +408,7 @@ class RegistrationWorkerStore(SQLBaseStore):
                 return rows[0]["users"]
             return 0
 
-        ret = yield self.db_pool.runInteraction("count_users", _count_users)
-        return ret
+        return await self.db_pool.runInteraction("count_users", _count_users)
 
     def count_daily_user_type(self):
         """
@@ -460,8 +443,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             "count_daily_user_type", _count_daily_user_type
         )
 
-    @defer.inlineCallbacks
-    def count_nonbridged_users(self):
+    async def count_nonbridged_users(self):
         def _count_users(txn):
             txn.execute(
                 """
@@ -472,11 +454,9 @@ class RegistrationWorkerStore(SQLBaseStore):
             (count,) = txn.fetchone()
             return count
 
-        ret = yield self.db_pool.runInteraction("count_users", _count_users)
-        return ret
+        return await self.db_pool.runInteraction("count_users", _count_users)
 
-    @defer.inlineCallbacks
-    def count_real_users(self):
+    async def count_real_users(self):
         """Counts all users without a special user_type registered on the homeserver."""
 
         def _count_users(txn):
@@ -486,8 +466,7 @@ class RegistrationWorkerStore(SQLBaseStore):
                 return rows[0]["users"]
             return 0
 
-        ret = yield self.db_pool.runInteraction("count_real_users", _count_users)
-        return ret
+        return await self.db_pool.runInteraction("count_real_users", _count_users)
 
     async def generate_user_id(self) -> str:
         """Generate a suitable localpart for a guest user
@@ -537,23 +516,20 @@ class RegistrationWorkerStore(SQLBaseStore):
             return ret["user_id"]
         return None
 
-    @defer.inlineCallbacks
-    def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
-        yield self.db_pool.simple_upsert(
+    async def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
+        await self.db_pool.simple_upsert(
             "user_threepids",
             {"medium": medium, "address": address},
             {"user_id": user_id, "validated_at": validated_at, "added_at": added_at},
         )
 
-    @defer.inlineCallbacks
-    def user_get_threepids(self, user_id):
-        ret = yield self.db_pool.simple_select_list(
+    async def user_get_threepids(self, user_id):
+        return await self.db_pool.simple_select_list(
             "user_threepids",
             {"user_id": user_id},
             ["medium", "address", "validated_at", "added_at"],
             "user_get_threepids",
         )
-        return ret
 
     def user_delete_threepid(self, user_id, medium, address):
         return self.db_pool.simple_delete(
@@ -668,18 +644,18 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="get_id_servers_user_bound",
         )
 
-    @cachedInlineCallbacks()
-    def get_user_deactivated_status(self, user_id):
+    @cached()
+    async def get_user_deactivated_status(self, user_id: str) -> bool:
         """Retrieve the value for the `deactivated` property for the provided user.
 
         Args:
-            user_id (str): The ID of the user to retrieve the status for.
+            user_id: The ID of the user to retrieve the status for.
 
         Returns:
-            defer.Deferred(bool): The requested value.
+            True if the user was deactivated, false if the user is still active.
         """
 
-        res = yield self.db_pool.simple_select_one_onecol(
+        res = await self.db_pool.simple_select_one_onecol(
             table="users",
             keyvalues={"name": user_id},
             retcol="deactivated",
@@ -818,8 +794,7 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
             "users_set_deactivated_flag", self._background_update_set_deactivated_flag
         )
 
-    @defer.inlineCallbacks
-    def _background_update_set_deactivated_flag(self, progress, batch_size):
+    async def _background_update_set_deactivated_flag(self, progress, batch_size):
         """Retrieves a list of all deactivated users and sets the 'deactivated' flag to 1
         for each of them.
         """
@@ -870,19 +845,18 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
             else:
                 return False, len(rows)
 
-        end, nb_processed = yield self.db_pool.runInteraction(
+        end, nb_processed = await self.db_pool.runInteraction(
             "users_set_deactivated_flag", _background_update_set_deactivated_flag_txn
         )
 
         if end:
-            yield self.db_pool.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 "users_set_deactivated_flag"
             )
 
         return nb_processed
 
-    @defer.inlineCallbacks
-    def _bg_user_threepids_grandfather(self, progress, batch_size):
+    async def _bg_user_threepids_grandfather(self, progress, batch_size):
         """We now track which identity servers a user binds their 3PID to, so
         we need to handle the case of existing bindings where we didn't track
         this.
@@ -903,11 +877,11 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
             txn.executemany(sql, [(id_server,) for id_server in id_servers])
 
         if id_servers:
-            yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 "_bg_user_threepids_grandfather", _bg_user_threepids_grandfather_txn
             )
 
-        yield self.db_pool.updates._end_background_update("user_threepids_grandfather")
+        await self.db_pool.updates._end_background_update("user_threepids_grandfather")
 
         return 1
 
@@ -937,23 +911,26 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
 
         hs.get_clock().looping_call(start_cull, THIRTY_MINUTES_IN_MS)
 
-    @defer.inlineCallbacks
-    def add_access_token_to_user(self, user_id, token, device_id, valid_until_ms):
+    async def add_access_token_to_user(
+        self,
+        user_id: str,
+        token: str,
+        device_id: Optional[str],
+        valid_until_ms: Optional[int],
+    ) -> None:
         """Adds an access token for the given user.
 
         Args:
-            user_id (str): The user ID.
-            token (str): The new access token to add.
-            device_id (str): ID of the device to associate with the access
-                token
-            valid_until_ms (int|None): when the token is valid until. None for
-                no expiry.
+            user_id: The user ID.
+            token: The new access token to add.
+            device_id: ID of the device to associate with the access token.
+            valid_until_ms: the timestamp (in milliseconds) until which the
+                token is valid. None for no expiry.
         Raises:
             StoreError if there was a problem adding this.
         """
         next_id = self._access_tokens_id_gen.get_next()
 
-        yield self.db_pool.simple_insert(
+        await self.db_pool.simple_insert(
             "access_tokens",
             {
                 "id": next_id,
@@ -1097,7 +1074,6 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             )
 
         self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
-        txn.call_after(self.is_guest.invalidate, (user_id,))
 
     def record_user_external_id(
         self, auth_provider: str, external_id: str, user_id: str
@@ -1241,9 +1217,9 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
 
         return self.db_pool.runInteraction("delete_access_token", f)
 
-    @cachedInlineCallbacks()
-    def is_guest(self, user_id):
-        res = yield self.db_pool.simple_select_one_onecol(
+    @cached()
+    async def is_guest(self, user_id: str) -> bool:
+        res = await self.db_pool.simple_select_one_onecol(
             table="users",
             keyvalues={"name": user_id},
             retcol="is_guest",
@@ -1481,16 +1457,17 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             self.clock.time_msec(),
         )
 
-    @defer.inlineCallbacks
-    def set_user_deactivated_status(self, user_id, deactivated):
+    async def set_user_deactivated_status(
+        self, user_id: str, deactivated: bool
+    ) -> None:
         """Set the `deactivated` property for the provided user to the provided value.
 
         Args:
-            user_id (str): The ID of the user to set the status for.
-            deactivated (bool): The value to set for `deactivated`.
+            user_id: The ID of the user to set the status for.
+            deactivated: The value to set for `deactivated`.
         """
 
-        yield self.db_pool.runInteraction(
+        await self.db_pool.runInteraction(
             "set_user_deactivated_status",
             self.set_user_deactivated_status_txn,
             user_id,
@@ -1507,9 +1484,9 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         self._invalidate_cache_and_stream(
             txn, self.get_user_deactivated_status, (user_id,)
         )
+        txn.call_after(self.is_guest.invalidate, (user_id,))
 
-    @defer.inlineCallbacks
-    def _set_expiration_date_when_missing(self):
+    async def _set_expiration_date_when_missing(self):
         """
         Retrieves the list of registered users that don't have an expiration date, and
         adds an expiration date for each of them.
@@ -1533,7 +1510,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
                         txn, user["name"], use_delta=True
                     )
 
-        yield self.db_pool.runInteraction(
+        await self.db_pool.runInteraction(
             "get_users_with_no_expiration_date",
             select_users_with_no_expiration_date_txn,
         )
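
Taken together, the converted account-validity methods form a small renewal
flow. A minimal sketch of a caller driving them, assuming a `store` exposing
the async methods above; the token generator, the validity window, and the
exact `set_account_validity_for_user` arguments are illustrative assumptions,
not Synapse's actual policy:

    import secrets
    import time

    async def start_renewal(store, user_id: str) -> str:
        # Persist a fresh random token for the user; per the docstring
        # above, the store raises StoreError if the token is already taken.
        token = secrets.token_hex(16)
        await store.set_renewal_token_for_user(user_id, token)
        await store.set_renewal_mail_status(user_id, email_sent=True)
        return token

    async def complete_renewal(store, token: str, validity_ms: int) -> str:
        # Resolve the token back to its user and push the expiration forward.
        user_id = await store.get_user_from_renewal_token(token)
        expiration_ts = int(time.time() * 1000) + validity_ms
        # Assumed signature; the real method takes further keyword arguments.
        await store.set_account_validity_for_user(user_id, expiration_ts)
        return user_id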
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index b81f1449b7..a9ceffc20e 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -14,10 +14,12 @@
 # limitations under the License.
 
 import logging
+from typing import Optional
 
 import attr
 
 from synapse.api.constants import RelationTypes
+from synapse.events import EventBase
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.databases.main.stream import generate_pagination_where_clause
 from synapse.storage.relations import (
@@ -25,7 +27,7 @@ from synapse.storage.relations import (
     PaginationChunk,
     RelationPaginationToken,
 )
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
+from synapse.util.caches.descriptors import cached
 
 logger = logging.getLogger(__name__)
 
@@ -227,18 +229,18 @@ class RelationsWorkerStore(SQLBaseStore):
             "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn
         )
 
-    @cachedInlineCallbacks()
-    def get_applicable_edit(self, event_id):
+    @cached()
+    async def get_applicable_edit(self, event_id: str) -> Optional[EventBase]:
         """Get the most recent edit (if any) that has happened for the given
         event.
 
         Correctly handles checking whether edits were allowed to happen.
 
         Args:
-            event_id (str): The original event ID
+            event_id: The original event ID
 
         Returns:
-            Deferred[EventBase|None]: Returns the most recent edit, if any.
+            The most recent edit, if any.
         """
 
         # We only allow edits for `m.room.message` events that have the same sender
@@ -268,15 +270,14 @@ class RelationsWorkerStore(SQLBaseStore):
             if row:
                 return row[0]
 
-        edit_id = yield self.db_pool.runInteraction(
+        edit_id = await self.db_pool.runInteraction(
             "get_applicable_edit", _get_applicable_edit_txn
         )
 
         if not edit_id:
-            return
+            return None
 
-        edit_event = yield self.get_event(edit_id, allow_none=True)
-        return edit_event
+        return await self.get_event(edit_id, allow_none=True)
 
     def has_user_annotated_event(self, parent_id, event_type, aggregation_key, sender):
         """Check if a user has already annotated an event with the same key
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 7c5be251bd..b2fcfc9bfe 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -15,11 +15,13 @@
 # limitations under the License.
 
 import logging
-from typing import Iterable, List, Set
+from typing import TYPE_CHECKING, Awaitable, Iterable, List, Optional, Set
 
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
+from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import (
@@ -40,9 +42,12 @@ from synapse.storage.roommember import (
 from synapse.types import Collection, get_domain_from_id
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches import intern_string
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
+from synapse.util.caches.descriptors import _CacheContext, cached, cachedList
 from synapse.util.metrics import Measure
 
+if TYPE_CHECKING:
+    from synapse.state import _StateCacheEntry
+
 logger = logging.getLogger(__name__)
 
 
@@ -150,12 +155,12 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             )
 
     @cached(max_entries=100000, iterable=True)
-    def get_users_in_room(self, room_id):
+    def get_users_in_room(self, room_id: str):
         return self.db_pool.runInteraction(
             "get_users_in_room", self.get_users_in_room_txn, room_id
         )
 
-    def get_users_in_room_txn(self, txn, room_id):
+    def get_users_in_room_txn(self, txn, room_id: str) -> List[str]:
         # If we can assume current_state_events.membership is up to date
         # then we can avoid a join, which is a Very Good Thing given how
         # frequently this function gets called.
@@ -178,11 +183,11 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         return [r[0] for r in txn]
 
     @cached(max_entries=100000)
-    def get_room_summary(self, room_id):
+    def get_room_summary(self, room_id: str):
         """ Get the details of a room roughly suitable for use by the room
         summary extension to /sync. Useful when lazy loading room members.
         Args:
-            room_id (str): The room ID to query
+            room_id: The room ID to query
         Returns:
             Deferred[dict[str, MemberSummary]]:
                 dict of membership states, pointing to a MemberSummary named tuple.
@@ -261,78 +266,59 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
         return self.db_pool.runInteraction("get_room_summary", _get_room_summary_txn)
 
-    def _get_user_counts_in_room_txn(self, txn, room_id):
-        """
-        Get the user count in a room by membership.
-
-        Args:
-            room_id (str)
-            membership (Membership)
-
-        Returns:
-            Deferred[int]
-        """
-        sql = """
-        SELECT m.membership, count(*) FROM room_memberships as m
-            INNER JOIN current_state_events as c USING(event_id)
-            WHERE c.type = 'm.room.member' AND c.room_id = ?
-            GROUP BY m.membership
-        """
-
-        txn.execute(sql, (room_id,))
-        return {row[0]: row[1] for row in txn}
-
     @cached()
-    def get_invited_rooms_for_local_user(self, user_id):
-        """ Get all the rooms the *local* user is invited to
+    def get_invited_rooms_for_local_user(self, user_id: str) -> Awaitable[RoomsForUser]:
+        """Get all the rooms the *local* user is invited to.
 
         Args:
-            user_id (str): The user ID.
+            user_id: The user ID.
+
         Returns:
-            A deferred list of RoomsForUser.
+            An awaitable list of RoomsForUser.
         """
 
         return self.get_rooms_for_local_user_where_membership_is(
             user_id, [Membership.INVITE]
         )
 
-    @defer.inlineCallbacks
-    def get_invite_for_local_user_in_room(self, user_id, room_id):
-        """Gets the invite for the given *local* user and room
+    async def get_invite_for_local_user_in_room(
+        self, user_id: str, room_id: str
+    ) -> Optional[RoomsForUser]:
+        """Gets the invite for the given *local* user and room.
 
         Args:
-            user_id (str)
-            room_id (str)
+            user_id: The user ID to find the invite of.
+            room_id: The room the user was invited to.
 
         Returns:
-            Deferred: Resolves to either a RoomsForUser or None if no invite was
-                found.
+            Either a RoomsForUser or None if no invite was found.
         """
-        invites = yield self.get_invited_rooms_for_local_user(user_id)
+        invites = await self.get_invited_rooms_for_local_user(user_id)
         for invite in invites:
             if invite.room_id == room_id:
                 return invite
         return None
 
-    @defer.inlineCallbacks
-    def get_rooms_for_local_user_where_membership_is(self, user_id, membership_list):
-        """ Get all the rooms for this *local* user where the membership for this user
+    async def get_rooms_for_local_user_where_membership_is(
+        self, user_id: str, membership_list: List[str]
+    ) -> Optional[List[RoomsForUser]]:
+        """Get all the rooms for this *local* user where the membership for this user
         matches one in the membership list.
 
         Filters out forgotten rooms.
 
         Args:
-            user_id (str): The user ID.
-            membership_list (list): A list of synapse.api.constants.Membership
-            values which the user must be in.
+            user_id: The user ID.
+            membership_list: A list of synapse.api.constants.Membership
+                values which the user must be in.
 
         Returns:
-            Deferred[list[RoomsForUser]]
+            A list of RoomsForUser whose membership matches one of the given
+                types, or None if membership_list is empty.
         """
         if not membership_list:
-            return defer.succeed(None)
+            return None
 
-        rooms = yield self.db_pool.runInteraction(
+        rooms = await self.db_pool.runInteraction(
             "get_rooms_for_local_user_where_membership_is",
             self._get_rooms_for_local_user_where_membership_is_txn,
             user_id,
@@ -340,12 +326,12 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         )
 
         # Now we filter out forgotten rooms
-        forgotten_rooms = yield self.get_forgotten_rooms_for_user(user_id)
+        forgotten_rooms = await self.get_forgotten_rooms_for_user(user_id)
         return [room for room in rooms if room.room_id not in forgotten_rooms]
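
A short usage sketch for the membership helpers above; note that the
empty-list fast path now returns None directly instead of a pre-fired
Deferred. The wrapper name is illustrative:

    from synapse.api.constants import Membership

    async def pending_invite_room_ids(store, user_id: str):
        # Forgotten rooms are already filtered out by the store method.
        rooms = await store.get_rooms_for_local_user_where_membership_is(
            user_id, [Membership.INVITE]
        )
        return [r.room_id for r in rooms] if rooms else []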
 
     def _get_rooms_for_local_user_where_membership_is_txn(
-        self, txn, user_id, membership_list
-    ):
+        self, txn, user_id: str, membership_list: List[str]
+    ) -> List[RoomsForUser]:
         # Paranoia check.
         if not self.hs.is_mine_id(user_id):
             raise Exception(
@@ -374,14 +360,14 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         return results
 
     @cached(max_entries=500000, iterable=True)
-    def get_rooms_for_user_with_stream_ordering(self, user_id):
+    def get_rooms_for_user_with_stream_ordering(self, user_id: str):
         """Returns a set of room_ids the user is currently joined to.
 
         If a remote user only returns rooms this server is currently
         participating in.
 
         Args:
-            user_id (str)
+            user_id
 
         Returns:
             Deferred[frozenset[GetRoomsForUserWithStreamOrdering]]: Returns
@@ -394,7 +380,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             user_id,
         )
 
-    def _get_rooms_for_user_with_stream_ordering_txn(self, txn, user_id):
+    def _get_rooms_for_user_with_stream_ordering_txn(self, txn, user_id: str):
         # We use `current_state_events` here and not `local_current_membership`
         # as a) this gets called with remote users and b) this only gets called
         # for rooms the server is participating in.
@@ -458,37 +444,39 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             _get_users_server_still_shares_room_with_txn,
         )
 
-    @defer.inlineCallbacks
-    def get_rooms_for_user(self, user_id, on_invalidate=None):
+    async def get_rooms_for_user(self, user_id: str, on_invalidate=None):
         """Returns a set of room_ids the user is currently joined to.
 
         If a remote user only returns rooms this server is currently
         participating in.
         """
-        rooms = yield self.get_rooms_for_user_with_stream_ordering(
+        rooms = await self.get_rooms_for_user_with_stream_ordering(
             user_id, on_invalidate=on_invalidate
         )
         return frozenset(r.room_id for r in rooms)
 
-    @cachedInlineCallbacks(max_entries=500000, cache_context=True, iterable=True)
-    def get_users_who_share_room_with_user(self, user_id, cache_context):
+    @cached(max_entries=500000, cache_context=True, iterable=True)
+    async def get_users_who_share_room_with_user(
+        self, user_id: str, cache_context: _CacheContext
+    ) -> Set[str]:
         """Returns the set of users who share a room with `user_id`
         """
-        room_ids = yield self.get_rooms_for_user(
+        room_ids = await self.get_rooms_for_user(
             user_id, on_invalidate=cache_context.invalidate
         )
 
         user_who_share_room = set()
         for room_id in room_ids:
-            user_ids = yield self.get_users_in_room(
+            user_ids = await self.get_users_in_room(
                 room_id, on_invalidate=cache_context.invalidate
             )
             user_who_share_room.update(user_ids)
 
         return user_who_share_room
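
The `cache_context=True` pattern survives the conversion unchanged: the
coroutine threads `cache_context.invalidate` into each nested cached lookup,
so evicting an inner entry also evicts the outer one. A standalone toy
illustrating the cascading-invalidation idea (not Synapse's descriptor
implementation):

    import asyncio

    class TinyCache:
        """Toy async cache with per-entry invalidation callbacks."""

        def __init__(self, fn):
            self.fn = fn
            self.entries = {}    # key -> cached result
            self.callbacks = {}  # key -> callbacks to fire on invalidation

        async def get(self, key, on_invalidate=None):
            if on_invalidate is not None:
                self.callbacks.setdefault(key, []).append(on_invalidate)
            if key not in self.entries:
                self.entries[key] = await self.fn(key)
            return self.entries[key]

        def invalidate(self, key):
            self.entries.pop(key, None)
            for cb in self.callbacks.pop(key, []):
                cb()  # cascade to entries derived from this one

    async def demo():
        async def users_in_room(room_id):
            return {"@bob:example.org"}

        rooms = TinyCache(users_in_room)

        async def who_shares(user_id):
            # Mirror on_invalidate=cache_context.invalidate above: wire our
            # own eviction into the inner entry's callbacks.
            return await rooms.get(
                "!room:example.org",
                on_invalidate=lambda: shares.invalidate(user_id),
            )

        shares = TinyCache(who_shares)
        await shares.get("@alice:example.org")
        rooms.invalidate("!room:example.org")  # also evicts the derived entry
        assert "@alice:example.org" not in shares.entries

    asyncio.run(demo())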
 
-    @defer.inlineCallbacks
-    def get_joined_users_from_context(self, event, context):
+    async def get_joined_users_from_context(
+        self, event: EventBase, context: EventContext
+    ):
         state_group = context.state_group
         if not state_group:
             # If state_group is None it means it has yet to be assigned a
@@ -497,14 +485,12 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             # To do this we set the state_group to a new object as object() != object()
             state_group = object()
 
-        current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids())
-        result = yield self._get_joined_users_from_context(
+        current_state_ids = await context.get_current_state_ids()
+        return await self._get_joined_users_from_context(
             event.room_id, state_group, current_state_ids, event=event, context=context
         )
-        return result
 
-    @defer.inlineCallbacks
-    def get_joined_users_from_state(self, room_id, state_entry):
+    async def get_joined_users_from_state(self, room_id, state_entry):
         state_group = state_entry.state_group
         if not state_group:
             # If state_group is None it means it has yet to be assigned a
@@ -514,16 +500,12 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             state_group = object()
 
         with Measure(self._clock, "get_joined_users_from_state"):
-            return (
-                yield self._get_joined_users_from_context(
-                    room_id, state_group, state_entry.state, context=state_entry
-                )
+            return await self._get_joined_users_from_context(
+                room_id, state_group, state_entry.state, context=state_entry
             )
 
-    @cachedInlineCallbacks(
-        num_args=2, cache_context=True, iterable=True, max_entries=100000
-    )
-    def _get_joined_users_from_context(
+    @cached(num_args=2, cache_context=True, iterable=True, max_entries=100000)
+    async def _get_joined_users_from_context(
         self,
         room_id,
         state_group,
@@ -535,7 +517,6 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         # We don't use `state_group`, it's there so that we can cache based
         # on it. However, it's important that it's never None, since two current_states
         # with a state_group of None are likely to be different.
-        # See bulk_get_push_rules_for_room for how we work around this.
         assert state_group is not None
 
         users_in_room = {}
@@ -588,7 +569,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
                 missing_member_event_ids.append(event_id)
 
         if missing_member_event_ids:
-            event_to_memberships = yield self._get_joined_profiles_from_event_ids(
+            event_to_memberships = await self._get_joined_profiles_from_event_ids(
                 missing_member_event_ids
             )
             users_in_room.update((row for row in event_to_memberships.values() if row))
@@ -612,12 +593,12 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         list_name="event_ids",
         inlineCallbacks=True,
     )
-    def _get_joined_profiles_from_event_ids(self, event_ids):
+    def _get_joined_profiles_from_event_ids(self, event_ids: Iterable[str]):
         """For given set of member event_ids check if they point to a join
         event and if so return the associated user and profile info.
 
         Args:
-            event_ids (Iterable[str]): The member event IDs to lookup
+            event_ids: The member event IDs to lookup
 
         Returns:
             Deferred[dict[str, Tuple[str, ProfileInfo]|None]]: Map from event ID
@@ -644,8 +625,8 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             for row in rows
         }
 
-    @cachedInlineCallbacks(max_entries=10000)
-    def is_host_joined(self, room_id, host):
+    @cached(max_entries=10000)
+    async def is_host_joined(self, room_id: str, host: str) -> bool:
         if "%" in host or "_" in host:
             raise Exception("Invalid host name")
 
@@ -664,7 +645,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         # the returned user actually has the correct domain.
         like_clause = "%:" + host
 
-        rows = yield self.db_pool.execute(
+        rows = await self.db_pool.execute(
             "is_host_joined", None, sql, room_id, like_clause
         )
 
@@ -678,50 +659,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
         return True
 
-    @cachedInlineCallbacks()
-    def was_host_joined(self, room_id, host):
-        """Check whether the server is or ever was in the room.
-
-        Args:
-            room_id (str)
-            host (str)
-
-        Returns:
-            Deferred: Resolves to True if the host is/was in the room, otherwise
-            False.
-        """
-        if "%" in host or "_" in host:
-            raise Exception("Invalid host name")
-
-        sql = """
-            SELECT user_id FROM room_memberships
-            WHERE room_id = ?
-                AND user_id LIKE ?
-                AND membership = 'join'
-            LIMIT 1
-        """
-
-        # We do need to be careful to ensure that host doesn't have any wild cards
-        # in it, but we checked above for known ones and we'll check below that
-        # the returned user actually has the correct domain.
-        like_clause = "%:" + host
-
-        rows = yield self.db_pool.execute(
-            "was_host_joined", None, sql, room_id, like_clause
-        )
-
-        if not rows:
-            return False
-
-        user_id = rows[0][0]
-        if get_domain_from_id(user_id) != host:
-            # This can only happen if the host name has something funky in it
-            raise Exception("Invalid host name")
-
-        return True
-
-    @defer.inlineCallbacks
-    def get_joined_hosts(self, room_id, state_entry):
+    async def get_joined_hosts(self, room_id: str, state_entry):
         state_group = state_entry.state_group
         if not state_group:
             # If state_group is None it means it has yet to be assigned a
@@ -731,32 +669,28 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             state_group = object()
 
         with Measure(self._clock, "get_joined_hosts"):
-            return (
-                yield self._get_joined_hosts(
-                    room_id, state_group, state_entry.state, state_entry=state_entry
-                )
+            return await self._get_joined_hosts(
+                room_id, state_group, state_entry.state, state_entry=state_entry
             )
 
-    @cachedInlineCallbacks(num_args=2, max_entries=10000, iterable=True)
-    # @defer.inlineCallbacks
-    def _get_joined_hosts(self, room_id, state_group, current_state_ids, state_entry):
+    @cached(num_args=2, max_entries=10000, iterable=True)
+    async def _get_joined_hosts(
+        self, room_id, state_group, current_state_ids, state_entry
+    ):
         # We don't use `state_group`, it's there so that we can cache based
         # on it. However, it's important that it's never None, since two current_states
         # with a state_group of None are likely to be different.
-        # See bulk_get_push_rules_for_room for how we work around this.
         assert state_group is not None
 
-        cache = yield self._get_joined_hosts_cache(room_id)
-        joined_hosts = yield cache.get_destinations(state_entry)
-
-        return joined_hosts
+        cache = await self._get_joined_hosts_cache(room_id)
+        return await cache.get_destinations(state_entry)
 
     @cached(max_entries=10000)
-    def _get_joined_hosts_cache(self, room_id):
+    def _get_joined_hosts_cache(self, room_id: str) -> "_JoinedHostsCache":
         return _JoinedHostsCache(self, room_id)
 
-    @cachedInlineCallbacks(num_args=2)
-    def did_forget(self, user_id, room_id):
+    @cached(num_args=2)
+    async def did_forget(self, user_id: str, room_id: str) -> bool:
         """Returns whether user_id has elected to discard history for room_id.
 
         Returns False if they have since re-joined."""
@@ -778,15 +712,15 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             rows = txn.fetchall()
             return rows[0][0]
 
-        count = yield self.db_pool.runInteraction("did_forget_membership", f)
+        count = await self.db_pool.runInteraction("did_forget_membership", f)
         return count == 0
 
     @cached()
-    def get_forgotten_rooms_for_user(self, user_id):
+    def get_forgotten_rooms_for_user(self, user_id: str):
         """Gets all rooms the user has forgotten.
 
         Args:
-            user_id (str)
+            user_id
 
         Returns:
             Deferred[set[str]]
@@ -819,18 +753,17 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             "get_forgotten_rooms_for_user", _get_forgotten_rooms_for_user_txn
         )
 
-    @defer.inlineCallbacks
-    def get_rooms_user_has_been_in(self, user_id):
+    async def get_rooms_user_has_been_in(self, user_id: str) -> Set[str]:
         """Get all rooms that the user has ever been in.
 
         Args:
-            user_id (str)
+            user_id: The user ID to get the rooms of.
 
         Returns:
-            Deferred[set[str]]: Set of room IDs.
+            Set of room IDs.
         """
 
-        room_ids = yield self.db_pool.simple_select_onecol(
+        room_ids = await self.db_pool.simple_select_onecol(
             table="room_memberships",
             keyvalues={"membership": Membership.JOIN, "user_id": user_id},
             retcol="room_id",
@@ -905,8 +838,7 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
             where_clause="forgotten = 1",
         )
 
-    @defer.inlineCallbacks
-    def _background_add_membership_profile(self, progress, batch_size):
+    async def _background_add_membership_profile(self, progress, batch_size):
         target_min_stream_id = progress.get(
             "target_min_stream_id_inclusive", self._min_stream_order_on_start
         )
@@ -971,19 +903,18 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
 
             return len(rows)
 
-        result = yield self.db_pool.runInteraction(
+        result = await self.db_pool.runInteraction(
             _MEMBERSHIP_PROFILE_UPDATE_NAME, add_membership_profile_txn
         )
 
         if not result:
-            yield self.db_pool.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 _MEMBERSHIP_PROFILE_UPDATE_NAME
             )
 
         return result
 
-    @defer.inlineCallbacks
-    def _background_current_state_membership(self, progress, batch_size):
+    async def _background_current_state_membership(self, progress, batch_size):
         """Update the new membership column on current_state_events.
 
         This works by iterating over all rooms in alphabetical order.
@@ -1029,14 +960,14 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
         # string, which will compare before all room IDs correctly.
         last_processed_room = progress.get("last_processed_room", "")
 
-        row_count, finished = yield self.db_pool.runInteraction(
+        row_count, finished = await self.db_pool.runInteraction(
             "_background_current_state_membership_update",
             _background_current_state_membership_txn,
             last_processed_room,
         )
 
         if finished:
-            yield self.db_pool.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME
             )
 
@@ -1047,7 +978,7 @@ class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore):
     def __init__(self, database: DatabasePool, db_conn, hs):
         super(RoomMemberStore, self).__init__(database, db_conn, hs)
 
-    def forget(self, user_id, room_id):
+    def forget(self, user_id: str, room_id: str):
         """Indicate that user_id wishes to discard history for room_id."""
 
         def f(txn):
@@ -1088,17 +1019,19 @@ class _JoinedHostsCache(object):
 
         self._len = 0
 
-    @defer.inlineCallbacks
-    def get_destinations(self, state_entry):
+    async def get_destinations(self, state_entry: "_StateCacheEntry") -> Set[str]:
         """Get set of destinations for a state entry
 
         Args:
-            state_entry(synapse.state._StateCacheEntry)
+            state_entry
+
+        Returns:
+            The destinations as a set.
         """
         if state_entry.state_group == self.state_group:
             return frozenset(self.hosts_to_joined_users)
 
-        with (yield self.linearizer.queue(())):
+        with (await self.linearizer.queue(())):
             if state_entry.state_group == self.state_group:
                 pass
             elif state_entry.prev_group == self.state_group:
@@ -1110,7 +1043,7 @@ class _JoinedHostsCache(object):
                     user_id = state_key
                     known_joins = self.hosts_to_joined_users.setdefault(host, set())
 
-                    event = yield self.store.get_event(event_id)
+                    event = await self.store.get_event(event_id)
                     if event.membership == Membership.JOIN:
                         known_joins.add(user_id)
                     else:
@@ -1119,7 +1052,7 @@ class _JoinedHostsCache(object):
                         if not known_joins:
                             self.hosts_to_joined_users.pop(host, None)
             else:
-                joined_users = yield self.store.get_joined_users_from_state(
+                joined_users = await self.store.get_joined_users_from_state(
                     self.room_id, state_entry
                 )
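
The mechanical shape of every conversion in this file, and in the rest of
this patch, is the same. A hedged before/after with illustrative names:

    from typing import Optional

    # Before (Twisted style):
    #
    #     @defer.inlineCallbacks
    #     def get_thing(self, thing_id):
    #         res = yield self.db_pool.simple_select_one_onecol(...)
    #         return res
    #
    # After: a native coroutine. Types move from the docstring into the
    # signature, and `res = yield ...; return res` collapses to
    # `return await ...`.
    async def get_thing(db_pool, thing_id: str) -> Optional[str]:
        return await db_pool.simple_select_one_onecol(
            table="things",  # illustrative table and column names
            keyvalues={"thing_id": thing_id},
            retcol="value",
            desc="get_thing",
            allow_none=True,
        )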
 
diff --git a/synapse/storage/databases/main/schema/delta/58/12unread_messages.sql b/synapse/storage/databases/main/schema/delta/58/12unread_messages.sql
deleted file mode 100644
index 531b532c73..0000000000
--- a/synapse/storage/databases/main/schema/delta/58/12unread_messages.sql
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright 2020 The Matrix.org Foundation C.I.C
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
--- Store a boolean value in the events table for whether the event should be counted in
--- the unread_count property of sync responses.
-ALTER TABLE events ADD COLUMN count_as_unread BOOLEAN;
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index 2162d0712d..7f8d1880e5 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -16,8 +16,7 @@
 import logging
 import re
 from collections import namedtuple
-
-from twisted.internet import defer
+from typing import List, Optional
 
 from synapse.api.errors import SynapseError
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@@ -114,8 +113,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
             self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME, self._background_reindex_gin_search
         )
 
-    @defer.inlineCallbacks
-    def _background_reindex_search(self, progress, batch_size):
+    async def _background_reindex_search(self, progress, batch_size):
         # we work through the events table from highest stream id to lowest
         target_min_stream_id = progress["target_min_stream_id_inclusive"]
         max_stream_id = progress["max_stream_id_exclusive"]
@@ -206,19 +204,18 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
 
             return len(event_search_rows)
 
-        result = yield self.db_pool.runInteraction(
+        result = await self.db_pool.runInteraction(
             self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn
         )
 
         if not result:
-            yield self.db_pool.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 self.EVENT_SEARCH_UPDATE_NAME
             )
 
         return result
 
-    @defer.inlineCallbacks
-    def _background_reindex_gin_search(self, progress, batch_size):
+    async def _background_reindex_gin_search(self, progress, batch_size):
         """This handles old synapses which used GIST indexes, if any;
         converting them back to be GIN as per the actual schema.
         """
@@ -255,15 +252,14 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
                 conn.set_session(autocommit=False)
 
         if isinstance(self.database_engine, PostgresEngine):
-            yield self.db_pool.runWithConnection(create_index)
+            await self.db_pool.runWithConnection(create_index)
 
-        yield self.db_pool.updates._end_background_update(
+        await self.db_pool.updates._end_background_update(
             self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME
         )
         return 1
 
-    @defer.inlineCallbacks
-    def _background_reindex_search_order(self, progress, batch_size):
+    async def _background_reindex_search_order(self, progress, batch_size):
         target_min_stream_id = progress["target_min_stream_id_inclusive"]
         max_stream_id = progress["max_stream_id_exclusive"]
         rows_inserted = progress.get("rows_inserted", 0)
@@ -288,12 +284,12 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
                 )
                 conn.set_session(autocommit=False)
 
-            yield self.db_pool.runWithConnection(create_index)
+            await self.db_pool.runWithConnection(create_index)
 
             pg = dict(progress)
             pg["have_added_indexes"] = True
 
-            yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 self.EVENT_SEARCH_ORDER_UPDATE_NAME,
                 self.db_pool.updates._background_update_progress_txn,
                 self.EVENT_SEARCH_ORDER_UPDATE_NAME,
@@ -331,12 +327,12 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
 
             return len(rows), True
 
-        num_rows, finished = yield self.db_pool.runInteraction(
+        num_rows, finished = await self.db_pool.runInteraction(
             self.EVENT_SEARCH_ORDER_UPDATE_NAME, reindex_search_txn
         )
 
         if not finished:
-            yield self.db_pool.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 self.EVENT_SEARCH_ORDER_UPDATE_NAME
             )
 
@@ -347,8 +343,7 @@ class SearchStore(SearchBackgroundUpdateStore):
     def __init__(self, database: DatabasePool, db_conn, hs):
         super(SearchStore, self).__init__(database, db_conn, hs)
 
-    @defer.inlineCallbacks
-    def search_msgs(self, room_ids, search_term, keys):
+    async def search_msgs(self, room_ids, search_term, keys):
         """Performs a full text search over events with given keys.
 
         Args:
@@ -425,7 +420,7 @@ class SearchStore(SearchBackgroundUpdateStore):
         # entire table from the database.
         sql += " ORDER BY rank DESC LIMIT 500"
 
-        results = yield self.db_pool.execute(
+        results = await self.db_pool.execute(
             "search_msgs", self.db_pool.cursor_to_dict, sql, *args
         )
 
@@ -433,7 +428,7 @@ class SearchStore(SearchBackgroundUpdateStore):
 
         # We set redact_behaviour to BLOCK here to prevent redacted events being returned in
         # search results (which is a data leak)
-        events = yield self.get_events_as_list(
+        events = await self.get_events_as_list(
             [r["event_id"] for r in results],
             redact_behaviour=EventRedactBehaviour.BLOCK,
         )
@@ -442,11 +437,11 @@ class SearchStore(SearchBackgroundUpdateStore):
 
         highlights = None
         if isinstance(self.database_engine, PostgresEngine):
-            highlights = yield self._find_highlights_in_postgres(search_query, events)
+            highlights = await self._find_highlights_in_postgres(search_query, events)
 
         count_sql += " GROUP BY room_id"
 
-        count_results = yield self.db_pool.execute(
+        count_results = await self.db_pool.execute(
             "search_rooms_count", self.db_pool.cursor_to_dict, count_sql, *count_args
         )
 
@@ -462,19 +457,25 @@ class SearchStore(SearchBackgroundUpdateStore):
             "count": count,
         }
 
-    @defer.inlineCallbacks
-    def search_rooms(self, room_ids, search_term, keys, limit, pagination_token=None):
+    async def search_rooms(
+        self,
+        room_ids: List[str],
+        search_term: str,
+        keys: List[str],
+        limit: int,
+        pagination_token: Optional[str] = None,
+    ) -> List[dict]:
         """Performs a full text search over events with given keys.
 
         Args:
-            room_id (list): The room_ids to search in
-            search_term (str): Search term to search for
-            keys (list): List of keys to search in, currently supports
-                "content.body", "content.name", "content.topic"
-            pagination_token (str): A pagination token previously returned
+            room_ids: The room_ids to search in
+            search_term: Search term to search for
+            keys: List of keys to search in, currently supports "content.body",
+                "content.name", "content.topic"
+            limit: The maximum number of events to return
+            pagination_token: A pagination token previously returned
 
         Returns:
-            list of dicts
+            A list of dictionaries, one per matching event.
         """
         clauses = []
 
@@ -577,7 +578,7 @@ class SearchStore(SearchBackgroundUpdateStore):
 
         args.append(limit)
 
-        results = yield self.db_pool.execute(
+        results = await self.db_pool.execute(
             "search_rooms", self.db_pool.cursor_to_dict, sql, *args
         )
 
@@ -585,7 +586,7 @@ class SearchStore(SearchBackgroundUpdateStore):
 
         # We set redact_behaviour to BLOCK here to prevent redacted events being returned in
         # search results (which is a data leak)
-        events = yield self.get_events_as_list(
+        events = await self.get_events_as_list(
             [r["event_id"] for r in results],
             redact_behaviour=EventRedactBehaviour.BLOCK,
         )
@@ -594,11 +595,11 @@ class SearchStore(SearchBackgroundUpdateStore):
 
         highlights = None
         if isinstance(self.database_engine, PostgresEngine):
-            highlights = yield self._find_highlights_in_postgres(search_query, events)
+            highlights = await self._find_highlights_in_postgres(search_query, events)
 
         count_sql += " GROUP BY room_id"
 
-        count_results = yield self.db_pool.execute(
+        count_results = await self.db_pool.execute(
             "search_rooms_count", self.db_pool.cursor_to_dict, count_sql, *count_args
         )
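
Both search entry points are now awaited directly by their callers. A hedged
usage sketch, relying only on the `count` key assembled in the result dict
above; the wrapper is illustrative:

    from typing import List

    async def count_matches(store, room_ids: List[str], term: str) -> int:
        res = await store.search_msgs(room_ids, term, keys=["content.body"])
        return res["count"]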
 
diff --git a/synapse/storage/databases/main/signatures.py b/synapse/storage/databases/main/signatures.py
index dae8e8bd29..be191dd870 100644
--- a/synapse/storage/databases/main/signatures.py
+++ b/synapse/storage/databases/main/signatures.py
@@ -15,8 +15,6 @@
 
 from unpaddedbase64 import encode_base64
 
-from twisted.internet import defer
-
 from synapse.storage._base import SQLBaseStore
 from synapse.util.caches.descriptors import cached, cachedList
 
@@ -40,9 +38,8 @@ class SignatureWorkerStore(SQLBaseStore):
 
         return self.db_pool.runInteraction("get_event_reference_hashes", f)
 
-    @defer.inlineCallbacks
-    def add_event_hashes(self, event_ids):
-        hashes = yield self.get_event_reference_hashes(event_ids)
+    async def add_event_hashes(self, event_ids):
+        hashes = await self.get_event_reference_hashes(event_ids)
         hashes = {
             e_id: {k: encode_base64(v) for k, v in h.items() if k == "sha256"}
             for e_id, h in hashes.items()
diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py
index eedd2d96c3..e4e0a0c433 100644
--- a/synapse/storage/databases/main/tags.py
+++ b/synapse/storage/databases/main/tags.py
@@ -15,14 +15,13 @@
 # limitations under the License.
 
 import logging
-from typing import List, Tuple
+from typing import Dict, List, Tuple
 
 from canonicaljson import json
 
-from twisted.internet import defer
-
 from synapse.storage._base import db_to_json
 from synapse.storage.databases.main.account_data import AccountDataWorkerStore
+from synapse.types import JsonDict
 from synapse.util.caches.descriptors import cached
 
 logger = logging.getLogger(__name__)
@@ -30,30 +29,26 @@ logger = logging.getLogger(__name__)
 
 class TagsWorkerStore(AccountDataWorkerStore):
     @cached()
-    def get_tags_for_user(self, user_id):
+    async def get_tags_for_user(self, user_id: str) -> Dict[str, Dict[str, JsonDict]]:
         """Get all the tags for a user.
 
 
         Args:
-            user_id(str): The user to get the tags for.
+            user_id: The user to get the tags for.
         Returns:
-            A deferred dict mapping from room_id strings to dicts mapping from
-            tag strings to tag content.
+            A mapping from room_id strings to dicts mapping from tag strings to
+            tag content.
         """
 
-        deferred = self.db_pool.simple_select_list(
+        rows = await self.db_pool.simple_select_list(
             "room_tags", {"user_id": user_id}, ["room_id", "tag", "content"]
         )
 
-        @deferred.addCallback
-        def tags_by_room(rows):
-            tags_by_room = {}
-            for row in rows:
-                room_tags = tags_by_room.setdefault(row["room_id"], {})
-                room_tags[row["tag"]] = db_to_json(row["content"])
-            return tags_by_room
-
-        return deferred
+        tags_by_room = {}
+        for row in rows:
+            room_tags = tags_by_room.setdefault(row["room_id"], {})
+            room_tags[row["tag"]] = db_to_json(row["content"])
+        return tags_by_room
 
     async def get_all_updated_tags(
         self, instance_name: str, last_id: int, current_id: int, limit: int
@@ -127,17 +122,19 @@ class TagsWorkerStore(AccountDataWorkerStore):
 
         return results, upto_token, limited
 
-    @defer.inlineCallbacks
-    def get_updated_tags(self, user_id, stream_id):
+    async def get_updated_tags(
+        self, user_id: str, stream_id: int
+    ) -> Dict[str, List[str]]:
         """Get all the tags for the rooms where the tags have changed since the
         given version
 
         Args:
             user_id(str): The user to get the tags for.
             stream_id(int): The earliest update to get for the user.
+
         Returns:
-            A deferred dict mapping from room_id strings to lists of tag
-            strings for all the rooms that changed since the stream_id token.
+            A mapping from room_id strings to lists of tag strings for all the
+            rooms that changed since the stream_id token.
         """
 
         def get_updated_tags_txn(txn):
@@ -155,47 +152,53 @@ class TagsWorkerStore(AccountDataWorkerStore):
         if not changed:
             return {}
 
-        room_ids = yield self.db_pool.runInteraction(
+        room_ids = await self.db_pool.runInteraction(
             "get_updated_tags", get_updated_tags_txn
         )
 
         results = {}
         if room_ids:
-            tags_by_room = yield self.get_tags_for_user(user_id)
+            tags_by_room = await self.get_tags_for_user(user_id)
             for room_id in room_ids:
                 results[room_id] = tags_by_room.get(room_id, {})
 
         return results
 
-    def get_tags_for_room(self, user_id, room_id):
+    async def get_tags_for_room(
+        self, user_id: str, room_id: str
+    ) -> Dict[str, JsonDict]:
         """Get all the tags for the given room
+
         Args:
-            user_id(str): The user to get tags for
-            room_id(str): The room to get tags for
+            user_id: The user to get tags for
+            room_id: The room to get tags for
+
         Returns:
-            A deferred list of string tags.
+            A mapping of tags to tag content.
         """
-        return self.db_pool.simple_select_list(
+        rows = await self.db_pool.simple_select_list(
             table="room_tags",
             keyvalues={"user_id": user_id, "room_id": room_id},
             retcols=("tag", "content"),
             desc="get_tags_for_room",
-        ).addCallback(
-            lambda rows: {row["tag"]: db_to_json(row["content"]) for row in rows}
         )
+        return {row["tag"]: db_to_json(row["content"]) for row in rows}
 
 
 class TagsStore(TagsWorkerStore):
-    @defer.inlineCallbacks
-    def add_tag_to_room(self, user_id, room_id, tag, content):
+    async def add_tag_to_room(
+        self, user_id: str, room_id: str, tag: str, content: JsonDict
+    ) -> int:
         """Add a tag to a room for a user.
+
         Args:
-            user_id(str): The user to add a tag for.
-            room_id(str): The room to add a tag for.
-            tag(str): The tag name to add.
-            content(dict): A json object to associate with the tag.
+            user_id: The user to add a tag for.
+            room_id: The room to add a tag for.
+            tag: The tag name to add.
+            content: A json object to associate with the tag.
+
         Returns:
-            A deferred that completes once the tag has been added.
+            The next account data ID.
         """
         content_json = json.dumps(content)
 
@@ -209,18 +212,17 @@ class TagsStore(TagsWorkerStore):
             self._update_revision_txn(txn, user_id, room_id, next_id)
 
         with self._account_data_id_gen.get_next() as next_id:
-            yield self.db_pool.runInteraction("add_tag", add_tag_txn, next_id)
+            await self.db_pool.runInteraction("add_tag", add_tag_txn, next_id)
 
         self.get_tags_for_user.invalidate((user_id,))
 
-        result = self._account_data_id_gen.get_current_token()
-        return result
+        return self._account_data_id_gen.get_current_token()
 
-    @defer.inlineCallbacks
-    def remove_tag_from_room(self, user_id, room_id, tag):
+    async def remove_tag_from_room(self, user_id: str, room_id: str, tag: str) -> int:
         """Remove a tag from a room for a user.
+
         Returns:
-            A deferred that completes once the tag has been removed
+            The next account data ID.
         """
 
         def remove_tag_txn(txn, next_id):
@@ -232,21 +234,22 @@ class TagsStore(TagsWorkerStore):
             self._update_revision_txn(txn, user_id, room_id, next_id)
 
         with self._account_data_id_gen.get_next() as next_id:
-            yield self.db_pool.runInteraction("remove_tag", remove_tag_txn, next_id)
+            await self.db_pool.runInteraction("remove_tag", remove_tag_txn, next_id)
 
         self.get_tags_for_user.invalidate((user_id,))
 
-        result = self._account_data_id_gen.get_current_token()
-        return result
+        return self._account_data_id_gen.get_current_token()
 
-    def _update_revision_txn(self, txn, user_id, room_id, next_id):
+    def _update_revision_txn(
+        self, txn, user_id: str, room_id: str, next_id: int
+    ) -> None:
         """Update the latest revision of the tags for the given user and room.
 
         Args:
             txn: The database cursor
-            user_id(str): The ID of the user.
-            room_id(str): The ID of the room.
-            next_id(int): The the revision to advance to.
+            user_id: The ID of the user.
+            room_id: The ID of the room.
+            next_id: The revision to advance to.
         """
 
         txn.call_after(
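
The conversion pattern repeated throughout this commit is mechanical: drop the @defer.inlineCallbacks decorator, declare the method async, and replace each yield on a Deferred with await; .addCallback post-processing becomes ordinary code after the await. A minimal before/after sketch (hypothetical get_thing method, assuming a db_pool attribute like the one used above):

    from twisted.internet import defer

    class Before:
        @defer.inlineCallbacks
        def get_thing(self, key):
            # yield suspends the generator until the Deferred fires
            rows = yield self.db_pool.simple_select_list(
                table="things", keyvalues={"key": key}, retcols=("value",)
            )
            return [row["value"] for row in rows]

    class After:
        async def get_thing(self, key):
            # await the same call directly; the result is awaitable
            rows = await self.db_pool.simple_select_list(
                table="things", keyvalues={"key": key}, retcols=("value",)
            )
            return [row["value"] for row in rows]
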
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index 8804c0e4ac..52668dbdf9 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -18,8 +18,6 @@ from collections import namedtuple
 
 from canonicaljson import encode_canonical_json
 
-from twisted.internet import defer
-
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore, db_to_json
 from synapse.storage.database import DatabasePool
@@ -126,8 +124,7 @@ class TransactionStore(SQLBaseStore):
             desc="set_received_txn_response",
         )
 
-    @defer.inlineCallbacks
-    def get_destination_retry_timings(self, destination):
+    async def get_destination_retry_timings(self, destination):
         """Gets the current retry timings (if any) for a given destination.
 
         Args:
@@ -142,7 +139,7 @@ class TransactionStore(SQLBaseStore):
         if result is not SENTINEL:
             return result
 
-        result = yield self.db_pool.runInteraction(
+        result = await self.db_pool.runInteraction(
             "get_destination_retry_timings",
             self._get_destination_retry_timings,
             destination,
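
Worth noting in the hunk above: the cache lookup stays synchronous, and SENTINEL (rather than None) marks a cache miss, because None is itself a legitimate cached value for a destination with no retry timings. Roughly this shape (treat the cache attribute name as a sketch):

    SENTINEL = object()

    async def get_destination_retry_timings(self, destination):
        result = self._destination_retry_cache.get(destination, SENTINEL)
        if result is not SENTINEL:
            # Cache hit; may legitimately be None.
            return result
        # Cache miss: only now pay for a database interaction.
        result = await self.db_pool.runInteraction(
            "get_destination_retry_timings",
            self._get_destination_retry_timings,
            destination,
        )
        return result
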
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index d73a8e8ab9..af21fe457a 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -16,8 +16,6 @@
 import logging
 import re
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes, JoinRules
 from synapse.storage.database import DatabasePool
 from synapse.storage.databases.main.state import StateFilter
@@ -59,8 +57,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             "populate_user_directory_cleanup", self._populate_user_directory_cleanup
         )
 
-    @defer.inlineCallbacks
-    def _populate_user_directory_createtables(self, progress, batch_size):
+    async def _populate_user_directory_createtables(self, progress, batch_size):
 
         # Get all the rooms that we want to process.
         def _make_staging_area(txn):
@@ -102,45 +99,43 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
 
                 self.db_pool.simple_insert_many_txn(txn, TEMP_TABLE + "_users", users)
 
-        new_pos = yield self.get_max_stream_id_in_current_state_deltas()
-        yield self.db_pool.runInteraction(
+        new_pos = await self.get_max_stream_id_in_current_state_deltas()
+        await self.db_pool.runInteraction(
             "populate_user_directory_temp_build", _make_staging_area
         )
-        yield self.db_pool.simple_insert(
+        await self.db_pool.simple_insert(
             TEMP_TABLE + "_position", {"position": new_pos}
         )
 
-        yield self.db_pool.updates._end_background_update(
+        await self.db_pool.updates._end_background_update(
             "populate_user_directory_createtables"
         )
         return 1
 
-    @defer.inlineCallbacks
-    def _populate_user_directory_cleanup(self, progress, batch_size):
+    async def _populate_user_directory_cleanup(self, progress, batch_size):
         """
         Update the user directory stream position, then clean up the old tables.
         """
-        position = yield self.db_pool.simple_select_one_onecol(
+        position = await self.db_pool.simple_select_one_onecol(
             TEMP_TABLE + "_position", None, "position"
         )
-        yield self.update_user_directory_stream_pos(position)
+        await self.update_user_directory_stream_pos(position)
 
         def _delete_staging_area(txn):
             txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_rooms")
             txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_users")
             txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_position")
 
-        yield self.db_pool.runInteraction(
+        await self.db_pool.runInteraction(
             "populate_user_directory_cleanup", _delete_staging_area
         )
 
-        yield self.db_pool.updates._end_background_update(
+        await self.db_pool.updates._end_background_update(
             "populate_user_directory_cleanup"
         )
         return 1
 
-    @defer.inlineCallbacks
-    def _populate_user_directory_process_rooms(self, progress, batch_size):
+    async def _populate_user_directory_process_rooms(self, progress, batch_size):
         """
         Args:
             progress (dict)
@@ -151,7 +146,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
 
         # If we don't have any progress recorded, delete everything.
         if not progress:
-            yield self.delete_all_from_user_dir()
+            await self.delete_all_from_user_dir()
 
         def _get_next_batch(txn):
             # Only fetch 250 rooms, so we don't fetch too many at once, even
@@ -176,13 +171,13 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
 
             return rooms_to_work_on
 
-        rooms_to_work_on = yield self.db_pool.runInteraction(
+        rooms_to_work_on = await self.db_pool.runInteraction(
             "populate_user_directory_temp_read", _get_next_batch
         )
 
         # No more rooms -- complete the transaction.
         if not rooms_to_work_on:
-            yield self.db_pool.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 "populate_user_directory_process_rooms"
             )
             return 1
@@ -195,21 +190,19 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         processed_event_count = 0
 
         for room_id, event_count in rooms_to_work_on:
-            is_in_room = yield self.is_host_joined(room_id, self.server_name)
+            is_in_room = await self.is_host_joined(room_id, self.server_name)
 
             if is_in_room:
-                is_public = yield self.is_room_world_readable_or_publicly_joinable(
+                is_public = await self.is_room_world_readable_or_publicly_joinable(
                     room_id
                 )
 
-                users_with_profile = yield defer.ensureDeferred(
-                    state.get_current_users_in_room(room_id)
-                )
+                users_with_profile = await state.get_current_users_in_room(room_id)
                 user_ids = set(users_with_profile)
 
                 # Update each user in the user directory.
                 for user_id, profile in users_with_profile.items():
-                    yield self.update_profile_in_user_dir(
+                    await self.update_profile_in_user_dir(
                         user_id, profile.display_name, profile.avatar_url
                     )
 
@@ -223,7 +216,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                         to_insert.add(user_id)
 
                     if to_insert:
-                        yield self.add_users_in_public_rooms(room_id, to_insert)
+                        await self.add_users_in_public_rooms(room_id, to_insert)
                         to_insert.clear()
                 else:
                     for user_id in user_ids:
@@ -243,22 +236,22 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                             # If it gets too big, stop and write to the database
                             # to prevent storing too much in RAM.
                             if len(to_insert) >= self.SHARE_PRIVATE_WORKING_SET:
-                                yield self.add_users_who_share_private_room(
+                                await self.add_users_who_share_private_room(
                                     room_id, to_insert
                                 )
                                 to_insert.clear()
 
                     if to_insert:
-                        yield self.add_users_who_share_private_room(room_id, to_insert)
+                        await self.add_users_who_share_private_room(room_id, to_insert)
                         to_insert.clear()
 
             # We've finished a room. Delete it from the table.
-            yield self.db_pool.simple_delete_one(
+            await self.db_pool.simple_delete_one(
                 TEMP_TABLE + "_rooms", {"room_id": room_id}
             )
             # Update the remaining counter.
             progress["remaining"] -= 1
-            yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 "populate_user_directory",
                 self.db_pool.updates._background_update_progress_txn,
                 "populate_user_directory_process_rooms",
@@ -273,13 +266,12 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
 
         return processed_event_count
 
-    @defer.inlineCallbacks
-    def _populate_user_directory_process_users(self, progress, batch_size):
+    async def _populate_user_directory_process_users(self, progress, batch_size):
         """
         If search_all_users is enabled, add all of the users to the user directory.
         """
         if not self.hs.config.user_directory_search_all_users:
-            yield self.db_pool.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 "populate_user_directory_process_users"
             )
             return 1
@@ -305,13 +297,13 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
 
             return users_to_work_on
 
-        users_to_work_on = yield self.db_pool.runInteraction(
+        users_to_work_on = await self.db_pool.runInteraction(
             "populate_user_directory_temp_read", _get_next_batch
         )
 
         # No more users -- complete the transaction.
         if not users_to_work_on:
-            yield self.db_pool.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 "populate_user_directory_process_users"
             )
             return 1
@@ -322,18 +314,18 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         )
 
         for user_id in users_to_work_on:
-            profile = yield self.get_profileinfo(get_localpart_from_id(user_id))
-            yield self.update_profile_in_user_dir(
+            profile = await self.get_profileinfo(get_localpart_from_id(user_id))
+            await self.update_profile_in_user_dir(
                 user_id, profile.display_name, profile.avatar_url
             )
 
             # We've finished processing a user. Delete it from the table.
-            yield self.db_pool.simple_delete_one(
+            await self.db_pool.simple_delete_one(
                 TEMP_TABLE + "_users", {"user_id": user_id}
             )
             # Update the remaining counter.
             progress["remaining"] -= 1
-            yield self.db_pool.runInteraction(
+            await self.db_pool.runInteraction(
                 "populate_user_directory",
                 self.db_pool.updates._background_update_progress_txn,
                 "populate_user_directory_process_users",
@@ -342,8 +334,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
 
         return len(users_to_work_on)
 
-    @defer.inlineCallbacks
-    def is_room_world_readable_or_publicly_joinable(self, room_id):
+    async def is_room_world_readable_or_publicly_joinable(self, room_id):
         """Check if the room is either world_readable or publically joinable
         """
 
@@ -353,20 +344,20 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             (EventTypes.RoomHistoryVisibility, ""),
         )
 
-        current_state_ids = yield self.get_filtered_current_state_ids(
+        current_state_ids = await self.get_filtered_current_state_ids(
             room_id, StateFilter.from_types(types_to_filter)
         )
 
         join_rules_id = current_state_ids.get((EventTypes.JoinRules, ""))
         if join_rules_id:
-            join_rule_ev = yield self.get_event(join_rules_id, allow_none=True)
+            join_rule_ev = await self.get_event(join_rules_id, allow_none=True)
             if join_rule_ev:
                 if join_rule_ev.content.get("join_rule") == JoinRules.PUBLIC:
                     return True
 
         hist_vis_id = current_state_ids.get((EventTypes.RoomHistoryVisibility, ""))
         if hist_vis_id:
-            hist_vis_ev = yield self.get_event(hist_vis_id, allow_none=True)
+            hist_vis_ev = await self.get_event(hist_vis_id, allow_none=True)
             if hist_vis_ev:
                 if hist_vis_ev.content.get("history_visibility") == "world_readable":
                     return True
@@ -590,19 +581,18 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
             "remove_from_user_dir", _remove_from_user_dir_txn
         )
 
-    @defer.inlineCallbacks
-    def get_users_in_dir_due_to_room(self, room_id):
+    async def get_users_in_dir_due_to_room(self, room_id):
         """Get all user_ids that are in the room directory because they're
         in the given room_id
         """
-        user_ids_share_pub = yield self.db_pool.simple_select_onecol(
+        user_ids_share_pub = await self.db_pool.simple_select_onecol(
             table="users_in_public_rooms",
             keyvalues={"room_id": room_id},
             retcol="user_id",
             desc="get_users_in_dir_due_to_room",
         )
 
-        user_ids_share_priv = yield self.db_pool.simple_select_onecol(
+        user_ids_share_priv = await self.db_pool.simple_select_onecol(
             table="users_who_share_private_rooms",
             keyvalues={"room_id": room_id},
             retcol="other_user_id",
@@ -645,8 +635,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
             "remove_user_who_share_room", _remove_user_who_share_room_txn
         )
 
-    @defer.inlineCallbacks
-    def get_user_dir_rooms_user_is_in(self, user_id):
+    async def get_user_dir_rooms_user_is_in(self, user_id):
         """
         Returns the rooms that a user is in.
 
@@ -656,14 +645,14 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
         Returns:
             list: the room IDs the user is in
         """
-        rows = yield self.db_pool.simple_select_onecol(
+        rows = await self.db_pool.simple_select_onecol(
             table="users_who_share_private_rooms",
             keyvalues={"user_id": user_id},
             retcol="room_id",
             desc="get_rooms_user_is_in",
         )
 
-        pub_rows = yield self.db_pool.simple_select_onecol(
+        pub_rows = await self.db_pool.simple_select_onecol(
             table="users_in_public_rooms",
             keyvalues={"user_id": user_id},
             retcol="room_id",
@@ -674,32 +663,6 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
         users.update(rows)
         return list(users)
 
-    @defer.inlineCallbacks
-    def get_rooms_in_common_for_users(self, user_id, other_user_id):
-        """Given two user_ids find out the list of rooms they share.
-        """
-        sql = """
-            SELECT room_id FROM (
-                SELECT c.room_id FROM current_state_events AS c
-                INNER JOIN room_memberships AS m USING (event_id)
-                WHERE type = 'm.room.member'
-                    AND m.membership = 'join'
-                    AND state_key = ?
-            ) AS f1 INNER JOIN (
-                SELECT c.room_id FROM current_state_events AS c
-                INNER JOIN room_memberships AS m USING (event_id)
-                WHERE type = 'm.room.member'
-                    AND m.membership = 'join'
-                    AND state_key = ?
-            ) f2 USING (room_id)
-        """
-
-        rows = yield self.db_pool.execute(
-            "get_rooms_in_common_for_users", None, sql, user_id, other_user_id
-        )
-
-        return [room_id for room_id, in rows]
-
     def get_user_directory_stream_pos(self):
         return self.db_pool.simple_select_one_onecol(
             table="user_directory_stream_pos",
@@ -708,8 +671,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
             desc="get_user_directory_stream_pos",
         )
 
-    @defer.inlineCallbacks
-    def search_user_dir(self, user_id, search_term, limit):
+    async def search_user_dir(self, user_id, search_term, limit):
         """Searches for users in directory
 
         Returns:
@@ -806,7 +768,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
             # This should be unreachable.
             raise Exception("Unrecognized database engine")
 
-        results = yield self.db_pool.execute(
+        results = await self.db_pool.execute(
             "search_user_dir", self.db_pool.cursor_to_dict, sql, *args
         )
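
One detail from this file worth calling out: inside an inlineCallbacks generator a coroutine had to be adapted before it could be yielded, whereas an async function awaits it directly, which is why the defer.ensureDeferred wrapper around state.get_current_users_in_room disappears above. In isolation:

    # inlineCallbacks body: wrap the coroutine so yield gets a Deferred
    users_with_profile = yield defer.ensureDeferred(
        state.get_current_users_in_room(room_id)
    )

    # async body: await the coroutine directly
    users_with_profile = await state.get_current_users_in_room(room_id)
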
 
diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
index 1e2d584098..139085b672 100644
--- a/synapse/storage/databases/state/bg_updates.py
+++ b/synapse/storage/databases/state/bg_updates.py
@@ -15,8 +15,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.database import DatabasePool
 from synapse.storage.engines import PostgresEngine
@@ -198,8 +196,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
             columns=["room_id"],
         )
 
-    @defer.inlineCallbacks
-    def _background_deduplicate_state(self, progress, batch_size):
+    async def _background_deduplicate_state(self, progress, batch_size):
         """This background update will slowly deduplicate state by reencoding
         them as deltas.
         """
@@ -212,7 +209,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
         batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))
 
         if max_group is None:
-            rows = yield self.db_pool.execute(
+            rows = await self.db_pool.execute(
                 "_background_deduplicate_state",
                 None,
                 "SELECT coalesce(max(id), 0) FROM state_groups",
@@ -330,19 +327,18 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
 
             return False, batch_size
 
-        finished, result = yield self.db_pool.runInteraction(
+        finished, result = await self.db_pool.runInteraction(
             self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, reindex_txn
         )
 
         if finished:
-            yield self.db_pool.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME
             )
 
         return result * BATCH_SIZE_SCALE_FACTOR
 
-    @defer.inlineCallbacks
-    def _background_index_state(self, progress, batch_size):
+    async def _background_index_state(self, progress, batch_size):
         def reindex_txn(conn):
             conn.rollback()
             if isinstance(self.database_engine, PostgresEngine):
@@ -365,9 +361,9 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
                 )
                 txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
 
-        yield self.db_pool.runWithConnection(reindex_txn)
+        await self.db_pool.runWithConnection(reindex_txn)
 
-        yield self.db_pool.updates._end_background_update(
+        await self.db_pool.updates._end_background_update(
             self.STATE_GROUP_INDEX_UPDATE_NAME
         )
 
diff --git a/synapse/types.py b/synapse/types.py
index 238b938064..9e580f4295 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -13,11 +13,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import abc
 import re
 import string
 import sys
 from collections import namedtuple
-from typing import Any, Dict, Tuple, TypeVar
+from typing import Any, Dict, Tuple, Type, TypeVar
 
 import attr
 from signedjson.key import decode_verify_key_bytes
@@ -33,7 +34,7 @@ else:
 
     T_co = TypeVar("T_co", covariant=True)
 
-    class Collection(Iterable[T_co], Container[T_co], Sized):
+    class Collection(Iterable[T_co], Container[T_co], Sized):  # type: ignore
         __slots__ = ()
 
 
@@ -141,6 +142,9 @@ def get_localpart_from_id(string):
     return string[1:idx]
 
 
+DS = TypeVar("DS", bound="DomainSpecificString")
+
+
 class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "domain"))):
     """Common base class among ID/name strings that have a local part and a
     domain name, prefixed with a sigil.
@@ -151,6 +155,10 @@ class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "dom
         'domain' : The domain part of the name
     """
 
+    __metaclass__ = abc.ABCMeta
+
+    SIGIL = abc.abstractproperty()  # type: str  # type: ignore
+
     # Deny iteration because it will bite you if you try to create a singleton
     # set by:
     #    users = set(user)
@@ -166,7 +174,7 @@ class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "dom
         return self
 
     @classmethod
-    def from_string(cls, s: str):
+    def from_string(cls: Type[DS], s: str) -> DS:
         """Parse the string given by 's' into a structure object."""
         if len(s) < 1 or s[0:1] != cls.SIGIL:
             raise SynapseError(
@@ -190,12 +198,12 @@ class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "dom
         # names on one HS
         return cls(localpart=parts[0], domain=domain)
 
-    def to_string(self):
+    def to_string(self) -> str:
         """Return a string encoding the fields of the structure object."""
         return "%s%s:%s" % (self.SIGIL, self.localpart, self.domain)
 
     @classmethod
-    def is_valid(cls, s):
+    def is_valid(cls: Type[DS], s: str) -> bool:
         try:
             cls.from_string(s)
             return True
@@ -235,8 +243,9 @@ class GroupID(DomainSpecificString):
     SIGIL = "+"
 
     @classmethod
-    def from_string(cls, s):
-        group_id = super(GroupID, cls).from_string(s)
+    def from_string(cls: Type[DS], s: str) -> DS:
+        group_id = super().from_string(s)  # type: DS # type: ignore
+
         if not group_id.localpart:
             raise SynapseError(400, "Group ID cannot be empty", Codes.INVALID_PARAM)
 
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index c63256d3bd..b3f76428b6 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -17,6 +17,7 @@ import logging
 import re
 
 import attr
+from canonicaljson import json
 
 from twisted.internet import defer, task
 
@@ -24,6 +25,9 @@ from synapse.logging import context
 
 logger = logging.getLogger(__name__)
 
+# Create a custom encoder to reduce the whitespace produced by JSON encoding.
+json_encoder = json.JSONEncoder(separators=(",", ":"))
+
 
 def unwrapFirstError(failure):
     # defer.gatherResults and DeferredLists wrap failures.
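
The separators tweak is pure output-size trimming: the default encoder pads both separators with a space, and dropping the padding shrinks every serialized payload. Standard-library behaviour, easy to verify:

    import json

    data = {"a": 1, "b": [2, 3]}
    json.dumps(data)                                      # '{"a": 1, "b": [2, 3]}'
    json.JSONEncoder(separators=(",", ":")).encode(data)  # '{"a":1,"b":[2,3]}'
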
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 9b09c08b89..c2d72a82cf 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -192,7 +192,7 @@ class Cache(object):
         callbacks = [callback] if callback else []
         self.check_thread()
         observable = ObservableDeferred(value, consumeErrors=True)
-        observer = defer.maybeDeferred(observable.observe)
+        observer = observable.observe()
         entry = CacheEntry(deferred=observable, callbacks=callbacks)
 
         existing_entry = self._pending_deferred_cache.pop(key, None)
diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py
index eab78dd256..0e445e01d7 100644
--- a/synapse/util/frozenutils.py
+++ b/synapse/util/frozenutils.py
@@ -63,5 +63,8 @@ def _handle_frozendict(obj):
     )
 
 
-# A JSONEncoder which is capable of encoding frozendicts without barfing
-frozendict_json_encoder = json.JSONEncoder(default=_handle_frozendict)
+# A JSONEncoder which is capable of encoding frozendicts without barfing.
+# Additionally reduce the whitespace produced by JSON encoding.
+frozendict_json_encoder = json.JSONEncoder(
+    default=_handle_frozendict, separators=(",", ":"),
+)
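
json.JSONEncoder only consults the default hook for objects it cannot serialize natively, which is how _handle_frozendict gets to unwrap each frozendict before encoding; the separators argument is the same whitespace trim as in synapse/util/__init__.py. A self-contained approximation (using a plain dict(...) conversion in place of _handle_frozendict):

    import json

    from frozendict import frozendict

    encoder = json.JSONEncoder(
        default=lambda o: dict(o),  # stand-in for _handle_frozendict
        separators=(",", ":"),
    )
    encoder.encode(frozendict({"a": 1}))  # '{"a":1}'
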
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index ec61e14423..13775b43f9 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -13,14 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import inspect
 import logging
 from functools import wraps
+from typing import Any, Callable, Optional, TypeVar, cast
 
 from prometheus_client import Counter
 
-from twisted.internet import defer
-
 from synapse.logging.context import LoggingContext, current_context
 from synapse.metrics import InFlightGauge
 
@@ -60,29 +58,37 @@ in_flight = InFlightGauge(
     sub_metrics=["real_time_max", "real_time_sum"],
 )
 
+T = TypeVar("T", bound=Callable[..., Any])
 
-def measure_func(name=None):
-    def wrapper(func):
-        block_name = func.__name__ if name is None else name
 
-        if inspect.iscoroutinefunction(func):
+def measure_func(name: Optional[str] = None) -> Callable[[T], T]:
+    """
+    Used to decorate an async function with a `Measure` context manager.
+
+    Usage:
+
+    @measure_func()
+    async def foo(...):
+        ...
 
-            @wraps(func)
-            async def measured_func(self, *args, **kwargs):
-                with Measure(self.clock, block_name):
-                    r = await func(self, *args, **kwargs)
-                return r
+    Which is analogous to:
 
-        else:
+    async def foo(...):
+        with Measure(...):
+            ...
+
+    """
+
+    def wrapper(func: T) -> T:
+        block_name = func.__name__ if name is None else name
 
-            @wraps(func)
-            @defer.inlineCallbacks
-            def measured_func(self, *args, **kwargs):
-                with Measure(self.clock, block_name):
-                    r = yield func(self, *args, **kwargs)
-                return r
+        @wraps(func)
+        async def measured_func(self, *args, **kwargs):
+            with Measure(self.clock, block_name):
+                r = await func(self, *args, **kwargs)
+            return r
 
-        return measured_func
+        return cast(T, measured_func)
 
     return wrapper
 
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 8794317caa..919988d3bc 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -15,8 +15,6 @@
 import logging
 import random
 
-from twisted.internet import defer
-
 import synapse.logging.context
 from synapse.api.errors import CodeMessageException
 
@@ -54,8 +52,7 @@ class NotRetryingDestination(Exception):
         self.destination = destination
 
 
-@defer.inlineCallbacks
-def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs):
+async def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs):
     """For a given destination check if we have previously failed to
     send a request there and are waiting before retrying the destination.
     If we are not ready to retry the destination, this will raise a
@@ -73,9 +70,9 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs)
     Example usage:
 
         try:
-            limiter = yield get_retry_limiter(destination, clock, store)
+            limiter = await get_retry_limiter(destination, clock, store)
             with limiter:
-                response = yield do_request()
+                response = await do_request()
         except NotRetryingDestination:
             # We aren't ready to retry that destination.
             raise
@@ -83,7 +80,7 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs)
     failure_ts = None
     retry_last_ts, retry_interval = (0, 0)
 
-    retry_timings = yield store.get_destination_retry_timings(destination)
+    retry_timings = await store.get_destination_retry_timings(destination)
 
     if retry_timings:
         failure_ts = retry_timings["failure_ts"]
@@ -222,10 +219,9 @@ class RetryDestinationLimiter(object):
             if self.failure_ts is None:
                 self.failure_ts = retry_last_ts
 
-        @defer.inlineCallbacks
-        def store_retry_timings():
+        async def store_retry_timings():
             try:
-                yield self.store.set_destination_retry_timings(
+                await self.store.set_destination_retry_timings(
                     self.destination,
                     self.failure_ts,
                     retry_last_ts,
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index 0bfb86bf1f..5d45689c8c 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -62,12 +62,15 @@ class AuthTestCase(unittest.TestCase):
         # this is overridden for the appservice tests
         self.store.get_app_service_by_token = Mock(return_value=None)
 
+        self.store.insert_client_ip = Mock(return_value=defer.succeed(None))
         self.store.is_support_user = Mock(return_value=defer.succeed(False))
 
     @defer.inlineCallbacks
     def test_get_user_by_req_user_valid_token(self):
         user_info = {"name": self.test_user, "token_id": "ditto", "device_id": "device"}
-        self.store.get_user_by_access_token = Mock(return_value=user_info)
+        self.store.get_user_by_access_token = Mock(
+            return_value=defer.succeed(user_info)
+        )
 
         request = Mock(args={})
         request.args[b"access_token"] = [self.test_token]
@@ -76,23 +79,25 @@ class AuthTestCase(unittest.TestCase):
         self.assertEquals(requester.user.to_string(), self.test_user)
 
     def test_get_user_by_req_user_bad_token(self):
-        self.store.get_user_by_access_token = Mock(return_value=None)
+        self.store.get_user_by_access_token = Mock(return_value=defer.succeed(None))
 
         request = Mock(args={})
         request.args[b"access_token"] = [self.test_token]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
-        d = self.auth.get_user_by_req(request)
+        d = defer.ensureDeferred(self.auth.get_user_by_req(request))
         f = self.failureResultOf(d, InvalidClientTokenError).value
         self.assertEqual(f.code, 401)
         self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN")
 
     def test_get_user_by_req_user_missing_token(self):
         user_info = {"name": self.test_user, "token_id": "ditto"}
-        self.store.get_user_by_access_token = Mock(return_value=user_info)
+        self.store.get_user_by_access_token = Mock(
+            return_value=defer.succeed(user_info)
+        )
 
         request = Mock(args={})
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
-        d = self.auth.get_user_by_req(request)
+        d = defer.ensureDeferred(self.auth.get_user_by_req(request))
         f = self.failureResultOf(d, MissingClientTokenError).value
         self.assertEqual(f.code, 401)
         self.assertEqual(f.errcode, "M_MISSING_TOKEN")
@@ -103,7 +108,7 @@ class AuthTestCase(unittest.TestCase):
             token="foobar", url="a_url", sender=self.test_user, ip_range_whitelist=None
         )
         self.store.get_app_service_by_token = Mock(return_value=app_service)
-        self.store.get_user_by_access_token = Mock(return_value=None)
+        self.store.get_user_by_access_token = Mock(return_value=defer.succeed(None))
 
         request = Mock(args={})
         request.getClientIP.return_value = "127.0.0.1"
@@ -123,7 +128,7 @@ class AuthTestCase(unittest.TestCase):
             ip_range_whitelist=IPSet(["192.168/16"]),
         )
         self.store.get_app_service_by_token = Mock(return_value=app_service)
-        self.store.get_user_by_access_token = Mock(return_value=None)
+        self.store.get_user_by_access_token = Mock(return_value=defer.succeed(None))
 
         request = Mock(args={})
         request.getClientIP.return_value = "192.168.10.10"
@@ -142,25 +147,25 @@ class AuthTestCase(unittest.TestCase):
             ip_range_whitelist=IPSet(["192.168/16"]),
         )
         self.store.get_app_service_by_token = Mock(return_value=app_service)
-        self.store.get_user_by_access_token = Mock(return_value=None)
+        self.store.get_user_by_access_token = Mock(return_value=defer.succeed(None))
 
         request = Mock(args={})
         request.getClientIP.return_value = "131.111.8.42"
         request.args[b"access_token"] = [self.test_token]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
-        d = self.auth.get_user_by_req(request)
+        d = defer.ensureDeferred(self.auth.get_user_by_req(request))
         f = self.failureResultOf(d, InvalidClientTokenError).value
         self.assertEqual(f.code, 401)
         self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN")
 
     def test_get_user_by_req_appservice_bad_token(self):
         self.store.get_app_service_by_token = Mock(return_value=None)
-        self.store.get_user_by_access_token = Mock(return_value=None)
+        self.store.get_user_by_access_token = Mock(return_value=defer.succeed(None))
 
         request = Mock(args={})
         request.args[b"access_token"] = [self.test_token]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
-        d = self.auth.get_user_by_req(request)
+        d = defer.ensureDeferred(self.auth.get_user_by_req(request))
         f = self.failureResultOf(d, InvalidClientTokenError).value
         self.assertEqual(f.code, 401)
         self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN")
@@ -168,11 +173,11 @@ class AuthTestCase(unittest.TestCase):
     def test_get_user_by_req_appservice_missing_token(self):
         app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
         self.store.get_app_service_by_token = Mock(return_value=app_service)
-        self.store.get_user_by_access_token = Mock(return_value=None)
+        self.store.get_user_by_access_token = Mock(return_value=defer.succeed(None))
 
         request = Mock(args={})
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
-        d = self.auth.get_user_by_req(request)
+        d = defer.ensureDeferred(self.auth.get_user_by_req(request))
         f = self.failureResultOf(d, MissingClientTokenError).value
         self.assertEqual(f.code, 401)
         self.assertEqual(f.errcode, "M_MISSING_TOKEN")
@@ -185,7 +190,11 @@ class AuthTestCase(unittest.TestCase):
         )
         app_service.is_interested_in_user = Mock(return_value=True)
         self.store.get_app_service_by_token = Mock(return_value=app_service)
-        self.store.get_user_by_access_token = Mock(return_value=None)
+        # This just needs to return a truthy value.
+        self.store.get_user_by_id = Mock(
+            return_value=defer.succeed({"is_guest": False})
+        )
+        self.store.get_user_by_access_token = Mock(return_value=defer.succeed(None))
 
         request = Mock(args={})
         request.getClientIP.return_value = "127.0.0.1"
@@ -204,20 +213,22 @@ class AuthTestCase(unittest.TestCase):
         )
         app_service.is_interested_in_user = Mock(return_value=False)
         self.store.get_app_service_by_token = Mock(return_value=app_service)
-        self.store.get_user_by_access_token = Mock(return_value=None)
+        self.store.get_user_by_access_token = Mock(return_value=defer.succeed(None))
 
         request = Mock(args={})
         request.getClientIP.return_value = "127.0.0.1"
         request.args[b"access_token"] = [self.test_token]
         request.args[b"user_id"] = [masquerading_user_id]
         request.requestHeaders.getRawHeaders = mock_getRawHeaders()
-        d = self.auth.get_user_by_req(request)
+        d = defer.ensureDeferred(self.auth.get_user_by_req(request))
         self.failureResultOf(d, AuthError)
 
     @defer.inlineCallbacks
     def test_get_user_from_macaroon(self):
         self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org", "device_id": "device"}
+            return_value=defer.succeed(
+                {"name": "@baldrick:matrix.org", "device_id": "device"}
+            )
         )
 
         user_id = "@baldrick:matrix.org"
@@ -241,8 +252,8 @@ class AuthTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_guest_user_from_macaroon(self):
-        self.store.get_user_by_id = Mock(return_value={"is_guest": True})
-        self.store.get_user_by_access_token = Mock(return_value=None)
+        self.store.get_user_by_id = Mock(return_value=defer.succeed({"is_guest": True}))
+        self.store.get_user_by_access_token = Mock(return_value=defer.succeed(None))
 
         user_id = "@baldrick:matrix.org"
         macaroon = pymacaroons.Macaroon(
@@ -282,16 +293,20 @@ class AuthTestCase(unittest.TestCase):
 
         def get_user(tok):
             if token != tok:
-                return None
-            return {
-                "name": USER_ID,
-                "is_guest": False,
-                "token_id": 1234,
-                "device_id": "DEVICE",
-            }
+                return defer.succeed(None)
+            return defer.succeed(
+                {
+                    "name": USER_ID,
+                    "is_guest": False,
+                    "token_id": 1234,
+                    "device_id": "DEVICE",
+                }
+            )
 
         self.store.get_user_by_access_token = get_user
-        self.store.get_user_by_id = Mock(return_value={"is_guest": False})
+        self.store.get_user_by_id = Mock(
+            return_value=defer.succeed({"is_guest": False})
+        )
 
         # check the token works
         request = Mock(args={})
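
The thread running through these test changes: once get_user_by_access_token and friends are awaited, a Mock returning a bare dict breaks, since await needs an awaitable; and the coroutine under test has to be driven through Twisted's Deferred machinery before failureResultOf can inspect it. The recurring fix, sketched with the names from this file:

    # Before: awaiting the mock's plain return value raises TypeError.
    self.store.get_user_by_access_token = Mock(return_value=user_info)

    # After: return a Deferred, and wrap the coroutine for the test helpers.
    self.store.get_user_by_access_token = Mock(return_value=defer.succeed(user_info))
    d = defer.ensureDeferred(self.auth.get_user_by_req(request))
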
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index 4e67503cf0..1fab1d6b69 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -375,8 +375,10 @@ class FilteringTestCase(unittest.TestCase):
         event = MockEvent(sender="@foo:bar", type="m.profile")
         events = [event]
 
-        user_filter = yield self.filtering.get_user_filter(
-            user_localpart=user_localpart, filter_id=filter_id
+        user_filter = yield defer.ensureDeferred(
+            self.filtering.get_user_filter(
+                user_localpart=user_localpart, filter_id=filter_id
+            )
         )
 
         results = user_filter.filter_presence(events=events)
@@ -396,8 +398,10 @@ class FilteringTestCase(unittest.TestCase):
         )
         events = [event]
 
-        user_filter = yield self.filtering.get_user_filter(
-            user_localpart=user_localpart + "2", filter_id=filter_id
+        user_filter = yield defer.ensureDeferred(
+            self.filtering.get_user_filter(
+                user_localpart=user_localpart + "2", filter_id=filter_id
+            )
         )
 
         results = user_filter.filter_presence(events=events)
@@ -412,8 +416,10 @@ class FilteringTestCase(unittest.TestCase):
         event = MockEvent(sender="@foo:bar", type="m.room.topic", room_id="!foo:bar")
         events = [event]
 
-        user_filter = yield self.filtering.get_user_filter(
-            user_localpart=user_localpart, filter_id=filter_id
+        user_filter = yield defer.ensureDeferred(
+            self.filtering.get_user_filter(
+                user_localpart=user_localpart, filter_id=filter_id
+            )
         )
 
         results = user_filter.filter_room_state(events=events)
@@ -430,8 +436,10 @@ class FilteringTestCase(unittest.TestCase):
         )
         events = [event]
 
-        user_filter = yield self.filtering.get_user_filter(
-            user_localpart=user_localpart, filter_id=filter_id
+        user_filter = yield defer.ensureDeferred(
+            self.filtering.get_user_filter(
+                user_localpart=user_localpart, filter_id=filter_id
+            )
         )
 
         results = user_filter.filter_room_state(events)
@@ -465,8 +473,10 @@ class FilteringTestCase(unittest.TestCase):
         self.assertEquals(
             user_filter_json,
             (
-                yield self.datastore.get_user_filter(
-                    user_localpart=user_localpart, filter_id=0
+                yield defer.ensureDeferred(
+                    self.datastore.get_user_filter(
+                        user_localpart=user_localpart, filter_id=0
+                    )
                 )
             ),
         )
@@ -479,8 +489,10 @@ class FilteringTestCase(unittest.TestCase):
             user_localpart=user_localpart, user_filter=user_filter_json
         )
 
-        filter = yield self.filtering.get_user_filter(
-            user_localpart=user_localpart, filter_id=filter_id
+        filter = yield defer.ensureDeferred(
+            self.filtering.get_user_filter(
+                user_localpart=user_localpart, filter_id=filter_id
+            )
         )
 
         self.assertEquals(filter.get_filter_json(), user_filter_json)
diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py
index d580e729c5..1e1f30d790 100644
--- a/tests/api/test_ratelimiting.py
+++ b/tests/api/test_ratelimiting.py
@@ -1,4 +1,6 @@
 from synapse.api.ratelimiting import LimitExceededError, Ratelimiter
+from synapse.appservice import ApplicationService
+from synapse.types import create_requester
 
 from tests import unittest
 
@@ -18,6 +20,77 @@ class TestRatelimiter(unittest.TestCase):
         self.assertTrue(allowed)
         self.assertEquals(20.0, time_allowed)
 
+    def test_allowed_user_via_can_requester_do_action(self):
+        user_requester = create_requester("@user:example.com")
+        limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
+        allowed, time_allowed = limiter.can_requester_do_action(
+            user_requester, _time_now_s=0
+        )
+        self.assertTrue(allowed)
+        self.assertEquals(10.0, time_allowed)
+
+        allowed, time_allowed = limiter.can_requester_do_action(
+            user_requester, _time_now_s=5
+        )
+        self.assertFalse(allowed)
+        self.assertEquals(10.0, time_allowed)
+
+        allowed, time_allowed = limiter.can_requester_do_action(
+            user_requester, _time_now_s=10
+        )
+        self.assertTrue(allowed)
+        self.assertEquals(20.0, time_allowed)
+
+    def test_allowed_appservice_ratelimited_via_can_requester_do_action(self):
+        appservice = ApplicationService(
+            None, "example.com", id="foo", rate_limited=True,
+        )
+        as_requester = create_requester("@user:example.com", app_service=appservice)
+
+        limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
+        allowed, time_allowed = limiter.can_requester_do_action(
+            as_requester, _time_now_s=0
+        )
+        self.assertTrue(allowed)
+        self.assertEquals(10.0, time_allowed)
+
+        allowed, time_allowed = limiter.can_requester_do_action(
+            as_requester, _time_now_s=5
+        )
+        self.assertFalse(allowed)
+        self.assertEquals(10.0, time_allowed)
+
+        allowed, time_allowed = limiter.can_requester_do_action(
+            as_requester, _time_now_s=10
+        )
+        self.assertTrue(allowed)
+        self.assertEquals(20.0, time_allowed)
+
+    def test_allowed_appservice_via_can_requester_do_action(self):
+        appservice = ApplicationService(
+            None, "example.com", id="foo", rate_limited=False,
+        )
+        as_requester = create_requester("@user:example.com", app_service=appservice)
+
+        limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
+        allowed, time_allowed = limiter.can_requester_do_action(
+            as_requester, _time_now_s=0
+        )
+        self.assertTrue(allowed)
+        self.assertEquals(-1, time_allowed)
+
+        allowed, time_allowed = limiter.can_requester_do_action(
+            as_requester, _time_now_s=5
+        )
+        self.assertTrue(allowed)
+        self.assertEquals(-1, time_allowed)
+
+        allowed, time_allowed = limiter.can_requester_do_action(
+            as_requester, _time_now_s=10
+        )
+        self.assertTrue(allowed)
+        self.assertEquals(-1, time_allowed)
+
     def test_allowed_via_ratelimit(self):
         limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
 
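
The expected values above fall straight out of the token-bucket arithmetic: with rate_hz=0.1 and burst_count=1, one action is permitted per 10 seconds, and time_allowed is when the next action becomes permissible (the -1 in the non-rate-limited appservice case simply means no wait applies). For the first test:

    # t=0:  the single burst slot is free, so allowed; next slot at 1/0.1 = 10.0
    # t=5:  still inside the 10s window, so denied; next slot remains 10.0
    # t=10: the window has elapsed, so allowed; next slot moves to 20.0
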
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index 628f7d8db0..2a0b7c1b56 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -120,7 +120,7 @@ class AppServiceHandlerTestCase(unittest.TestCase):
 
         self.mock_as_api.query_alias.return_value = make_awaitable(True)
         self.mock_store.get_app_services.return_value = services
-        self.mock_store.get_association_from_room_alias.return_value = defer.succeed(
+        self.mock_store.get_association_from_room_alias.return_value = make_awaitable(
             Mock(room_id=room_id, servers=servers)
         )
 
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 6d45c4b233..e364b1bd62 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -22,6 +22,7 @@ from synapse.api.errors import Codes, ResourceLimitError, SynapseError
 from synapse.handlers.register import RegistrationHandler
 from synapse.types import RoomAlias, UserID, create_requester
 
+from tests.test_utils import make_awaitable
 from tests.unittest import override_config
 
 from .. import unittest
@@ -187,7 +188,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
         room_alias_str = "#room:test"
         self.hs.config.auto_join_rooms = [room_alias_str]
 
-        self.store.is_real_user = Mock(return_value=defer.succeed(False))
+        self.store.is_real_user = Mock(return_value=make_awaitable(False))
         user_id = self.get_success(self.handler.register_user(localpart="support"))
         rooms = self.get_success(self.store.get_rooms_for_user(user_id))
         self.assertEqual(len(rooms), 0)
@@ -199,8 +200,8 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
     def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self):
         room_alias_str = "#room:test"
 
-        self.store.count_real_users = Mock(return_value=defer.succeed(1))
-        self.store.is_real_user = Mock(return_value=defer.succeed(True))
+        self.store.count_real_users = Mock(return_value=make_awaitable(1))
+        self.store.is_real_user = Mock(return_value=make_awaitable(True))
         user_id = self.get_success(self.handler.register_user(localpart="real"))
         rooms = self.get_success(self.store.get_rooms_for_user(user_id))
         directory_handler = self.hs.get_handlers().directory_handler
@@ -214,8 +215,8 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
         room_alias_str = "#room:test"
         self.hs.config.auto_join_rooms = [room_alias_str]
 
-        self.store.count_real_users = Mock(return_value=defer.succeed(2))
-        self.store.is_real_user = Mock(return_value=defer.succeed(True))
+        self.store.count_real_users = Mock(return_value=make_awaitable(2))
+        self.store.is_real_user = Mock(return_value=make_awaitable(True))
         user_id = self.get_success(self.handler.register_user(localpart="real"))
         rooms = self.get_success(self.store.get_rooms_for_user(user_id))
         self.assertEqual(len(rooms), 0)
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 5878f74175..64afd581bc 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -24,6 +24,7 @@ from synapse.api.errors import AuthError
 from synapse.types import UserID
 
 from tests import unittest
+from tests.test_utils import make_awaitable
 from tests.unittest import override_config
 from tests.utils import register_federation_servlets
 
@@ -115,7 +116,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             retry_timings_res
         )
 
-        self.datastore.get_device_updates_by_remote.return_value = defer.succeed(
+        self.datastore.get_device_updates_by_remote.side_effect = lambda destination, from_stream_id, limit: make_awaitable(
             (0, [])
         )
 
@@ -126,10 +127,10 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
 
         self.room_members = []
 
-        def check_user_in_room(room_id, user_id):
+        async def check_user_in_room(room_id, user_id):
             if user_id not in [u.to_string() for u in self.room_members]:
                 raise AuthError(401, "User is not in the room")
-            return defer.succeed(None)
+            return None
 
         hs.get_auth().check_user_in_room = check_user_in_room
 
@@ -151,7 +152,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         self.datastore.get_current_state_deltas.return_value = (0, None)
 
         self.datastore.get_to_device_stream_token = lambda: 0
-        self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: defer.succeed(
+        self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: make_awaitable(
             ([], 0)
         )
         self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None
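
The switch from return_value to a side_effect lambda above is not cosmetic: make_awaitable builds a coroutine, and a coroutine can only be awaited once, so a mock that hands out the same one fails on its second call. A side_effect mints a fresh awaitable per call:

    # Fragile: the single coroutine is exhausted after the first await.
    mock.get_device_updates_by_remote.return_value = make_awaitable((0, []))

    # Robust: every call constructs a new awaitable.
    mock.get_device_updates_by_remote.side_effect = (
        lambda destination, from_stream_id, limit: make_awaitable((0, []))
    )
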
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index f16eef15f7..17d0aae2e9 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -20,6 +20,8 @@ import urllib.parse
 
 from mock import Mock
 
+from twisted.internet import defer
+
 import synapse.rest.admin
 from synapse.api.constants import UserTypes
 from synapse.api.errors import HttpResponseException, ResourceLimitError
@@ -335,7 +337,9 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
         store = self.hs.get_datastore()
 
         # Set monthly active users to the limit
-        store.get_monthly_active_count = Mock(return_value=self.hs.config.max_mau_value)
+        store.get_monthly_active_count = Mock(
+            return_value=defer.succeed(self.hs.config.max_mau_value)
+        )
         # Check that the blocking of monthly active users is working as expected
         # The registration of a new user fails due to the limit
         self.get_failure(
@@ -588,7 +592,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         # Set monthly active users to the limit
         self.store.get_monthly_active_count = Mock(
-            return_value=self.hs.config.max_mau_value
+            return_value=defer.succeed(self.hs.config.max_mau_value)
         )
         # Check that the blocking of monthly active users is working as expected
         # The registration of a new user fails due to the limit
@@ -628,7 +632,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         # Set monthly active users to the limit
         self.store.get_monthly_active_count = Mock(
-            return_value=self.hs.config.max_mau_value
+            return_value=defer.succeed(self.hs.config.max_mau_value)
         )
         # Check that the blocking of monthly active users is working as expected
         # The registration of a new user fails due to the limit
diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
index e54ffea150..0b191d13c6 100644
--- a/tests/rest/client/test_retention.py
+++ b/tests/rest/client/test_retention.py
@@ -144,7 +144,9 @@ class RetentionTestCase(unittest.HomeserverTestCase):
         # Get the create event to, later, check that we can still access it.
         message_handler = self.hs.get_message_handler()
         create_event = self.get_success(
-            message_handler.get_room_data(self.user_id, room_id, EventTypes.Create)
+            message_handler.get_room_data(
+                self.user_id, room_id, EventTypes.Create, state_key="", is_guest=False
+            )
         )
 
         # Send a first event to the room. This is the event we'll want to be purged at the
diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py
index 8df58b4a63..ace0a3c08d 100644
--- a/tests/rest/client/v1/test_profile.py
+++ b/tests/rest/client/v1/test_profile.py
@@ -70,8 +70,8 @@ class MockHandlerProfileTestCase(unittest.TestCase):
             profile_handler=self.mock_handler,
         )
 
-        def _get_user_by_req(request=None, allow_guest=False):
-            return defer.succeed(synapse.types.create_requester(myid))
+        async def _get_user_by_req(request=None, allow_guest=False):
+            return synapse.types.create_requester(myid)
 
         hs.get_auth().get_user_by_req = _get_user_by_req
 
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index 5ccda8b2bd..e74bddc1e5 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -23,14 +23,12 @@ from urllib import parse as urlparse
 
 from mock import Mock
 
-from twisted.internet import defer
-
 import synapse.rest.admin
 from synapse.api.constants import EventContentFields, EventTypes, Membership
 from synapse.handlers.pagination import PurgeStatus
 from synapse.rest.client.v1 import directory, login, profile, room
 from synapse.rest.client.v2_alpha import account
-from synapse.types import JsonDict, RoomAlias
+from synapse.types import JsonDict, RoomAlias, UserID
 from synapse.util.stringutils import random_string
 
 from tests import unittest
@@ -51,8 +49,8 @@ class RoomBase(unittest.HomeserverTestCase):
 
         self.hs.get_federation_handler = Mock(return_value=Mock())
 
-        def _insert_client_ip(*args, **kwargs):
-            return defer.succeed(None)
+        async def _insert_client_ip(*args, **kwargs):
+            return None
 
         self.hs.get_datastore().insert_client_ip = _insert_client_ip
 
@@ -677,6 +675,91 @@ class RoomMemberStateTestCase(RoomBase):
         self.assertEquals(json.loads(content), channel.json_body)
 
 
+class RoomJoinRatelimitTestCase(RoomBase):
+    user_id = "@sid1:red"
+
+    servlets = [
+        profile.register_servlets,
+        room.register_servlets,
+    ]
+
+    @unittest.override_config(
+        {"rc_joins": {"local": {"per_second": 3, "burst_count": 3}}}
+    )
+    def test_join_local_ratelimit(self):
+        """Tests that local joins are actually rate-limited."""
+        for i in range(5):
+            self.helper.create_room_as(self.user_id)
+
+        self.helper.create_room_as(self.user_id, expect_code=429)
+
+    @unittest.override_config(
+        {"rc_joins": {"local": {"per_second": 3, "burst_count": 3}}}
+    )
+    def test_join_local_ratelimit_profile_change(self):
+        """Tests that sending a profile update into all of the user's joined rooms isn't
+        rate-limited by the rate-limiter on joins."""
+
+        # Create and join more rooms than the rate-limiting config allows in a second.
+        room_ids = [
+            self.helper.create_room_as(self.user_id),
+            self.helper.create_room_as(self.user_id),
+            self.helper.create_room_as(self.user_id),
+        ]
+        self.reactor.advance(1)
+        room_ids = room_ids + [
+            self.helper.create_room_as(self.user_id),
+            self.helper.create_room_as(self.user_id),
+            self.helper.create_room_as(self.user_id),
+        ]
+
+        # Create a profile for the user, since one wasn't created at registration.
+        store = self.hs.get_datastore()
+        store.create_profile(UserID.from_string(self.user_id).localpart)
+
+        # Update the display name for the user.
+        path = "/_matrix/client/r0/profile/%s/displayname" % self.user_id
+        request, channel = self.make_request("PUT", path, {"displayname": "John Doe"})
+        self.render(request)
+        self.assertEquals(channel.code, 200, channel.json_body)
+
+        # Check that a profile update was sent into each of the rooms.
+        for room_id in room_ids:
+            path = "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" % (
+                room_id,
+                self.user_id,
+            )
+
+            request, channel = self.make_request("GET", path)
+            self.render(request)
+            self.assertEquals(channel.code, 200)
+
+            self.assertIn("displayname", channel.json_body)
+            self.assertEquals(channel.json_body["displayname"], "John Doe")
+
+    @unittest.override_config(
+        {"rc_joins": {"local": {"per_second": 3, "burst_count": 3}}}
+    )
+    def test_join_local_ratelimit_idempotent(self):
+        """Tests that the room join endpoints remain idempotent despite rate-limiting
+        on room joins."""
+        room_id = self.helper.create_room_as(self.user_id)
+
+        # Let's test both paths to be sure.
+        paths_to_test = [
+            "/_matrix/client/r0/rooms/%s/join",
+            "/_matrix/client/r0/join/%s",
+        ]
+
+        for path in paths_to_test:
+            # Make sure we send more requests than the rate-limiting config would allow
+            # if all of these requests ended up joining the user to a room.
+            for i in range(6):
+                request, channel = self.make_request("POST", path % room_id, {})
+                self.render(request)
+                self.assertEquals(channel.code, 200)
+
+
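# The rc_joins setting used in these tests has two knobs: burst_count (how
# many joins may happen back to back) and per_second (how quickly allowance
# is replenished). The idempotency test works because re-joining a room the
# user is already in does not draw down that allowance. A token-bucket
# sketch of the same arithmetic; illustrative only, Synapse's Ratelimiter
# differs in detail:
import time

class TokenBucket:
    def __init__(self, per_second: float, burst_count: float):
        self.rate = per_second        # tokens regained per second
        self.capacity = burst_count   # maximum stored tokens
        self.tokens = burst_count
        self.last = time.monotonic()

    def allow(self) -> bool:
        now = time.monotonic()
        # Refill in proportion to elapsed time, capped at the burst size.
        self.tokens = min(self.capacity, self.tokens + (now - self.last) * self.rate)
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1
            return True
        return False  # a real server would answer 429 M_LIMIT_EXCEEDED here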
 class RoomMessagesTestCase(RoomBase):
     """ Tests /rooms/$room_id/messages/$user_id/$msg_id REST events. """
 
diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py
index 18260bb90e..94d2bf2eb1 100644
--- a/tests/rest/client/v1/test_typing.py
+++ b/tests/rest/client/v1/test_typing.py
@@ -46,7 +46,7 @@ class RoomTypingTestCase(unittest.HomeserverTestCase):
 
         hs.get_handlers().federation_handler = Mock()
 
-        def get_user_by_access_token(token=None, allow_guest=False):
+        async def get_user_by_access_token(token=None, allow_guest=False):
             return {
                 "user": UserID.from_string(self.auth_user_id),
                 "token_id": 1,
@@ -55,8 +55,8 @@ class RoomTypingTestCase(unittest.HomeserverTestCase):
 
         hs.get_auth().get_user_by_access_token = get_user_by_access_token
 
-        def _insert_client_ip(*args, **kwargs):
-            return defer.succeed(None)
+        async def _insert_client_ip(*args, **kwargs):
+            return None
 
         hs.get_datastore().insert_client_ip = _insert_client_ip
 
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index 51941f99f9..e66c9a4c4c 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -39,7 +39,9 @@ class RestHelper(object):
     resource = attr.ib()
     auth_user_id = attr.ib()
 
-    def create_room_as(self, room_creator=None, is_public=True, tok=None):
+    def create_room_as(
+        self, room_creator=None, is_public=True, tok=None, expect_code=200,
+    ):
         temp_id = self.auth_user_id
         self.auth_user_id = room_creator
         path = "/_matrix/client/r0/createRoom"
@@ -54,9 +56,11 @@ class RestHelper(object):
         )
         render(request, self.resource, self.hs.get_reactor())
 
-        assert channel.result["code"] == b"200", channel.result
+        assert channel.result["code"] == b"%d" % expect_code, channel.result
         self.auth_user_id = temp_id
-        return channel.json_body["room_id"]
+
+        if expect_code == 200:
+            return channel.json_body["room_id"]
 
     def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
         self.change_membership(
@@ -165,26 +169,6 @@ class RestHelper(object):
 
         return channel.json_body
 
-    def redact(self, room_id, event_id, txn_id=None, tok=None, expect_code=200):
-        if txn_id is None:
-            txn_id = "m%s" % (str(time.time()))
-
-        path = "/_matrix/client/r0/rooms/%s/redact/%s/%s" % (room_id, event_id, txn_id)
-        if tok:
-            path = path + "?access_token=%s" % tok
-
-        request, channel = make_request(
-            self.hs.get_reactor(), "PUT", path, json.dumps({}).encode("utf8")
-        )
-        render(request, self.resource, self.hs.get_reactor())
-
-        assert int(channel.result["code"]) == expect_code, (
-            "Expected: %d, got: %d, resp: %r"
-            % (expect_code, int(channel.result["code"]), channel.result["body"])
-        )
-
-        return channel.json_body
-
     def _read_write_state(
         self,
         room_id: str,
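# Note the changed contract: create_room_as returns None when expect_code
# is not 200, since a rejected request carries no json_body["room_id"].
# The same guard pattern in isolation (hypothetical helper, not RestHelper):
from typing import Optional

def parse_room_id(status: int, body: dict, expect_code: int = 200) -> Optional[str]:
    assert status == expect_code, (status, body)
    if expect_code == 200:
        return body["room_id"]
    return None  # callers that expected a failure get None back

assert parse_room_id(200, {"room_id": "!a:test"}) == "!a:test"
assert parse_room_id(429, {"errcode": "M_LIMIT_EXCEEDED"}, expect_code=429) is None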
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index 7deaf5b24a..53a43038f0 100644
--- a/tests/rest/client/v2_alpha/test_register.py
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -116,8 +116,8 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
         self.assertEquals(channel.result["code"], b"200", channel.result)
         self.assertDictContainsSubset(det_data, channel.json_body)
 
+    @override_config({"enable_registration": False})
     def test_POST_disabled_registration(self):
-        self.hs.config.enable_registration = False
         request_data = json.dumps({"username": "kermit", "password": "monkey"})
         self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None)
 
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
index a31e44c97e..fa3a3ec1bd 100644
--- a/tests/rest/client/v2_alpha/test_sync.py
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -16,9 +16,9 @@
 import json
 
 import synapse.rest.admin
-from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
+from synapse.api.constants import EventContentFields, EventTypes
 from synapse.rest.client.v1 import login, room
-from synapse.rest.client.v2_alpha import read_marker, sync
+from synapse.rest.client.v2_alpha import sync
 
 from tests import unittest
 from tests.server import TimedOutException
@@ -324,156 +324,3 @@ class SyncTypingTests(unittest.HomeserverTestCase):
             "GET", sync_url % (access_token, next_batch)
         )
         self.assertRaises(TimedOutException, self.render, request)
-
-
-class UnreadMessagesTestCase(unittest.HomeserverTestCase):
-    servlets = [
-        synapse.rest.admin.register_servlets,
-        login.register_servlets,
-        read_marker.register_servlets,
-        room.register_servlets,
-        sync.register_servlets,
-    ]
-
-    def prepare(self, reactor, clock, hs):
-        self.url = "/sync?since=%s"
-        self.next_batch = "s0"
-
-        # Register the first user (used to check the unread counts).
-        self.user_id = self.register_user("kermit", "monkey")
-        self.tok = self.login("kermit", "monkey")
-
-        # Create the room we'll check unread counts for.
-        self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
-
-        # Register the second user (used to send events to the room).
-        self.user2 = self.register_user("kermit2", "monkey")
-        self.tok2 = self.login("kermit2", "monkey")
-
-        # Change the power levels of the room so that the second user can send state
-        # events.
-        self.helper.send_state(
-            self.room_id,
-            EventTypes.PowerLevels,
-            {
-                "users": {self.user_id: 100, self.user2: 100},
-                "users_default": 0,
-                "events": {
-                    "m.room.name": 50,
-                    "m.room.power_levels": 100,
-                    "m.room.history_visibility": 100,
-                    "m.room.canonical_alias": 50,
-                    "m.room.avatar": 50,
-                    "m.room.tombstone": 100,
-                    "m.room.server_acl": 100,
-                    "m.room.encryption": 100,
-                },
-                "events_default": 0,
-                "state_default": 50,
-                "ban": 50,
-                "kick": 50,
-                "redact": 50,
-                "invite": 0,
-            },
-            tok=self.tok,
-        )
-
-    def test_unread_counts(self):
-        """Tests that /sync returns the right value for the unread count (MSC2654)."""
-
-        # Check that our own messages don't increase the unread count.
-        self.helper.send(self.room_id, "hello", tok=self.tok)
-        self._check_unread_count(0)
-
-        # Join the new user and check that this doesn't increase the unread count.
-        self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2)
-        self._check_unread_count(0)
-
-        # Check that the new user sending a message increases our unread count.
-        res = self.helper.send(self.room_id, "hello", tok=self.tok2)
-        self._check_unread_count(1)
-
-        # Send a read receipt to tell the server we've read the latest event.
-        body = json.dumps({"m.read": res["event_id"]}).encode("utf8")
-        request, channel = self.make_request(
-            "POST",
-            "/rooms/%s/read_markers" % self.room_id,
-            body,
-            access_token=self.tok,
-        )
-        self.render(request)
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        # Check that the unread counter is back to 0.
-        self._check_unread_count(0)
-
-        # Check that room name changes increase the unread counter.
-        self.helper.send_state(
-            self.room_id, "m.room.name", {"name": "my super room"}, tok=self.tok2,
-        )
-        self._check_unread_count(1)
-
-        # Check that room topic changes increase the unread counter.
-        self.helper.send_state(
-            self.room_id, "m.room.topic", {"topic": "welcome!!!"}, tok=self.tok2,
-        )
-        self._check_unread_count(2)
-
-        # Check that encrypted messages increase the unread counter.
-        self.helper.send_event(self.room_id, EventTypes.Encrypted, {}, tok=self.tok2)
-        self._check_unread_count(3)
-
-        # Check that custom events with a body increase the unread counter.
-        self.helper.send_event(
-            self.room_id, "org.matrix.custom_type", {"body": "hello"}, tok=self.tok2,
-        )
-        self._check_unread_count(4)
-
-        # Check that edits don't increase the unread counter.
-        self.helper.send_event(
-            room_id=self.room_id,
-            type=EventTypes.Message,
-            content={
-                "body": "hello",
-                "msgtype": "m.text",
-                "m.relates_to": {"rel_type": RelationTypes.REPLACE},
-            },
-            tok=self.tok2,
-        )
-        self._check_unread_count(4)
-
-        # Check that notices don't increase the unread counter.
-        self.helper.send_event(
-            room_id=self.room_id,
-            type=EventTypes.Message,
-            content={"body": "hello", "msgtype": "m.notice"},
-            tok=self.tok2,
-        )
-        self._check_unread_count(4)
-
-        # Check that tombstone events increase the unread counter.
-        self.helper.send_state(
-            self.room_id,
-            EventTypes.Tombstone,
-            {"replacement_room": "!someroom:test"},
-            tok=self.tok2,
-        )
-        self._check_unread_count(5)
-
-    def _check_unread_count(self, expected_count: int):
-        """Syncs and compares the unread count with the expected value."""
-
-        request, channel = self.make_request(
-            "GET", self.url % self.next_batch, access_token=self.tok,
-        )
-        self.render(request)
-
-        self.assertEqual(channel.code, 200, channel.json_body)
-
-        room_entry = channel.json_body["rooms"]["join"][self.room_id]
-        self.assertEqual(
-            room_entry["org.matrix.msc2654.unread_count"], expected_count, room_entry,
-        )
-
-        # Store the next batch for the next request.
-        self.next_batch = channel.json_body["next_batch"]
diff --git a/tests/rest/test_health.py b/tests/rest/test_health.py
new file mode 100644
index 0000000000..2d021f6565
--- /dev/null
+++ b/tests/rest/test_health.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from synapse.rest.health import HealthResource
+
+from tests import unittest
+
+
+class HealthCheckTests(unittest.HomeserverTestCase):
+    def setUp(self):
+        super().setUp()
+
+        # replace the JsonResource with a HealthResource.
+        self.resource = HealthResource()
+
+    def test_health(self):
+        request, channel = self.make_request("GET", "/health", shorthand=False)
+        self.render(request)
+
+        self.assertEqual(request.code, 200)
+        self.assertEqual(channel.result["body"], b"OK")
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index 7f70353b0d..2858d13558 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -27,6 +27,7 @@ from synapse.server_notices.resource_limits_server_notices import (
 )
 
 from tests import unittest
+from tests.test_utils import make_awaitable
 from tests.unittest import override_config
 from tests.utils import default_config
 
@@ -79,7 +80,9 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase):
             return_value=defer.succeed("!something:localhost")
         )
         self._rlsn._store.add_tag_to_room = Mock(return_value=defer.succeed(None))
-        self._rlsn._store.get_tags_for_room = Mock(return_value=defer.succeed({}))
+        self._rlsn._store.get_tags_for_room = Mock(
+            side_effect=lambda user_id, room_id: make_awaitable({})
+        )
 
     @override_config({"hs_disabled": True})
     def test_maybe_send_server_notice_disabled_hs(self):
@@ -258,7 +261,7 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase):
         self.user_id = "@user_id:test"
 
     def test_server_notice_only_sent_once(self):
-        self.store.get_monthly_active_count = Mock(return_value=1000)
+        self.store.get_monthly_active_count = Mock(return_value=defer.succeed(1000))
 
         self.store.user_last_seen_monthly_active = Mock(
             return_value=defer.succeed(1000)
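# make_awaitable (imported from tests.test_utils above) wraps a plain value
# so a Mock can stand in for an async function. One minimal way to write
# it; an assumption, not necessarily the real helper:
from typing import Any, Awaitable

def make_awaitable(result: Any) -> Awaitable[Any]:
    async def _inner() -> Any:
        return result
    return _inner()

# A coroutine like this can only be awaited once, which is why the mock
# above uses side_effect with a lambda (a fresh awaitable per call) rather
# than return_value (one shared awaitable for every call).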
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index 1b516b7976..98b74890d5 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -178,14 +178,14 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_get_appservice_state_none(self):
         service = Mock(id="999")
-        state = yield self.store.get_appservice_state(service)
+        state = yield defer.ensureDeferred(self.store.get_appservice_state(service))
         self.assertEquals(None, state)
 
     @defer.inlineCallbacks
     def test_get_appservice_state_up(self):
         yield self._set_state(self.as_list[0]["id"], ApplicationServiceState.UP)
         service = Mock(id=self.as_list[0]["id"])
-        state = yield self.store.get_appservice_state(service)
+        state = yield defer.ensureDeferred(self.store.get_appservice_state(service))
         self.assertEquals(ApplicationServiceState.UP, state)
 
     @defer.inlineCallbacks
@@ -194,13 +194,13 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         yield self._set_state(self.as_list[1]["id"], ApplicationServiceState.DOWN)
         yield self._set_state(self.as_list[2]["id"], ApplicationServiceState.DOWN)
         service = Mock(id=self.as_list[1]["id"])
-        state = yield self.store.get_appservice_state(service)
+        state = yield defer.ensureDeferred(self.store.get_appservice_state(service))
         self.assertEquals(ApplicationServiceState.DOWN, state)
 
     @defer.inlineCallbacks
     def test_get_appservices_by_state_none(self):
-        services = yield self.store.get_appservices_by_state(
-            ApplicationServiceState.DOWN
+        services = yield defer.ensureDeferred(
+            self.store.get_appservices_by_state(ApplicationServiceState.DOWN)
         )
         self.assertEquals(0, len(services))
 
@@ -339,7 +339,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
     def test_get_oldest_unsent_txn_none(self):
         service = Mock(id=self.as_list[0]["id"])
 
-        txn = yield self.store.get_oldest_unsent_txn(service)
+        txn = yield defer.ensureDeferred(self.store.get_oldest_unsent_txn(service))
         self.assertEquals(None, txn)
 
     @defer.inlineCallbacks
@@ -349,14 +349,14 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         other_events = [Mock(event_id="e5"), Mock(event_id="e6")]
 
         # we aren't testing store._base stuff here, so mock this out
-        self.store.get_events_as_list = Mock(return_value=events)
+        self.store.get_events_as_list = Mock(return_value=defer.succeed(events))
 
         yield self._insert_txn(self.as_list[1]["id"], 9, other_events)
         yield self._insert_txn(service.id, 10, events)
         yield self._insert_txn(service.id, 11, other_events)
         yield self._insert_txn(service.id, 12, other_events)
 
-        txn = yield self.store.get_oldest_unsent_txn(service)
+        txn = yield defer.ensureDeferred(self.store.get_oldest_unsent_txn(service))
         self.assertEquals(service, txn.service)
         self.assertEquals(10, txn.id)
         self.assertEquals(events, txn.events)
@@ -366,8 +366,8 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         yield self._set_state(self.as_list[0]["id"], ApplicationServiceState.DOWN)
         yield self._set_state(self.as_list[1]["id"], ApplicationServiceState.UP)
 
-        services = yield self.store.get_appservices_by_state(
-            ApplicationServiceState.DOWN
+        services = yield defer.ensureDeferred(
+            self.store.get_appservices_by_state(ApplicationServiceState.DOWN)
         )
         self.assertEquals(1, len(services))
         self.assertEquals(self.as_list[0]["id"], services[0].id)
@@ -379,8 +379,8 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
         yield self._set_state(self.as_list[2]["id"], ApplicationServiceState.DOWN)
         yield self._set_state(self.as_list[3]["id"], ApplicationServiceState.UP)
 
-        services = yield self.store.get_appservices_by_state(
-            ApplicationServiceState.DOWN
+        services = yield defer.ensureDeferred(
+            self.store.get_appservices_by_state(ApplicationServiceState.DOWN)
         )
         self.assertEquals(2, len(services))
         self.assertEquals(
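# In @defer.inlineCallbacks tests, yield only waits on Deferreds; a bare
# coroutine is handed back unawaited, so the store call would never run.
# defer.ensureDeferred adapts the coroutine into a Deferred and restores
# the old calling convention. Self-contained sketch (not Synapse code):
from twisted.internet import defer

async def async_store_call() -> int:
    return 42

@defer.inlineCallbacks
def generator_style_test():
    result = yield defer.ensureDeferred(async_store_call())
    assert result == 42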
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index c2539b353a..87ed8f8cd1 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -34,7 +34,9 @@ class DeviceStoreTestCase(tests.unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_store_new_device(self):
-        yield self.store.store_device("user_id", "device_id", "display_name")
+        yield defer.ensureDeferred(
+            self.store.store_device("user_id", "device_id", "display_name")
+        )
 
         res = yield self.store.get_device("user_id", "device_id")
         self.assertDictContainsSubset(
@@ -48,11 +50,17 @@ class DeviceStoreTestCase(tests.unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_devices_by_user(self):
-        yield self.store.store_device("user_id", "device1", "display_name 1")
-        yield self.store.store_device("user_id", "device2", "display_name 2")
-        yield self.store.store_device("user_id2", "device3", "display_name 3")
+        yield defer.ensureDeferred(
+            self.store.store_device("user_id", "device1", "display_name 1")
+        )
+        yield defer.ensureDeferred(
+            self.store.store_device("user_id", "device2", "display_name 2")
+        )
+        yield defer.ensureDeferred(
+            self.store.store_device("user_id2", "device3", "display_name 3")
+        )
 
-        res = yield self.store.get_devices_by_user("user_id")
+        res = yield defer.ensureDeferred(self.store.get_devices_by_user("user_id"))
         self.assertEqual(2, len(res.keys()))
         self.assertDictContainsSubset(
             {
@@ -76,13 +84,13 @@ class DeviceStoreTestCase(tests.unittest.TestCase):
         device_ids = ["device_id1", "device_id2"]
 
         # Add two device updates with a single stream_id
-        yield self.store.add_device_change_to_streams(
-            "user_id", device_ids, ["somehost"]
+        yield defer.ensureDeferred(
+            self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
         )
 
         # Get all device updates ever meant for this remote
-        now_stream_id, device_updates = yield self.store.get_device_updates_by_remote(
-            "somehost", -1, limit=100
+        now_stream_id, device_updates = yield defer.ensureDeferred(
+            self.store.get_device_updates_by_remote("somehost", -1, limit=100)
         )
 
         # Check original device_ids are contained within these updates
@@ -99,19 +107,23 @@ class DeviceStoreTestCase(tests.unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_update_device(self):
-        yield self.store.store_device("user_id", "device_id", "display_name 1")
+        yield defer.ensureDeferred(
+            self.store.store_device("user_id", "device_id", "display_name 1")
+        )
 
         res = yield self.store.get_device("user_id", "device_id")
         self.assertEqual("display_name 1", res["display_name"])
 
         # do a no-op first
-        yield self.store.update_device("user_id", "device_id")
+        yield defer.ensureDeferred(self.store.update_device("user_id", "device_id"))
         res = yield self.store.get_device("user_id", "device_id")
         self.assertEqual("display_name 1", res["display_name"])
 
         # do the update
-        yield self.store.update_device(
-            "user_id", "device_id", new_display_name="display_name 2"
+        yield defer.ensureDeferred(
+            self.store.update_device(
+                "user_id", "device_id", new_display_name="display_name 2"
+            )
         )
 
         # check it worked
@@ -121,7 +133,9 @@ class DeviceStoreTestCase(tests.unittest.TestCase):
     @defer.inlineCallbacks
     def test_update_unknown_device(self):
         with self.assertRaises(synapse.api.errors.StoreError) as cm:
-            yield self.store.update_device(
-                "user_id", "unknown_device_id", new_display_name="display_name 2"
+            yield defer.ensureDeferred(
+                self.store.update_device(
+                    "user_id", "unknown_device_id", new_display_name="display_name 2"
+                )
             )
         self.assertEqual(404, cm.exception.code)
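# Error paths follow the same rule: ensureDeferred starts the coroutine at
# once and captures any exception as a failure, which is re-raised at the
# yield, so assertRaises must wrap the yield rather than the call. Sketch
# with a stand-in StoreError:
from twisted.internet import defer

class StoreError(Exception):
    def __init__(self, code: int):
        self.code = code

async def update_unknown_device() -> None:
    raise StoreError(404)

@defer.inlineCallbacks
def failing_test():
    try:
        yield defer.ensureDeferred(update_unknown_device())
    except StoreError as e:
        assert e.code == 404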
diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py
index 4e128e1047..daac947cb2 100644
--- a/tests/storage/test_directory.py
+++ b/tests/storage/test_directory.py
@@ -34,8 +34,10 @@ class DirectoryStoreTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_room_to_alias(self):
-        yield self.store.create_room_alias_association(
-            room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
+        yield defer.ensureDeferred(
+            self.store.create_room_alias_association(
+                room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
+            )
         )
 
         self.assertEquals(
@@ -45,24 +47,36 @@ class DirectoryStoreTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_alias_to_room(self):
-        yield self.store.create_room_alias_association(
-            room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
+        yield defer.ensureDeferred(
+            self.store.create_room_alias_association(
+                room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
+            )
         )
 
         self.assertObjectHasAttributes(
             {"room_id": self.room.to_string(), "servers": ["test"]},
-            (yield self.store.get_association_from_room_alias(self.alias)),
+            (
+                yield defer.ensureDeferred(
+                    self.store.get_association_from_room_alias(self.alias)
+                )
+            ),
         )
 
     @defer.inlineCallbacks
     def test_delete_alias(self):
-        yield self.store.create_room_alias_association(
-            room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
+        yield defer.ensureDeferred(
+            self.store.create_room_alias_association(
+                room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
+            )
         )
 
-        room_id = yield self.store.delete_room_alias(self.alias)
+        room_id = yield defer.ensureDeferred(self.store.delete_room_alias(self.alias))
         self.assertEqual(self.room.to_string(), room_id)
 
         self.assertIsNone(
-            (yield self.store.get_association_from_room_alias(self.alias))
+            (
+                yield defer.ensureDeferred(
+                    self.store.get_association_from_room_alias(self.alias)
+                )
+            )
         )
diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py
index 398d546280..d57cdffd8b 100644
--- a/tests/storage/test_end_to_end_keys.py
+++ b/tests/storage/test_end_to_end_keys.py
@@ -30,11 +30,13 @@ class EndToEndKeyStoreTestCase(tests.unittest.TestCase):
         now = 1470174257070
         json = {"key": "value"}
 
-        yield self.store.store_device("user", "device", None)
+        yield defer.ensureDeferred(self.store.store_device("user", "device", None))
 
         yield self.store.set_e2e_device_keys("user", "device", now, json)
 
-        res = yield self.store.get_e2e_device_keys((("user", "device"),))
+        res = yield defer.ensureDeferred(
+            self.store.get_e2e_device_keys((("user", "device"),))
+        )
         self.assertIn("user", res)
         self.assertIn("device", res["user"])
         dev = res["user"]["device"]
@@ -45,7 +47,7 @@ class EndToEndKeyStoreTestCase(tests.unittest.TestCase):
         now = 1470174257070
         json = {"key": "value"}
 
-        yield self.store.store_device("user", "device", None)
+        yield defer.ensureDeferred(self.store.store_device("user", "device", None))
 
         changed = yield self.store.set_e2e_device_keys("user", "device", now, json)
         self.assertTrue(changed)
@@ -61,9 +63,13 @@ class EndToEndKeyStoreTestCase(tests.unittest.TestCase):
         json = {"key": "value"}
 
         yield self.store.set_e2e_device_keys("user", "device", now, json)
-        yield self.store.store_device("user", "device", "display_name")
+        yield defer.ensureDeferred(
+            self.store.store_device("user", "device", "display_name")
+        )
 
-        res = yield self.store.get_e2e_device_keys((("user", "device"),))
+        res = yield defer.ensureDeferred(
+            self.store.get_e2e_device_keys((("user", "device"),))
+        )
         self.assertIn("user", res)
         self.assertIn("device", res["user"])
         dev = res["user"]["device"]
@@ -75,18 +81,18 @@ class EndToEndKeyStoreTestCase(tests.unittest.TestCase):
     def test_multiple_devices(self):
         now = 1470174257070
 
-        yield self.store.store_device("user1", "device1", None)
-        yield self.store.store_device("user1", "device2", None)
-        yield self.store.store_device("user2", "device1", None)
-        yield self.store.store_device("user2", "device2", None)
+        yield defer.ensureDeferred(self.store.store_device("user1", "device1", None))
+        yield defer.ensureDeferred(self.store.store_device("user1", "device2", None))
+        yield defer.ensureDeferred(self.store.store_device("user2", "device1", None))
+        yield defer.ensureDeferred(self.store.store_device("user2", "device2", None))
 
         yield self.store.set_e2e_device_keys("user1", "device1", now, {"key": "json11"})
         yield self.store.set_e2e_device_keys("user1", "device2", now, {"key": "json12"})
         yield self.store.set_e2e_device_keys("user2", "device1", now, {"key": "json21"})
         yield self.store.set_e2e_device_keys("user2", "device2", now, {"key": "json22"})
 
-        res = yield self.store.get_e2e_device_keys(
-            (("user1", "device1"), ("user2", "device2"))
+        res = yield defer.ensureDeferred(
+            self.store.get_e2e_device_keys((("user1", "device1"), ("user2", "device2")))
         )
         self.assertIn("user1", res)
         self.assertIn("device1", res["user1"])
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 259f2215f1..9870c74883 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -19,6 +19,7 @@ from twisted.internet import defer
 from synapse.api.constants import UserTypes
 
 from tests import unittest
+from tests.test_utils import make_awaitable
 from tests.unittest import default_config, override_config
 
 FORTY_DAYS = 40 * 24 * 60 * 60
@@ -230,7 +231,9 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
         )
         self.get_success(d)
 
-        self.store.upsert_monthly_active_user = Mock()
+        self.store.upsert_monthly_active_user = Mock(
+            side_effect=lambda user_id: make_awaitable(None)
+        )
 
         d = self.store.populate_monthly_active_users(user_id)
         self.get_success(d)
@@ -238,7 +241,9 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
         self.store.upsert_monthly_active_user.assert_not_called()
 
     def test_populate_monthly_users_should_update(self):
-        self.store.upsert_monthly_active_user = Mock()
+        self.store.upsert_monthly_active_user = Mock(
+            side_effect=lambda user_id: make_awaitable(None)
+        )
 
         self.store.is_trial_user = Mock(return_value=defer.succeed(False))
 
@@ -251,7 +256,9 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
         self.store.upsert_monthly_active_user.assert_called_once()
 
     def test_populate_monthly_users_should_not_update(self):
-        self.store.upsert_monthly_active_user = Mock()
+        self.store.upsert_monthly_active_user = Mock(
+            side_effect=lambda user_id: make_awaitable(None)
+        )
 
         self.store.is_trial_user = Mock(return_value=defer.succeed(False))
         self.store.user_last_seen_monthly_active = Mock(
@@ -293,8 +300,12 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
         self.get_success(self.store.register_user(user_id=user2, password_hash=None))
 
         now = int(self.hs.get_clock().time_msec())
-        self.store.user_add_threepid(user1, "email", user1_email, now, now)
-        self.store.user_add_threepid(user2, "email", user2_email, now, now)
+        self.get_success(
+            self.store.user_add_threepid(user1, "email", user1_email, now, now)
+        )
+        self.get_success(
+            self.store.user_add_threepid(user2, "email", user2_email, now, now)
+        )
 
         users = self.get_success(self.store.get_registered_reserved_users())
         self.assertEqual(len(users), len(threepids))
@@ -333,7 +344,9 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
 
     @override_config({"limit_usage_by_mau": False, "mau_stats_only": False})
     def test_no_users_when_not_tracking(self):
-        self.store.upsert_monthly_active_user = Mock()
+        self.store.upsert_monthly_active_user = Mock(
+            side_effect=lambda user_id: make_awaitable(None)
+        )
 
         self.get_success(self.store.populate_monthly_active_users("@user:server"))
 
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index 41511d479f..1ea35d60c1 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -251,6 +251,10 @@ class RedactionTestCase(unittest.HomeserverTestCase):
             def room_id(self):
                 return self._base_builder.room_id
 
+            @property
+            def type(self):
+                return self._base_builder.type
+
         event_1, context_1 = self.get_success(
             self.event_creation_handler.create_new_client_event(
                 EventIdManglingBuilder(
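# EventIdManglingBuilder wraps a real event builder and has to forward every
# attribute the (now mypy-checked) message handler reads; the handler began
# reading .type, hence the new property. The same delegation pattern in
# miniature; explicit properties keep the wrapper's interface visible to
# mypy, where a __getattr__ fallback would not:
class Wrapper:
    def __init__(self, inner):
        self._base_builder = inner

    @property
    def type(self):
        # Forward reads to the wrapped builder.
        return self._base_builder.type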
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index 71a40a0a49..840db66072 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -58,8 +58,10 @@ class RegistrationStoreTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_add_tokens(self):
         yield self.store.register_user(self.user_id, self.pwhash)
-        yield self.store.add_access_token_to_user(
-            self.user_id, self.tokens[1], self.device_id, valid_until_ms=None
+        yield defer.ensureDeferred(
+            self.store.add_access_token_to_user(
+                self.user_id, self.tokens[1], self.device_id, valid_until_ms=None
+            )
         )
 
         result = yield self.store.get_user_by_access_token(self.tokens[1])
@@ -74,11 +76,15 @@ class RegistrationStoreTestCase(unittest.TestCase):
     def test_user_delete_access_tokens(self):
         # add some tokens
         yield self.store.register_user(self.user_id, self.pwhash)
-        yield self.store.add_access_token_to_user(
-            self.user_id, self.tokens[0], device_id=None, valid_until_ms=None
+        yield defer.ensureDeferred(
+            self.store.add_access_token_to_user(
+                self.user_id, self.tokens[0], device_id=None, valid_until_ms=None
+            )
         )
-        yield self.store.add_access_token_to_user(
-            self.user_id, self.tokens[1], self.device_id, valid_until_ms=None
+        yield defer.ensureDeferred(
+            self.store.add_access_token_to_user(
+                self.user_id, self.tokens[1], self.device_id, valid_until_ms=None
+            )
         )
 
         # now delete some
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 6a545d2eb0..ecfafe68a9 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -40,7 +40,7 @@ class UserDirectoryStoreTestCase(unittest.TestCase):
     def test_search_user_dir(self):
         # normally when alice searches the directory she should just find
         # bob because bobby doesn't share a room with her.
-        r = yield self.store.search_user_dir(ALICE, "bob", 10)
+        r = yield defer.ensureDeferred(self.store.search_user_dir(ALICE, "bob", 10))
         self.assertFalse(r["limited"])
         self.assertEqual(1, len(r["results"]))
         self.assertDictEqual(
@@ -51,7 +51,7 @@ class UserDirectoryStoreTestCase(unittest.TestCase):
     def test_search_user_dir_all_users(self):
         self.hs.config.user_directory_search_all_users = True
         try:
-            r = yield self.store.search_user_dir(ALICE, "bob", 10)
+            r = yield defer.ensureDeferred(self.store.search_user_dir(ALICE, "bob", 10))
             self.assertFalse(r["limited"])
             self.assertEqual(2, len(r["results"]))
             self.assertDictEqual(
diff --git a/tests/test_federation.py b/tests/test_federation.py
index c2f12c2741..f2fa42bfb9 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -1,3 +1,18 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from mock import Mock
 
 from twisted.internet.defer import ensureDeferred, maybeDeferred, succeed
@@ -10,6 +25,7 @@ from synapse.util.retryutils import NotRetryingDestination
 
 from tests import unittest
 from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver
+from tests.test_utils import make_awaitable
 
 
 class MessageAcceptTests(unittest.HomeserverTestCase):
@@ -173,7 +189,7 @@ class MessageAcceptTests(unittest.HomeserverTestCase):
         # Register a mock on the store so that the incoming update doesn't fail because
         # we don't share a room with the user.
         store = self.homeserver.get_datastore()
-        store.get_rooms_for_user = Mock(return_value=succeed(["!someroom:test"]))
+        store.get_rooms_for_user = Mock(return_value=make_awaitable(["!someroom:test"]))
 
         # Manually inject a fake device list update. We need this update to include at
         # least one prev_id so that the user's device list will need to be retried.
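# Mock(return_value=make_awaitable(...)) is enough here because the mock is
# awaited exactly once in this test. If a mocked async method may be awaited
# several times, mint a fresh awaitable per call instead, assuming
# make_awaitable returns a single-use awaitable such as a coroutine:
from mock import Mock

def patch_get_rooms_for_user(store, make_awaitable):
    store.get_rooms_for_user = Mock(
        side_effect=lambda *args, **kwargs: make_awaitable(["!someroom:test"])
    )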
diff --git a/tests/unittest.py b/tests/unittest.py
index 2152c693f2..d0bba3ddef 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -241,20 +241,16 @@ class HomeserverTestCase(TestCase):
         if hasattr(self, "user_id"):
             if self.hijack_auth:
 
-                def get_user_by_access_token(token=None, allow_guest=False):
-                    return succeed(
-                        {
-                            "user": UserID.from_string(self.helper.auth_user_id),
-                            "token_id": 1,
-                            "is_guest": False,
-                        }
-                    )
-
-                def get_user_by_req(request, allow_guest=False, rights="access"):
-                    return succeed(
-                        create_requester(
-                            UserID.from_string(self.helper.auth_user_id), 1, False, None
-                        )
+                async def get_user_by_access_token(token=None, allow_guest=False):
+                    return {
+                        "user": UserID.from_string(self.helper.auth_user_id),
+                        "token_id": 1,
+                        "is_guest": False,
+                    }
+
+                async def get_user_by_req(request, allow_guest=False, rights="access"):
+                    return create_requester(
+                        UserID.from_string(self.helper.auth_user_id), 1, False, None
                     )
 
                 self.hs.get_auth().get_user_by_req = get_user_by_req
diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py
index 9e348694ad..bc42ffce88 100644
--- a/tests/util/test_retryutils.py
+++ b/tests/util/test_retryutils.py
@@ -26,9 +26,7 @@ class RetryLimiterTestCase(HomeserverTestCase):
     def test_new_destination(self):
         """A happy-path case with a new destination and a successful operation"""
         store = self.hs.get_datastore()
-        d = get_retry_limiter("test_dest", self.clock, store)
-        self.pump()
-        limiter = self.successResultOf(d)
+        limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store))
 
         # advance the clock a bit before making the request
         self.pump(1)
@@ -36,18 +34,14 @@ class RetryLimiterTestCase(HomeserverTestCase):
         with limiter:
             pass
 
-        d = store.get_destination_retry_timings("test_dest")
-        self.pump()
-        new_timings = self.successResultOf(d)
+        new_timings = self.get_success(store.get_destination_retry_timings("test_dest"))
         self.assertIsNone(new_timings)
 
     def test_limiter(self):
         """General test case which walks through the process of a failing request"""
         store = self.hs.get_datastore()
 
-        d = get_retry_limiter("test_dest", self.clock, store)
-        self.pump()
-        limiter = self.successResultOf(d)
+        limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store))
 
         self.pump(1)
         try:
@@ -58,29 +52,22 @@ class RetryLimiterTestCase(HomeserverTestCase):
         except AssertionError:
             pass
 
-        # wait for the update to land
-        self.pump()
-
-        d = store.get_destination_retry_timings("test_dest")
-        self.pump()
-        new_timings = self.successResultOf(d)
+        new_timings = self.get_success(store.get_destination_retry_timings("test_dest"))
         self.assertEqual(new_timings["failure_ts"], failure_ts)
         self.assertEqual(new_timings["retry_last_ts"], failure_ts)
         self.assertEqual(new_timings["retry_interval"], MIN_RETRY_INTERVAL)
 
         # now if we try again we should get a failure
-        d = get_retry_limiter("test_dest", self.clock, store)
-        self.pump()
-        self.failureResultOf(d, NotRetryingDestination)
+        self.get_failure(
+            get_retry_limiter("test_dest", self.clock, store), NotRetryingDestination
+        )
 
         #
         # advance the clock and try again
         #
 
         self.pump(MIN_RETRY_INTERVAL)
-        d = get_retry_limiter("test_dest", self.clock, store)
-        self.pump()
-        limiter = self.successResultOf(d)
+        limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store))
 
         self.pump(1)
         try:
@@ -91,12 +78,7 @@ class RetryLimiterTestCase(HomeserverTestCase):
         except AssertionError:
             pass
 
-        # wait for the update to land
-        self.pump()
-
-        d = store.get_destination_retry_timings("test_dest")
-        self.pump()
-        new_timings = self.successResultOf(d)
+        new_timings = self.get_success(store.get_destination_retry_timings("test_dest"))
         self.assertEqual(new_timings["failure_ts"], failure_ts)
         self.assertEqual(new_timings["retry_last_ts"], retry_ts)
         self.assertGreaterEqual(
@@ -110,9 +92,7 @@ class RetryLimiterTestCase(HomeserverTestCase):
         # one more go, with success
         #
         self.pump(MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 2.0)
-        d = get_retry_limiter("test_dest", self.clock, store)
-        self.pump()
-        limiter = self.successResultOf(d)
+        limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store))
 
         self.pump(1)
         with limiter:
@@ -121,7 +101,5 @@ class RetryLimiterTestCase(HomeserverTestCase):
         # wait for the update to land
         self.pump()
 
-        d = store.get_destination_retry_timings("test_dest")
-        self.pump()
-        new_timings = self.successResultOf(d)
+        new_timings = self.get_success(store.get_destination_retry_timings("test_dest"))
         self.assertIsNone(new_timings)
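# get_success collapses the old pump()/successResultOf() dance: schedule
# the awaitable, spin the fake reactor until it resolves, then unwrap the
# result (get_failure does the same for an expected exception type). A
# simplified re-statement of the helper, assumed rather than copied from
# tests.unittest:
import inspect

from twisted.internet.defer import Deferred, ensureDeferred

def get_success(case, d, by=0.0):
    if inspect.isawaitable(d):
        d = ensureDeferred(d)       # accept coroutines as well as Deferreds
    if not isinstance(d, Deferred):
        return d                    # a plain value: nothing to wait for
    case.pump(by=by)                # advance the fake reactor
    return case.successResultOf(d)  # unwrap, failing the test on error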
diff --git a/tox.ini b/tox.ini
index 9a052c1e33..e5413eb110 100644
--- a/tox.ini
+++ b/tox.ini
@@ -179,6 +179,7 @@ commands = mypy \
             synapse/appservice \
             synapse/config \
             synapse/event_auth.py \
+            synapse/events/builder.py \
             synapse/events/spamcheck.py \
             synapse/federation \
             synapse/handlers/auth.py \
@@ -186,6 +187,7 @@ commands = mypy \
             synapse/handlers/directory.py \
             synapse/handlers/federation.py \
             synapse/handlers/identity.py \
+            synapse/handlers/message.py \
             synapse/handlers/oidc_handler.py \
             synapse/handlers/presence.py \
             synapse/handlers/room_member.py \
@@ -198,10 +200,12 @@ commands = mypy \
             synapse/logging/ \
             synapse/metrics \
             synapse/module_api \
+            synapse/notifier.py \
             synapse/push/pusherpool.py \
             synapse/push/push_rule_evaluator.py \
             synapse/replication \
             synapse/rest \
+            synapse/server.py \
             synapse/server_notices \
             synapse/spam_checker_api \
             synapse/storage/databases/main/ui_auth.py \
@@ -210,7 +214,9 @@ commands = mypy \
             synapse/storage/state.py \
             synapse/storage/util \
             synapse/streams \
+            synapse/types.py \
             synapse/util/caches/stream_change_cache.py \
+            synapse/util/metrics.py \
             tests/replication \
             tests/test_utils \
             tests/rest/client/v2_alpha/test_auth.py \