author    Erik Johnston <erik@matrix.org>  2020-01-22 16:53:28 +0000
committer Erik Johnston <erik@matrix.org>  2020-01-22 16:53:28 +0000
commit    57a60365da0c47f286ea4608d766abbca5762233 (patch)
tree      aaef0948f26f3352092b787d32e1dda0743d697e
parent    Pull out more info about room key requests (diff)
parent    Remove unnecessary abstractions in admin handler (#6751) (diff)
download  synapse-github/erikj/debug_direct_message_checks.tar.xz

Merge branch 'develop' of github.com:matrix-org/synapse into erijk/debug_direct_message_checks (github/erikj/debug_direct_message_checks, erikj/debug_direct_message_checks)
-rw-r--r--  .buildkite/docker-compose.py35.pg95.yaml  22
-rw-r--r--  .buildkite/docker-compose.py37.pg11.yaml  22
-rw-r--r--  .buildkite/docker-compose.py37.pg95.yaml  22
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md  6
-rw-r--r--  CHANGES.md  74
-rw-r--r--  CONTRIBUTING.md  4
-rw-r--r--  INSTALL.md  5
-rw-r--r--  changelog.d/5742.feature  1
-rw-r--r--  changelog.d/6621.doc  1
-rw-r--r--  changelog.d/6624.doc  1
-rw-r--r--  changelog.d/6654.bugfix  1
-rw-r--r--  changelog.d/6656.doc  1
-rw-r--r--  changelog.d/6664.bugfix  1
-rw-r--r--  changelog.d/6665.doc  1
-rw-r--r--  changelog.d/6751.misc  1
-rw-r--r--  docs/admin_api/media_admin_api.md  76
-rw-r--r--  docs/admin_api/rooms.md  173
-rw-r--r--  docs/sample_config.yaml  284
-rw-r--r--  docs/tcp_replication.md  11
-rw-r--r--  docs/workers.md  8
-rw-r--r--  mypy.ini  12
-rwxr-xr-x  scripts-dev/update_database  20
-rwxr-xr-x  scripts/hash_password  2
-rwxr-xr-x  scripts/synapse_port_db  233
-rw-r--r--  synapse/__init__.py  2
-rw-r--r--  synapse/api/auth.py  8
-rw-r--r--  synapse/api/errors.py  33
-rw-r--r--  synapse/api/filtering.py  4
-rw-r--r--  synapse/api/ratelimiting.py  7
-rw-r--r--  synapse/app/admin_cmd.py  3
-rw-r--r--  synapse/app/appservice.py  5
-rw-r--r--  synapse/app/client_reader.py  4
-rw-r--r--  synapse/app/event_creator.py  4
-rw-r--r--  synapse/app/federation_reader.py  4
-rw-r--r--  synapse/app/federation_sender.py  17
-rw-r--r--  synapse/app/homeserver.py  13
-rw-r--r--  synapse/app/pusher.py  5
-rw-r--r--  synapse/app/synchrotron.py  9
-rw-r--r--  synapse/app/user_dir.py  5
-rw-r--r--  synapse/config/emailconfig.py  222
-rw-r--r--  synapse/config/push.py  2
-rw-r--r--  synapse/config/registration.py  86
-rw-r--r--  synapse/config/saml2_config.py  1
-rw-r--r--  synapse/config/server.py  8
-rw-r--r--  synapse/event_auth.py  2
-rw-r--r--  synapse/events/snapshot.py  11
-rw-r--r--  synapse/federation/federation_server.py  70
-rw-r--r--  synapse/federation/send_queue.py  4
-rw-r--r--  synapse/federation/sender/__init__.py  18
-rw-r--r--  synapse/federation/sender/per_destination_queue.py  3
-rw-r--r--  synapse/federation/transport/server.py  19
-rw-r--r--  synapse/handlers/admin.py  89
-rw-r--r--  synapse/handlers/deactivate_account.py  2
-rw-r--r--  synapse/handlers/federation.py  10
-rw-r--r--  synapse/handlers/initial_sync.py  2
-rw-r--r--  synapse/handlers/pagination.py  15
-rw-r--r--  synapse/handlers/register.py  14
-rw-r--r--  synapse/handlers/room.py  24
-rw-r--r--  synapse/handlers/room_member.py  2
-rw-r--r--  synapse/handlers/saml_handler.py  62
-rw-r--r--  synapse/handlers/search.py  2
-rw-r--r--  synapse/handlers/sync.py  2
-rw-r--r--  synapse/handlers/typing.py  2
-rw-r--r--  synapse/http/server.py  13
-rw-r--r--  synapse/http/site.py  2
-rw-r--r--  synapse/logging/context.py  3
-rw-r--r--  synapse/module_api/__init__.py  47
-rw-r--r--  synapse/module_api/errors.py  18
-rw-r--r--  synapse/notifier.py  31
-rw-r--r--  synapse/push/push_tools.py  2
-rw-r--r--  synapse/replication/http/_base.py  10
-rw-r--r--  synapse/replication/slave/storage/_base.py  14
-rw-r--r--  synapse/replication/slave/storage/events.py  2
-rw-r--r--  synapse/replication/slave/storage/presence.py  2
-rw-r--r--  synapse/replication/tcp/client.py  26
-rw-r--r--  synapse/replication/tcp/commands.py  59
-rw-r--r--  synapse/replication/tcp/protocol.py  121
-rw-r--r--  synapse/replication/tcp/resource.py  43
-rw-r--r--  synapse/replication/tcp/streams/_base.py  104
-rw-r--r--  synapse/replication/tcp/streams/events.py  25
-rw-r--r--  synapse/replication/tcp/streams/federation.py  4
-rw-r--r--  synapse/rest/admin/__init__.py  5
-rw-r--r--  synapse/rest/admin/_base.py  15
-rw-r--r--  synapse/rest/admin/media.py  68
-rw-r--r--  synapse/rest/admin/rooms.py  82
-rw-r--r--  synapse/rest/admin/users.py  46
-rw-r--r--  synapse/rest/client/v1/events.py  2
-rw-r--r--  synapse/rest/client/v1/login.py  2
-rw-r--r--  synapse/rest/client/v1/room.py  18
-rw-r--r--  synapse/rest/client/v2_alpha/_base.py  2
-rw-r--r--  synapse/rest/client/v2_alpha/auth.py  4
-rw-r--r--  synapse/rest/client/v2_alpha/register.py  3
-rw-r--r--  synapse/rest/client/v2_alpha/sendtodevice.py  3
-rw-r--r--  synapse/rest/client/versions.py  2
-rw-r--r--  synapse/rest/key/v2/remote_key_resource.py  5
-rw-r--r--  synapse/rest/media/v1/media_repository.py  3
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py  7
-rw-r--r--  synapse/rest/media/v1/thumbnail_resource.py  14
-rw-r--r--  synapse/server.pyi  12
-rw-r--r--  synapse/server_notices/server_notices_manager.py  2
-rw-r--r--  synapse/state/__init__.py  5
-rw-r--r--  synapse/state/v1.py  5
-rw-r--r--  synapse/state/v2.py  9
-rw-r--r--  synapse/storage/_base.py  18
-rw-r--r--  synapse/storage/data_stores/__init__.py  2
-rw-r--r--  synapse/storage/data_stores/main/cache.py  29
-rw-r--r--  synapse/storage/data_stores/main/devices.py  2
-rw-r--r--  synapse/storage/data_stores/main/events.py  119
-rw-r--r--  synapse/storage/data_stores/main/events_worker.py  2
-rw-r--r--  synapse/storage/data_stores/main/keys.py  2
-rw-r--r--  synapse/storage/data_stores/main/monthly_active_users.py  165
-rw-r--r--  synapse/storage/data_stores/main/presence.py  2
-rw-r--r--  synapse/storage/data_stores/main/registration.py  2
-rw-r--r--  synapse/storage/data_stores/main/room.py  239
-rw-r--r--  synapse/storage/data_stores/main/roommember.py  189
-rw-r--r--  synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py  98
-rw-r--r--  synapse/storage/data_stores/main/state.py  11
-rw-r--r--  synapse/storage/data_stores/main/stream.py  10
-rw-r--r--  synapse/storage/data_stores/state/store.py  52
-rw-r--r--  synapse/storage/engines/postgres.py  34
-rw-r--r--  synapse/storage/engines/sqlite.py  7
-rw-r--r--  synapse/storage/persist_events.py  123
-rw-r--r--  synapse/storage/prepare_database.py  2
-rw-r--r--  synapse/storage/purge_events.py  2
-rw-r--r--  synapse/storage/state.py  35
-rw-r--r--  synapse/types.py  9
-rw-r--r--  synapse/util/__init__.py  17
-rw-r--r--  synapse/util/iterutils.py  48
-rw-r--r--  synapse/util/module_loader.py  2
-rw-r--r--  tests/handlers/test_register.py  2
-rw-r--r--  tests/handlers/test_sync.py  4
-rw-r--r--  tests/replication/slave/storage/test_events.py  4
-rw-r--r--  tests/replication/tcp/streams/_base.py  2
-rw-r--r--  tests/rest/admin/test_admin.py  737
-rw-r--r--  tests/rest/admin/test_user.py  13
-rw-r--r--  tests/rest/client/v1/test_events.py  27
-rw-r--r--  tests/rest/client/v1/utils.py  37
-rw-r--r--  tests/rest/client/v2_alpha/test_account.py  12
-rw-r--r--  tests/rest/client/v2_alpha/test_sync.py  9
-rw-r--r--  tests/rest/media/v1/test_media_storage.py  48
-rw-r--r--  tests/storage/test_roommember.py  2
-rw-r--r--  tests/test_server.py  79
-rw-r--r--  tests/unittest.py  2
-rw-r--r--  tests/util/test_itertools.py  47
-rw-r--r--  tox.ini  5
145 files changed, 3565 insertions, 1303 deletions
diff --git a/.buildkite/docker-compose.py35.pg95.yaml b/.buildkite/docker-compose.py35.pg95.yaml
deleted file mode 100644
index 43237b7775..0000000000
--- a/.buildkite/docker-compose.py35.pg95.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-version: '3.1'
-
-services:
-
-  postgres:
-    image: postgres:9.5
-    environment:
-      POSTGRES_PASSWORD: postgres
-    command: -c fsync=off
-
-  testenv:
-    image: python:3.5
-    depends_on:
-      - postgres
-    env_file: .env
-    environment:
-      SYNAPSE_POSTGRES_HOST: postgres
-      SYNAPSE_POSTGRES_USER: postgres
-      SYNAPSE_POSTGRES_PASSWORD: postgres
-    working_dir: /src
-    volumes:
-      - ..:/src
diff --git a/.buildkite/docker-compose.py37.pg11.yaml b/.buildkite/docker-compose.py37.pg11.yaml
deleted file mode 100644
index b767228147..0000000000
--- a/.buildkite/docker-compose.py37.pg11.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-version: '3.1'
-
-services:
-
-  postgres:
-    image: postgres:11
-    environment:
-      POSTGRES_PASSWORD: postgres
-    command: -c fsync=off
-
-  testenv:
-    image: python:3.7
-    depends_on:
-      - postgres
-    env_file: .env
-    environment:
-      SYNAPSE_POSTGRES_HOST: postgres
-      SYNAPSE_POSTGRES_USER: postgres
-      SYNAPSE_POSTGRES_PASSWORD: postgres
-    working_dir: /src
-    volumes:
-      - ..:/src
diff --git a/.buildkite/docker-compose.py37.pg95.yaml b/.buildkite/docker-compose.py37.pg95.yaml
deleted file mode 100644
index 02fcd28304..0000000000
--- a/.buildkite/docker-compose.py37.pg95.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-version: '3.1'
-
-services:
-
-  postgres:
-    image: postgres:9.5
-    environment:
-      POSTGRES_PASSWORD: postgres
-    command: -c fsync=off
-
-  testenv:
-    image: python:3.7
-    depends_on:
-      - postgres
-    env_file: .env
-    environment:
-      SYNAPSE_POSTGRES_HOST: postgres
-      SYNAPSE_POSTGRES_USER: postgres
-      SYNAPSE_POSTGRES_PASSWORD: postgres
-    working_dir: /src
-    volumes:
-      - ..:/src
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 11fb05ca96..fc22d89426 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -3,6 +3,10 @@
 <!-- Please read CONTRIBUTING.md before submitting your pull request -->
 
 * [ ] Pull request is based on the develop branch
-* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog)
+* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog). The entry should:
+  - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
+  - Use markdown where necessary, mostly for `code blocks`.
+  - End with either a period (.) or an exclamation mark (!).
+  - Start with a capital letter.
 * [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#sign-off)
 * [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#code-style))
diff --git a/CHANGES.md b/CHANGES.md
index c8840e9c74..0392acbde4 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,77 @@
+Synapse 1.9.0rc1 (2020-01-22)
+=============================
+
+**WARNING**: As of this release, Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)).
+
+If your Synapse deployment uses workers, note that the reverse-proxy configurations for the `synapse.app.media_repository`, `synapse.app.federation_reader` and `synapse.app.event_creator` have changed, with the addition of a few paths (see the updated configurations [here](docs/workers.md#available-worker-applications)).
+
+Features
+--------
+
+- Allow admin to create or modify a user. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5742](https://github.com/matrix-org/synapse/issues/5742))
+- Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media. ([\#6681](https://github.com/matrix-org/synapse/issues/6681), [\#6756](https://github.com/matrix-org/synapse/issues/6756))
+- Add `org.matrix.e2e_cross_signing` to `unstable_features` in `/versions` as per [MSC1756](https://github.com/matrix-org/matrix-doc/pull/1756). ([\#6712](https://github.com/matrix-org/synapse/issues/6712))
+- Add a new admin API to list and filter rooms on the server. ([\#6720](https://github.com/matrix-org/synapse/issues/6720))
+
+
+Bugfixes
+--------
+
+- Correctly proxy HTTP errors due to API calls to remote group servers. ([\#6654](https://github.com/matrix-org/synapse/issues/6654))
+- Fix media repo admin APIs when using a media worker. ([\#6664](https://github.com/matrix-org/synapse/issues/6664))
+- Fix "CRITICAL" errors being logged when a request is received for a uri containing non-ascii characters. ([\#6682](https://github.com/matrix-org/synapse/issues/6682))
+- Fix a bug where we would assign a numeric user ID if somebody tried registering with an empty username. ([\#6690](https://github.com/matrix-org/synapse/issues/6690))
+- Fix `purge_room` admin API. ([\#6711](https://github.com/matrix-org/synapse/issues/6711))
+- Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies when running the automated purge jobs. ([\#6714](https://github.com/matrix-org/synapse/issues/6714))
+- Fix the `synapse_port_db` script not correctly running background updates. Thanks @tadzik for reporting. ([\#6718](https://github.com/matrix-org/synapse/issues/6718))
+- Fix changing password via user admin API. ([\#6730](https://github.com/matrix-org/synapse/issues/6730))
+- Fix `/events/:event_id` deprecated API. ([\#6731](https://github.com/matrix-org/synapse/issues/6731))
+- Fix monthly active user limiting support for worker mode, fixes [#4639](https://github.com/matrix-org/synapse/issues/4639). ([\#6742](https://github.com/matrix-org/synapse/issues/6742))
+- Fix bug when setting `account_validity` to an empty block in the config. Thanks to @Sorunome for reporting. ([\#6747](https://github.com/matrix-org/synapse/issues/6747))
+- Fix `AttributeError: 'NoneType' object has no attribute 'get'` in `hash_password` when configuration has an empty `password_config`. Contributed by @ivilata. ([\#6753](https://github.com/matrix-org/synapse/issues/6753))
+- Fix the `docker-compose.yaml` overriding the entire `/etc` folder of the container. Contributed by Fabian Meyer. ([\#6656](https://github.com/matrix-org/synapse/issues/6656))
+
+
+Improved Documentation
+----------------------
+
+- Fix a typo in the configuration example for purge jobs in the sample configuration file. ([\#6621](https://github.com/matrix-org/synapse/issues/6621))
+- Add complete documentation of the message retention policies support. ([\#6624](https://github.com/matrix-org/synapse/issues/6624), [\#6665](https://github.com/matrix-org/synapse/issues/6665))
+- Add some helpful tips about changelog entries to the GitHub pull request template. ([\#6663](https://github.com/matrix-org/synapse/issues/6663))
+- Clarify the `account_validity` and `email` sections of the sample configuration. ([\#6685](https://github.com/matrix-org/synapse/issues/6685))
+- Add more endpoints to the documentation for Synapse workers. ([\#6698](https://github.com/matrix-org/synapse/issues/6698))
+
+
+Deprecations and Removals
+-------------------------
+
+- Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)). ([\#6675](https://github.com/matrix-org/synapse/issues/6675))
+
+
+Internal Changes
+----------------
+
+- Add `local_current_membership` table for tracking local user membership state in rooms. ([\#6655](https://github.com/matrix-org/synapse/issues/6655), [\#6728](https://github.com/matrix-org/synapse/issues/6728))
+- Port `synapse.replication.tcp` to async/await. ([\#6666](https://github.com/matrix-org/synapse/issues/6666))
+- Fixup `synapse.replication` to pass mypy checks. ([\#6667](https://github.com/matrix-org/synapse/issues/6667))
+- Allow `additional_resources` to implement `IResource` directly. ([\#6686](https://github.com/matrix-org/synapse/issues/6686))
+- Allow REST endpoint implementations to raise a `RedirectException`, which will redirect the user's browser to a given location. ([\#6687](https://github.com/matrix-org/synapse/issues/6687))
+- Updates and extensions to the module API. ([\#6688](https://github.com/matrix-org/synapse/issues/6688))
+- Updates to the SAML mapping provider API. ([\#6689](https://github.com/matrix-org/synapse/issues/6689), [\#6723](https://github.com/matrix-org/synapse/issues/6723))
+- Remove redundant `RegistrationError` class. ([\#6691](https://github.com/matrix-org/synapse/issues/6691))
+- Don't block processing of incoming EDUs behind processing PDUs in the same transaction. ([\#6697](https://github.com/matrix-org/synapse/issues/6697))
+- Remove duplicate check for the `session` query parameter on the `/auth/xxx/fallback/web` Client-Server endpoint. ([\#6702](https://github.com/matrix-org/synapse/issues/6702))
+- Attempt to retry sending a transaction when we detect a remote server has come back online, rather than waiting for a transaction to be triggered by new data. ([\#6706](https://github.com/matrix-org/synapse/issues/6706))
+- Add `StateMap` type alias to simplify types. ([\#6715](https://github.com/matrix-org/synapse/issues/6715))
+- Add a `DeltaState` to track changes to be made to current state during event persistence. ([\#6716](https://github.com/matrix-org/synapse/issues/6716))
+- Add more logging around message retention policies support. ([\#6717](https://github.com/matrix-org/synapse/issues/6717))
+- When processing a SAML response, log the assertions for easier configuration. ([\#6724](https://github.com/matrix-org/synapse/issues/6724))
+- Fixup `synapse.rest` to pass mypy. ([\#6732](https://github.com/matrix-org/synapse/issues/6732), [\#6764](https://github.com/matrix-org/synapse/issues/6764))
+- Fixup `synapse.api` to pass mypy. ([\#6733](https://github.com/matrix-org/synapse/issues/6733))
+- Allow streaming cache 'invalidate all' to workers. ([\#6749](https://github.com/matrix-org/synapse/issues/6749))
+- Remove unused CI docker compose files. ([\#6754](https://github.com/matrix-org/synapse/issues/6754))
+
+
 Synapse 1.8.0 (2020-01-09)
 ==========================
 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c0091346f3..5736ede6c4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -101,8 +101,8 @@ in the format of `PRnumber.type`. The type can be one of the following:
 The content of the file is your changelog entry, which should be a short
 description of your change in the same style as the rest of our [changelog](
 https://github.com/matrix-org/synapse/blob/master/CHANGES.md). The file can
-contain Markdown formatting, and should end with a full stop ('.') for
-consistency.
+contain Markdown formatting, and should end with a full stop (.) or an
+exclamation mark (!) for consistency.
 
 Adding credits to the changelog is encouraged, we value your
 contributions and would like to have you shouted out in the release notes!
diff --git a/INSTALL.md b/INSTALL.md
index 9da2e3c734..d25fcf0753 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -133,6 +133,11 @@ sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
 sudo yum groupinstall "Development Tools"
 ```
 
+Note that Synapse does not support versions of SQLite before 3.11, and CentOS 7
+uses SQLite 3.7. You may be able to work around this by installing a more
+recent SQLite version, but it is recommended that you instead use a Postgres
+database: see [docs/postgres.md](docs/postgres.md).
+
 #### macOS
 
 Installing prerequisites on macOS:
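As a quick illustration of the SQLite floor described in the INSTALL.md note above, the standard-library `sqlite3` module reports the library version Python is linked against. A minimal sketch (not part of this commit):

```python
# Check whether the SQLite library Python (and hence Synapse) is linked
# against meets the 3.11 minimum mentioned above.
import sqlite3

if sqlite3.sqlite_version_info < (3, 11):
    print("SQLite %s is too old for Synapse; consider Postgres" % sqlite3.sqlite_version)
else:
    print("SQLite %s is new enough" % sqlite3.sqlite_version)
```

On a stock CentOS 7 host this prints the "too old" branch, since its system SQLite is 3.7.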
diff --git a/changelog.d/5742.feature b/changelog.d/5742.feature
deleted file mode 100644
index de10302275..0000000000
--- a/changelog.d/5742.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow admin to create or modify a user. Contributed by Awesome Technologies Innovationslabor GmbH.
diff --git a/changelog.d/6621.doc b/changelog.d/6621.doc
deleted file mode 100644
index 6722ccfda3..0000000000
--- a/changelog.d/6621.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix a typo in the configuration example for purge jobs in the sample configuration file.
diff --git a/changelog.d/6624.doc b/changelog.d/6624.doc
deleted file mode 100644
index bc9a022db2..0000000000
--- a/changelog.d/6624.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add complete documentation of the message retention policies support.
diff --git a/changelog.d/6654.bugfix b/changelog.d/6654.bugfix
deleted file mode 100644
index fed35252db..0000000000
--- a/changelog.d/6654.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Correctly proxy HTTP errors due to API calls to remote group servers.
diff --git a/changelog.d/6656.doc b/changelog.d/6656.doc
deleted file mode 100644
index 9f32da1a88..0000000000
--- a/changelog.d/6656.doc
+++ /dev/null
@@ -1 +0,0 @@
-No more overriding the entire /etc folder of the container in docker-compose.yaml. Contributed by Fabian Meyer.
diff --git a/changelog.d/6664.bugfix b/changelog.d/6664.bugfix
deleted file mode 100644
index 8c6a6fa1c8..0000000000
--- a/changelog.d/6664.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix media repo admin APIs when using a media worker.
diff --git a/changelog.d/6665.doc b/changelog.d/6665.doc
deleted file mode 100644
index bc9a022db2..0000000000
--- a/changelog.d/6665.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add complete documentation of the message retention policies support.
diff --git a/changelog.d/6751.misc b/changelog.d/6751.misc
new file mode 100644
index 0000000000..7222520528
--- /dev/null
+++ b/changelog.d/6751.misc
@@ -0,0 +1 @@
+Remove some unnecessary admin handler abstraction methods.
\ No newline at end of file
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index 8b3666d5f5..46ba7a1a71 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -22,19 +22,81 @@ It returns a JSON body like the following:
 }
 ```
 
-# Quarantine media in a room
+# Quarantine media
 
-This API 'quarantines' all the media in a room.
+Quarantining media means that it is marked as inaccessible to users. It applies
+to any local media, and any locally-cached copies of remote media.
 
-The API is:
+The media file itself (and any thumbnails) is not deleted from the server.
+
+## Quarantining media by ID
+
+This API quarantines a single piece of local or remote media.
+
+Request:
 
 ```
-POST /_synapse/admin/v1/quarantine_media/<room_id>
+POST /_synapse/admin/v1/media/quarantine/<server_name>/<media_id>
 
 {}
 ```
 
-Quarantining media means that it is marked as inaccessible by users. It applies
-to any local media, and any locally-cached copies of remote media.
+Where `server_name` is in the form of `example.org`, and `media_id` is in the
+form of `abcdefg12345...`.
+
+Response:
+
+```
+{}
+```
+
+## Quarantining media in a room
+
+This API quarantines all local and remote media in a room.
+
+Request:
+
+```
+POST /_synapse/admin/v1/room/<room_id>/media/quarantine
+
+{}
+```
+
+Where `room_id` is in the form of `!roomid12345:example.org`.
+
+Response:
+
+```
+{
+  "num_quarantined": 10  # The number of media items successfully quarantined
+}
+```
+
+Note that there is a legacy endpoint, `POST
+/_synapse/admin/v1/quarantine_media/<room_id>`, which behaves in the same
+way. However, it is deprecated and may be removed in a future release.
+
+## Quarantining all media of a user
+
+This API quarantines all *local* media that a *local* user has uploaded. That is to say, if
+you would like to quarantine media uploaded by a user on a remote homeserver, you should
+instead use one of the other APIs.
+
+Request:
+
+```
+POST /_synapse/admin/v1/user/<user_id>/media/quarantine
+
+{}
+```
+
+Where `user_id` is in the form of `@bob:example.org`.
+
+Response:
+
+```
+{
+  "num_quarantined": 10  # The number of media items successfully quarantined
+}
+```
 
-The media file itself (and any thumbnails) is not deleted from the server.
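Taken together, the three quarantine endpoints documented above can be driven from a small admin script. A sketch using `requests` (the `BASE_URL` and `ADMIN_TOKEN` values are hypothetical placeholders, not part of this commit):

```python
# Sketch: calling the quarantine endpoints documented above. In real use,
# room/user/media IDs should be URL-encoded before interpolation.
import requests

BASE_URL = "https://synapse.example.org"          # placeholder homeserver
ADMIN_TOKEN = "secret-admin-access-token"         # placeholder admin token
HEADERS = {"Authorization": "Bearer " + ADMIN_TOKEN}

def quarantine_media_by_id(server_name, media_id):
    url = "%s/_synapse/admin/v1/media/quarantine/%s/%s" % (BASE_URL, server_name, media_id)
    return requests.post(url, json={}, headers=HEADERS).json()  # -> {}

def quarantine_room_media(room_id):
    url = "%s/_synapse/admin/v1/room/%s/media/quarantine" % (BASE_URL, room_id)
    return requests.post(url, json={}, headers=HEADERS).json()["num_quarantined"]

def quarantine_user_media(user_id):
    # Only *local* media uploaded by a *local* user, per the docs above.
    url = "%s/_synapse/admin/v1/user/%s/media/quarantine" % (BASE_URL, user_id)
    return requests.post(url, json={}, headers=HEADERS).json()["num_quarantined"]
```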
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
new file mode 100644
index 0000000000..082721ea95
--- /dev/null
+++ b/docs/admin_api/rooms.md
@@ -0,0 +1,173 @@
+# List Room API
+
+The List Room admin API allows server admins to get a list of rooms on their
+server. There are various parameters available that allow for filtering and
+sorting the returned list. This API supports pagination.
+
+## Parameters
+
+The following query parameters are available:
+
+* `from` - Offset in the returned list. Defaults to `0`.
+* `limit` - Maximum number of rooms to return. Defaults to `100`.
+* `order_by` - The method in which to sort the returned list of rooms. Valid values are:
+  - `alphabetical` - Rooms are ordered alphabetically by room name. This is the default.
+  - `size` - Rooms are ordered by the number of members. Largest to smallest.
+* `dir` - Direction of room order. Either `f` for forwards or `b` for backwards. Setting
+          this value to `b` will reverse the above sort order. Defaults to `f`.
+* `search_term` - Filter rooms by their room name. Search term can be contained in any
+                  part of the room name. Defaults to no filtering.
+
+The following fields are possible in the JSON response body:
+
+* `rooms` - An array of objects, each containing information about a room.
+  - Room objects contain the following fields:
+    - `room_id` - The ID of the room.
+    - `name` - The name of the room.
+    - `canonical_alias` - The canonical (main) alias address of the room.
+    - `joined_members` - How many users are currently in the room.
+* `offset` - The current pagination offset in rooms. This parameter should be
+             used to track progress through the list rather than `next_batch`,
+             which is an opaque token not intended to be parsed.
+* `total_rooms` - The total number of rooms this query can return. Using this
+                  and `offset`, you have enough information to know the current
+                  progression through the list.
+* `next_batch` - If this field is present, we know that there are potentially
+                 more rooms on the server that did not all fit into this response.
+                 We can use `next_batch` to get the "next page" of results. To do
+                 so, simply repeat your request, setting the `from` parameter to
+                 the value of `next_batch`.
+* `prev_batch` - If this field is present, it is possible to paginate backwards.
+                 Use `prev_batch` for the `from` value in the next request to
+                 get the "previous page" of results.
+
+## Usage
+
+A standard request with no filtering:
+
+```
+GET /_synapse/admin/rooms
+
+{}
+```
+
+Response:
+
+```
+{
+  "rooms": [
+    {
+      "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
+      "name": "Matrix HQ",
+      "canonical_alias": "#matrix:matrix.org",
+      "joined_members": 8326
+    },
+    ... (8 hidden items) ...
+    {
+      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
+      "name": "This Week In Matrix (TWIM)",
+      "canonical_alias": "#twim:matrix.org",
+      "joined_members": 314
+    }
+  ],
+  "offset": 0,
+  "total_rooms": 10
+}
+```
+
+Filtering by room name:
+
+```
+GET /_synapse/admin/rooms?search_term=TWIM
+
+{}
+```
+
+Response:
+
+```
+{
+  "rooms": [
+    {
+      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
+      "name": "This Week In Matrix (TWIM)",
+      "canonical_alias": "#twim:matrix.org",
+      "joined_members": 314
+    }
+  ],
+  "offset": 0,
+  "total_rooms": 1
+}
+```
+
+Paginating through a list of rooms:
+
+```
+GET /_synapse/admin/rooms?order_by=size
+
+{}
+```
+
+Response:
+
+```
+{
+  "rooms": [
+    {
+      "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
+      "name": "Matrix HQ",
+      "canonical_alias": "#matrix:matrix.org",
+      "joined_members": 8326
+    },
+    ... (98 hidden items) ...
+    {
+      "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
+      "name": "This Week In Matrix (TWIM)",
+      "canonical_alias": "#twim:matrix.org",
+      "joined_members": 314
+    }
+  ],
+  "offset": 0,
+  "total_rooms": 150
+  "next_token": 100
+}
+```
+
+The presence of the `next_batch` field tells us that there are more rooms than
+were returned in this request, and we need to make another request to get them.
+To get the next batch of room results, we repeat our request, setting the `from`
+parameter to the value of `next_batch`.
+
+```
+GET /_synapse/admin/rooms?order_by=size&from=100
+
+{}
+```
+
+Response:
+
+```
+{
+  "rooms": [
+    {
+      "room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
+      "name": "Music Theory",
+      "canonical_alias": "#musictheory:matrix.org",
+      "joined_members": 127
+    },
+    ... (48 hidden items) ...
+    {
+      "room_id": "!twcBhHVdZlQWuuxBhN:termina.org.uk",
+      "name": "weechat-matrix",
+      "canonical_alias": "#weechat-matrix:termina.org.uk",
+      "joined_members": 137
+    }
+  ],
+  "offset": 100,
+  "prev_batch": 0,
+  "total_rooms": 150
+}
+```
+
+Once the `next_batch` field is no longer present, we know we've reached the
+end of the list.
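The pagination contract documented above (repeat the request with `from` set to `next_batch` until it disappears) might look like this in a client script. A sketch; the homeserver URL and token are placeholders:

```python
# Sketch: walk the List Room API page by page, following next_batch until
# the server stops returning it.
import requests

BASE_URL = "https://synapse.example.org"                   # placeholder
HEADERS = {"Authorization": "Bearer secret-admin-token"}   # placeholder

def iter_all_rooms(order_by="size"):
    from_token = 0
    while True:
        resp = requests.get(
            "%s/_synapse/admin/rooms" % BASE_URL,
            params={"order_by": order_by, "from": from_token},
            headers=HEADERS,
        ).json()
        for room in resp["rooms"]:
            yield room
        # No next_batch means we have reached the end of the list.
        if "next_batch" not in resp:
            break
        from_token = resp["next_batch"]
```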
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 0a2505e7bb..8e8cf513b0 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -874,23 +874,6 @@ media_store_path: "DATADIR/media_store"
 # Optional account validity configuration. This allows for accounts to be denied
 # any request after a given period.
 #
-# ``enabled`` defines whether the account validity feature is enabled. Defaults
-# to False.
-#
-# ``period`` allows setting the period after which an account is valid
-# after its registration. When renewing the account, its validity period
-# will be extended by this amount of time. This parameter is required when using
-# the account validity feature.
-#
-# ``renew_at`` is the amount of time before an account's expiry date at which
-# Synapse will send an email to the account's email address with a renewal link.
-# This needs the ``email`` and ``public_baseurl`` configuration sections to be
-# filled.
-#
-# ``renew_email_subject`` is the subject of the email sent out with the renewal
-# link. ``%(app)s`` can be used as a placeholder for the ``app_name`` parameter
-# from the ``email`` section.
-#
 # Once this feature is enabled, Synapse will look for registered users without an
 # expiration date at startup and will add one to every account it found using the
 # current settings at that time.
@@ -901,21 +884,55 @@ media_store_path: "DATADIR/media_store"
 # date will be randomly selected within a range [now + period - d ; now + period],
 # where d is equal to 10% of the validity period.
 #
-#account_validity:
-#  enabled: true
-#  period: 6w
-#  renew_at: 1w
-#  renew_email_subject: "Renew your %(app)s account"
-#  # Directory in which Synapse will try to find the HTML files to serve to the
-#  # user when trying to renew an account. Optional, defaults to
-#  # synapse/res/templates.
-#  template_dir: "res/templates"
-#  # HTML to be displayed to the user after they successfully renewed their
-#  # account. Optional.
-#  account_renewed_html_path: "account_renewed.html"
-#  # HTML to be displayed when the user tries to renew an account with an invalid
-#  # renewal token. Optional.
-#  invalid_token_html_path: "invalid_token.html"
+account_validity:
+  # The account validity feature is disabled by default. Uncomment the
+  # following line to enable it.
+  #
+  #enabled: true
+
+  # The period after which an account is valid after its registration. When
+  # renewing the account, its validity period will be extended by this amount
+  # of time. This parameter is required when using the account validity
+  # feature.
+  #
+  #period: 6w
+
+  # The amount of time before an account's expiry date at which Synapse will
+  # send an email to the account's email address with a renewal link. By
+  # default, no such emails are sent.
+  #
+  # If you enable this setting, you will also need to fill out the 'email' and
+  # 'public_baseurl' configuration sections.
+  #
+  #renew_at: 1w
+
+  # The subject of the email sent out with the renewal link. '%(app)s' can be
+  # used as a placeholder for the 'app_name' parameter from the 'email'
+  # section.
+  #
+  # Note that the placeholder must be written '%(app)s', including the
+  # trailing 's'.
+  #
+  # If this is not set, a default value is used.
+  #
+  #renew_email_subject: "Renew your %(app)s account"
+
+  # Directory in which Synapse will try to find templates for the HTML files to
+  # serve to the user when trying to renew an account. If not set, default
+  # templates from within the Synapse package will be used.
+  #
+  #template_dir: "res/templates"
+
+  # File within 'template_dir' giving the HTML to be displayed to the user after
+  # they successfully renewed their account. If not set, default text is used.
+  #
+  #account_renewed_html_path: "account_renewed.html"
+
+  # File within 'template_dir' giving the HTML to be displayed when the user
+  # tries to renew an account with an invalid renewal token. If not set,
+  # default text is used.
+  #
+  #invalid_token_html_path: "invalid_token.html"
 
 # Time that a user's session remains valid for, after they log in.
 #
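The randomised expiration described in the comments above (a date in `[now + period - d, now + period]`, where `d` is 10% of the validity period) can be sketched as follows; this is an illustration of the stated formula, not Synapse's actual implementation:

```python
# Sketch of the randomised expiration date described above.
import random
import time

def random_expiration_ms(period_ms, now_ms=None):
    if now_ms is None:
        now_ms = int(time.time() * 1000)
    d = period_ms // 10  # 10% of the validity period
    return now_ms + period_ms - random.randint(0, d)

# e.g. a 6-week period, as in the "period: 6w" example above
six_weeks_ms = 6 * 7 * 24 * 60 * 60 * 1000
print(random_expiration_ms(six_weeks_ms))
```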
@@ -1353,107 +1370,110 @@ password_config:
    #pepper: "EVEN_MORE_SECRET"
 
 
+# Configuration for sending emails from Synapse.
+#
+email:
+  # The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
+  #
+  #smtp_host: mail.server
+
+  # The port on the mail server for outgoing SMTP. Defaults to 25.
+  #
+  #smtp_port: 587
+
+  # Username/password for authentication to the SMTP server. By default, no
+  # authentication is attempted.
+  #
+  # smtp_user: "exampleusername"
+  # smtp_pass: "examplepassword"
+
+  # Uncomment the following to require TLS transport security for SMTP.
+  # By default, Synapse will connect over plain text, and will then switch to
+  # TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
+  # Synapse will refuse to connect unless the server supports STARTTLS.
+  #
+  #require_transport_security: true
+
+  # Enable sending emails for messages that the user has missed
+  #
+  #enable_notifs: false
+
+  # notif_from defines the "From" address to use when sending emails.
+  # It must be set if email sending is enabled.
+  #
+  # The placeholder '%(app)s' will be replaced by the application name,
+  # which is normally 'app_name' (below), but may be overridden by the
+  # Matrix client application.
+  #
+  # Note that the placeholder must be written '%(app)s', including the
+  # trailing 's'.
+  #
+  #notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
+
+  # app_name defines the default value for '%(app)s' in notif_from. It
+  # defaults to 'Matrix'.
+  #
+  #app_name: my_branded_matrix_server
+
+  # Uncomment the following to disable automatic subscription to email
+  # notifications for new users. Enabled by default.
+  #
+  #notif_for_new_users: false
+
+  # Custom URL for client links within the email notifications. By default
+  # links will be based on "https://matrix.to".
+  #
+  # (This setting used to be called riot_base_url; the old name is still
+  # supported for backwards-compatibility but is now deprecated.)
+  #
+  #client_base_url: "http://localhost/riot"
 
-# Enable sending emails for password resets, notification events or
-# account expiry notices
-#
-# If your SMTP server requires authentication, the optional smtp_user &
-# smtp_pass variables should be used
-#
-#email:
-#   enable_notifs: false
-#   smtp_host: "localhost"
-#   smtp_port: 25 # SSL: 465, STARTTLS: 587
-#   smtp_user: "exampleusername"
-#   smtp_pass: "examplepassword"
-#   require_transport_security: false
-#
-#   # notif_from defines the "From" address to use when sending emails.
-#   # It must be set if email sending is enabled.
-#   #
-#   # The placeholder '%(app)s' will be replaced by the application name,
-#   # which is normally 'app_name' (below), but may be overridden by the
-#   # Matrix client application.
-#   #
-#   # Note that the placeholder must be written '%(app)s', including the
-#   # trailing 's'.
-#   #
-#   notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
-#
-#   # app_name defines the default value for '%(app)s' in notif_from. It
-#   # defaults to 'Matrix'.
-#   #
-#   #app_name: my_branded_matrix_server
-#
-#   # Enable email notifications by default
-#   #
-#   notif_for_new_users: true
-#
-#   # Defining a custom URL for Riot is only needed if email notifications
-#   # should contain links to a self-hosted installation of Riot; when set
-#   # the "app_name" setting is ignored
-#   #
-#   riot_base_url: "http://localhost/riot"
-#
-#   # Configure the time that a validation email or text message code
-#   # will expire after sending
-#   #
-#   # This is currently used for password resets
-#   #
-#   #validation_token_lifetime: 1h
-#
-#   # Template directory. All template files should be stored within this
-#   # directory. If not set, default templates from within the Synapse
-#   # package will be used
-#   #
-#   # For the list of default templates, please see
-#   # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
-#   #
-#   #template_dir: res/templates
-#
-#   # Templates for email notifications
-#   #
-#   notif_template_html: notif_mail.html
-#   notif_template_text: notif_mail.txt
-#
-#   # Templates for account expiry notices
-#   #
-#   expiry_template_html: notice_expiry.html
-#   expiry_template_text: notice_expiry.txt
-#
-#   # Templates for password reset emails sent by the homeserver
-#   #
-#   #password_reset_template_html: password_reset.html
-#   #password_reset_template_text: password_reset.txt
-#
-#   # Templates for registration emails sent by the homeserver
-#   #
-#   #registration_template_html: registration.html
-#   #registration_template_text: registration.txt
-#
-#   # Templates for validation emails sent by the homeserver when adding an email to
-#   # your user account
-#   #
-#   #add_threepid_template_html: add_threepid.html
-#   #add_threepid_template_text: add_threepid.txt
-#
-#   # Templates for password reset success and failure pages that a user
-#   # will see after attempting to reset their password
-#   #
-#   #password_reset_template_success_html: password_reset_success.html
-#   #password_reset_template_failure_html: password_reset_failure.html
-#
-#   # Templates for registration success and failure pages that a user
-#   # will see after attempting to register using an email or phone
-#   #
-#   #registration_template_success_html: registration_success.html
-#   #registration_template_failure_html: registration_failure.html
-#
-#   # Templates for success and failure pages that a user will see after attempting
-#   # to add an email or phone to their account
-#   #
-#   #add_threepid_success_html: add_threepid_success.html
-#   #add_threepid_failure_html: add_threepid_failure.html
+  # Configure the time that a validation email will expire after sending.
+  # Defaults to 1h.
+  #
+  #validation_token_lifetime: 15m
+
+  # Directory in which Synapse will try to find the template files below.
+  # If not set, default templates from within the Synapse package will be used.
+  #
+  # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
+  # If you *do* uncomment it, you will need to make sure that all the templates
+  # below are in the directory.
+  #
+  # Synapse will look for the following templates in this directory:
+  #
+  # * The contents of email notifications of missed events: 'notif_mail.html' and
+  #   'notif_mail.txt'.
+  #
+  # * The contents of account expiry notice emails: 'notice_expiry.html' and
+  #   'notice_expiry.txt'.
+  #
+  # * The contents of password reset emails sent by the homeserver:
+  #   'password_reset.html' and 'password_reset.txt'
+  #
+  # * HTML pages for success and failure that a user will see when they follow
+  #   the link in the password reset email: 'password_reset_success.html' and
+  #   'password_reset_failure.html'
+  #
+  # * The contents of address verification emails sent during registration:
+  #   'registration.html' and 'registration.txt'
+  #
+  # * HTML pages for success and failure that a user will see when they follow
+  #   the link in an address verification email sent during registration:
+  #   'registration_success.html' and 'registration_failure.html'
+  #
+  # * The contents of address verification emails sent when an address is added
+  #   to a Matrix account: 'add_threepid.html' and 'add_threepid.txt'
+  #
+  # * HTML pages for success and failure that a user will see when they follow
+  #   the link in an address verification email sent when an address is added
+  #   to a Matrix account: 'add_threepid_success.html' and
+  #   'add_threepid_failure.html'
+  #
+  # You can see the default templates at:
+  # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
+  #
+  #template_dir: "res/templates"
 
 
 #password_providers:
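The `%(app)s` placeholder used in `notif_from` and `renew_email_subject` above is plain Python %-style interpolation with a named parameter, which is why the trailing `s` is mandatory. A one-line sketch:

```python
# Why '%(app)s' must include the trailing 's': it is Python %-formatting,
# and the 's' is the string conversion type.
notif_from = "Your Friendly %(app)s homeserver <noreply@example.com>"
print(notif_from % {"app": "Matrix"})
# -> Your Friendly Matrix homeserver <noreply@example.com>
```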
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
index ba9e874d07..e3a4634b14 100644
--- a/docs/tcp_replication.md
+++ b/docs/tcp_replication.md
@@ -209,7 +209,7 @@ Where `<token>` may be either:
  * a numeric stream_id to stream updates since (exclusive)
  * `NOW` to stream all subsequent updates.
 
-The `<stream_name>` is the name of a replication stream to subscribe 
+The `<stream_name>` is the name of a replication stream to subscribe
 to (see [here](../synapse/replication/tcp/streams/_base.py) for a list
 of streams). It can also be `ALL` to subscribe to all known streams,
 in which case the `<token>` must be set to `NOW`.
@@ -234,6 +234,10 @@ in which case the `<token>` must be set to `NOW`.
 
    Used exclusively in tests
 
+### REMOTE_SERVER_UP (S, C)
+
+   Inform other processes that a remote server may have come back online.
+
 See `synapse/replication/tcp/commands.py` for a detailed description and
 the format of each command.
 
@@ -250,6 +254,11 @@ and the key to invalidate. For example:
 
     > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]
 
+Alternatively, an entire cache can be invalidated by sending down a `null`
+instead of the key. For example:
+
+    > RDATA caches 550953772 ["get_user_by_id", null, 1550574873252]
+
 However, there are times when a number of caches need to be invalidated
 at the same time with the same key. To reduce traffic we batch those
 invalidations into a single poke by defining a special cache name that
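A consumer of the `caches` stream therefore has to treat a `null` key as "invalidate the whole named cache". A hedged sketch of that dispatch; the `caches` registry below is illustrative only, not Synapse's real internal structure:

```python
# Sketch: interpreting rows from the "caches" replication stream, where a
# JSON null (Python None) key means "invalidate the entire named cache".
import json

caches = {"get_user_by_id": {("@bob:example.com",): "cached row"}}

def on_caches_row(row_json):
    cache_name, key, _invalidation_ts = json.loads(row_json)
    cache = caches.get(cache_name)
    if cache is None:
        return
    if key is None:
        cache.clear()                # null key: invalidate everything
    else:
        cache.pop(tuple(key), None)  # invalidate a single key

on_caches_row('["get_user_by_id", ["@bob:example.com"], 1550574873251]')
on_caches_row('["get_user_by_id", null, 1550574873252]')
```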
diff --git a/docs/workers.md b/docs/workers.md
index 1b5d94f5eb..0ab269fd96 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -168,8 +168,11 @@ endpoints matching the following regular expressions:
     ^/_matrix/federation/v1/make_join/
     ^/_matrix/federation/v1/make_leave/
     ^/_matrix/federation/v1/send_join/
+    ^/_matrix/federation/v2/send_join/
     ^/_matrix/federation/v1/send_leave/
+    ^/_matrix/federation/v2/send_leave/
     ^/_matrix/federation/v1/invite/
+    ^/_matrix/federation/v2/invite/
     ^/_matrix/federation/v1/query_auth/
     ^/_matrix/federation/v1/event_auth/
     ^/_matrix/federation/v1/exchange_third_party_invite/
@@ -199,7 +202,9 @@ Handles the media repository. It can handle all endpoints starting with:
 ... and the following regular expressions matching media-specific administration APIs:
 
     ^/_synapse/admin/v1/purge_media_cache$
-    ^/_synapse/admin/v1/room/.*/media$
+    ^/_synapse/admin/v1/room/.*/media.*$
+    ^/_synapse/admin/v1/user/.*/media.*$
+    ^/_synapse/admin/v1/media/.*$
     ^/_synapse/admin/v1/quarantine_media/.*$
 
 You should also set `enable_media_repo: False` in the shared configuration
@@ -288,6 +293,7 @@ file. For example:
 Handles some event creation. It can handle REST endpoints matching:
 
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
     ^/_matrix/client/(api/v1|r0|unstable)/join/
     ^/_matrix/client/(api/v1|r0|unstable)/profile/
diff --git a/mypy.ini b/mypy.ini
index a66434b76b..69be2f67ad 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -7,6 +7,9 @@ show_error_codes = True
 show_traceback = True
 mypy_path = stubs
 
+[mypy-pymacaroons.*]
+ignore_missing_imports = True
+
 [mypy-zope]
 ignore_missing_imports = True
 
@@ -63,3 +66,12 @@ ignore_missing_imports = True
 
 [mypy-sentry_sdk]
 ignore_missing_imports = True
+
+[mypy-PIL.*]
+ignore_missing_imports = True
+
+[mypy-lxml]
+ignore_missing_imports = True
+
+[mypy-jwt.*]
+ignore_missing_imports = True
diff --git a/scripts-dev/update_database b/scripts-dev/update_database
index 1d62f0403a..94aa8758b4 100755
--- a/scripts-dev/update_database
+++ b/scripts-dev/update_database
@@ -22,10 +22,12 @@ import yaml
 
 from twisted.internet import defer, reactor
 
+import synapse
 from synapse.config.homeserver import HomeServerConfig
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.server import HomeServer
 from synapse.storage import DataStore
+from synapse.util.versionstring import get_version_string
 
 logger = logging.getLogger("update_database")
 
@@ -38,6 +40,8 @@ class MockHomeserver(HomeServer):
             config.server_name, reactor=reactor, config=config, **kwargs
         )
 
+        self.version_string = "Synapse/"+get_version_string(synapse)
+
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
@@ -81,15 +85,17 @@ if __name__ == "__main__":
     hs.setup()
     store = hs.get_datastore()
 
-    @defer.inlineCallbacks
-    def run_background_updates():
-        yield store.db.updates.run_background_updates(sleep=False)
+    async def run_background_updates():
+        await store.db.updates.run_background_updates(sleep=False)
         # Stop the reactor to exit the script once every background update is run.
         reactor.stop()
 
-    # Apply all background updates on the database.
-    reactor.callWhenRunning(
-        lambda: run_as_background_process("background_updates", run_background_updates)
-    )
+    def run():
+        # Apply all background updates on the database.
+        defer.ensureDeferred(
+            run_as_background_process("background_updates", run_background_updates)
+        )
+
+    reactor.callWhenRunning(run)
 
     reactor.run()
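The pattern in the hunk above, replacing `@defer.inlineCallbacks`/`yield` with a native coroutine wrapped in `defer.ensureDeferred` before the reactor starts, generalises beyond this script. A standalone sketch, with `do_work()` as a made-up stand-in for the background updates:

```python
# Sketch of the inlineCallbacks -> async/await port used above: a native
# coroutine must be wrapped in defer.ensureDeferred to run under Twisted.
from twisted.internet import defer, reactor, task

async def do_work():
    # stand-in for store.db.updates.run_background_updates(sleep=False)
    await task.deferLater(reactor, 0.1, lambda: None)
    print("background updates complete")
    reactor.stop()

def run():
    defer.ensureDeferred(do_work())

reactor.callWhenRunning(run)
reactor.run()
```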
diff --git a/scripts/hash_password b/scripts/hash_password
index a1eb0769da..a30767f758 100755
--- a/scripts/hash_password
+++ b/scripts/hash_password
@@ -52,7 +52,7 @@ if __name__ == "__main__":
     if "config" in args and args.config:
         config = yaml.safe_load(args.config)
         bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
-        password_config = config.get("password_config", {})
+        password_config = config.get("password_config", None) or {}
         password_pepper = password_config.get("pepper", password_pepper)
     password = args.password
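The one-line change above matters because `dict.get(key, default)` only falls back to the default when the key is *absent*; an empty `password_config:` block in YAML parses to an explicit `None`. A sketch of the difference:

```python
# Why `config.get("password_config", {})` was not enough: an empty YAML
# block yields an explicit None, and .get() only uses its default when
# the key is missing entirely.
import yaml

config = yaml.safe_load("password_config:\n")        # {'password_config': None}

broken = config.get("password_config", {})           # -> None (key exists)
fixed = config.get("password_config", None) or {}    # -> {}

print(broken)                                # None -> AttributeError on .get("pepper", ...)
print(fixed.get("pepper", "no pepper set"))  # safe
```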
 
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index cb77314f1e..e8b698f3ff 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -27,13 +27,16 @@ from six import string_types
 
 import yaml
 
-from twisted.enterprise import adbapi
 from twisted.internet import defer, reactor
 
+import synapse
 from synapse.config.database import DatabaseConnectionConfig
 from synapse.config.homeserver import HomeServerConfig
-from synapse.logging.context import PreserveLoggingContext
-from synapse.storage._base import LoggingTransaction
+from synapse.logging.context import (
+    LoggingContext,
+    make_deferred_yieldable,
+    run_in_background,
+)
 from synapse.storage.data_stores.main.client_ips import ClientIpBackgroundUpdateStore
 from synapse.storage.data_stores.main.deviceinbox import (
     DeviceInboxBackgroundUpdateStore,
@@ -61,6 +64,7 @@ from synapse.storage.database import Database, make_conn
 from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
 from synapse.util import Clock
+from synapse.util.versionstring import get_version_string
 
 logger = logging.getLogger("synapse_port_db")
 
@@ -125,6 +129,13 @@ APPEND_ONLY_TABLES = [
 ]
 
 
+# Error returned by the run function. Used at the top-level part of the script to
+# handle errors and return codes.
+end_error = None
+# The exec_info for the error, if any. If error is defined but not exec_info, the
+# script will show only the error message without the stacktrace. If exec_info is
+# defined but not the error, the script will show nothing outside of what's printed
+# in the run function. If both are defined, the script will print both the error
+# and the stacktrace.
 end_error_exec_info = None
 
 
@@ -177,6 +188,7 @@ class MockHomeserver:
         self.clock = Clock(reactor)
         self.config = config
         self.hostname = config.server_name
+        self.version_string = "Synapse/"+get_version_string(synapse)
 
     def get_clock(self):
         return self.clock
@@ -189,11 +201,10 @@ class Porter(object):
     def __init__(self, **kwargs):
         self.__dict__.update(kwargs)
 
-    @defer.inlineCallbacks
-    def setup_table(self, table):
+    async def setup_table(self, table):
         if table in APPEND_ONLY_TABLES:
             # It's safe to just carry on inserting.
-            row = yield self.postgres_store.db.simple_select_one(
+            row = await self.postgres_store.db.simple_select_one(
                 table="port_from_sqlite3",
                 keyvalues={"table_name": table},
                 retcols=("forward_rowid", "backward_rowid"),
@@ -207,10 +218,10 @@ class Porter(object):
                         forward_chunk,
                         already_ported,
                         total_to_port,
-                    ) = yield self._setup_sent_transactions()
+                    ) = await self._setup_sent_transactions()
                     backward_chunk = 0
                 else:
-                    yield self.postgres_store.db.simple_insert(
+                    await self.postgres_store.db.simple_insert(
                         table="port_from_sqlite3",
                         values={
                             "table_name": table,
@@ -227,7 +238,7 @@ class Porter(object):
                 backward_chunk = row["backward_rowid"]
 
             if total_to_port is None:
-                already_ported, total_to_port = yield self._get_total_count_to_port(
+                already_ported, total_to_port = await self._get_total_count_to_port(
                     table, forward_chunk, backward_chunk
                 )
         else:
@@ -238,9 +249,9 @@ class Porter(object):
                 )
                 txn.execute("TRUNCATE %s CASCADE" % (table,))
 
-            yield self.postgres_store.execute(delete_all)
+            await self.postgres_store.execute(delete_all)
 
-            yield self.postgres_store.db.simple_insert(
+            await self.postgres_store.db.simple_insert(
                 table="port_from_sqlite3",
                 values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0},
             )
@@ -248,16 +259,13 @@ class Porter(object):
             forward_chunk = 1
             backward_chunk = 0
 
-            already_ported, total_to_port = yield self._get_total_count_to_port(
+            already_ported, total_to_port = await self._get_total_count_to_port(
                 table, forward_chunk, backward_chunk
             )
 
-        defer.returnValue(
-            (table, already_ported, total_to_port, forward_chunk, backward_chunk)
-        )
+        return table, already_ported, total_to_port, forward_chunk, backward_chunk
 
-    @defer.inlineCallbacks
-    def handle_table(
+    async def handle_table(
         self, table, postgres_size, table_size, forward_chunk, backward_chunk
     ):
         logger.info(
@@ -275,7 +283,7 @@ class Porter(object):
         self.progress.add_table(table, postgres_size, table_size)
 
         if table == "event_search":
-            yield self.handle_search_table(
+            await self.handle_search_table(
                 postgres_size, table_size, forward_chunk, backward_chunk
             )
             return
@@ -294,7 +302,7 @@ class Porter(object):
         if table == "user_directory_stream_pos":
             # We need to make sure there is a single row, `(X, null), as that is
             # what synapse expects to be there.
-            yield self.postgres_store.db.simple_insert(
+            await self.postgres_store.db.simple_insert(
                 table=table, values={"stream_id": None}
             )
             self.progress.update(table, table_size)  # Mark table as done
@@ -335,7 +343,7 @@ class Porter(object):
 
                 return headers, forward_rows, backward_rows
 
-            headers, frows, brows = yield self.sqlite_store.db.runInteraction(
+            headers, frows, brows = await self.sqlite_store.db.runInteraction(
                 "select", r
             )
 
@@ -361,7 +369,7 @@ class Porter(object):
                         },
                     )
 
-                yield self.postgres_store.execute(insert)
+                await self.postgres_store.execute(insert)
 
                 postgres_size += len(rows)
 
@@ -369,8 +377,7 @@ class Porter(object):
             else:
                 return
 
-    @defer.inlineCallbacks
-    def handle_search_table(
+    async def handle_search_table(
         self, postgres_size, table_size, forward_chunk, backward_chunk
     ):
         select = (
@@ -390,7 +397,7 @@ class Porter(object):
 
                 return headers, rows
 
-            headers, rows = yield self.sqlite_store.db.runInteraction("select", r)
+            headers, rows = await self.sqlite_store.db.runInteraction("select", r)
 
             if rows:
                 forward_chunk = rows[-1][0] + 1
@@ -438,7 +445,7 @@ class Porter(object):
                         },
                     )
 
-                yield self.postgres_store.execute(insert)
+                await self.postgres_store.execute(insert)
 
                 postgres_size += len(rows)
 
@@ -447,20 +454,15 @@ class Porter(object):
             else:
                 return
 
-    def setup_db(self, db_config: DatabaseConnectionConfig, engine):
-        db_conn = make_conn(db_config, engine)
-        prepare_database(db_conn, engine, config=None)
-
-        db_conn.commit()
-
-        return db_conn
-
-    @defer.inlineCallbacks
-    def build_db_store(self, db_config: DatabaseConnectionConfig):
+    def build_db_store(
+        self, db_config: DatabaseConnectionConfig, allow_outdated_version: bool = False,
+    ):
         """Builds and returns a database store using the provided configuration.
 
         Args:
-            config: The database configuration
+            db_config: The database configuration
+            allow_outdated_version: True to suppress errors about the database server
+                version being too old to run a complete synapse
 
         Returns:
             The built Store object.
@@ -468,24 +470,23 @@ class Porter(object):
         self.progress.set_state("Preparing %s" % db_config.config["name"])
 
         engine = create_engine(db_config.config)
-        conn = self.setup_db(db_config, engine)
 
         hs = MockHomeserver(self.hs_config)
 
-        store = Store(Database(hs, db_config, engine), conn, hs)
-
-        yield store.db.runInteraction(
-            "%s_engine.check_database" % db_config.config["name"],
-            engine.check_database,
-        )
+        with make_conn(db_config, engine) as db_conn:
+            engine.check_database(
+                db_conn, allow_outdated_version=allow_outdated_version
+            )
+            prepare_database(db_conn, engine, config=self.hs_config)
+            store = Store(Database(hs, db_config, engine), db_conn, hs)
+            db_conn.commit()
 
         return store
 
-    @defer.inlineCallbacks
-    def run_background_updates_on_postgres(self):
+    async def run_background_updates_on_postgres(self):
         # Manually apply all background updates on the PostgreSQL database.
         postgres_ready = (
-            yield self.postgres_store.db.updates.has_completed_background_updates()
+            await self.postgres_store.db.updates.has_completed_background_updates()
         )
 
         if not postgres_ready:
@@ -494,35 +495,44 @@ class Porter(object):
             self.progress.set_state("Running background updates on PostgreSQL")
 
         while not postgres_ready:
-            yield self.postgres_store.db.updates.do_next_background_update(100)
-            postgres_ready = yield (
+            await self.postgres_store.db.updates.do_next_background_update(100)
+            postgres_ready = await (
                 self.postgres_store.db.updates.has_completed_background_updates()
             )
 
-    @defer.inlineCallbacks
-    def run(self):
+    async def run(self):
+        """Ports the SQLite database to a PostgreSQL database.
+
+        When a fatal error is met, its message is assigned to the global "end_error"
+        variable. When this error comes with a stacktrace, its exec_info is assigned to
+        the global "end_error_exec_info" variable.
+        """
+        global end_error
+
         try:
-            self.sqlite_store = yield self.build_db_store(
-                DatabaseConnectionConfig("master-sqlite", self.sqlite_config)
+            # we allow people to port away from outdated versions of sqlite.
+            self.sqlite_store = self.build_db_store(
+                DatabaseConnectionConfig("master-sqlite", self.sqlite_config),
+                allow_outdated_version=True,
             )
 
             # Check if all background updates are done, abort if not.
             updates_complete = (
-                yield self.sqlite_store.db.updates.has_completed_background_updates()
+                await self.sqlite_store.db.updates.has_completed_background_updates()
             )
             if not updates_complete:
-                sys.stderr.write(
+                end_error = (
                     "Pending background updates exist in the SQLite3 database."
                     " Please start Synapse again and wait until every update has finished"
                     " before running this script.\n"
                 )
-                defer.returnValue(None)
+                return
 
-            self.postgres_store = yield self.build_db_store(
+            self.postgres_store = self.build_db_store(
                 self.hs_config.get_single_database()
             )
 
-            yield self.run_background_updates_on_postgres()
+            await self.run_background_updates_on_postgres()
 
             self.progress.set_state("Creating port tables")
 
@@ -550,22 +560,22 @@ class Porter(object):
                 )
 
             try:
-                yield self.postgres_store.db.runInteraction("alter_table", alter_table)
+                await self.postgres_store.db.runInteraction("alter_table", alter_table)
             except Exception:
                 # On Error Resume Next
                 pass
 
-            yield self.postgres_store.db.runInteraction(
+            await self.postgres_store.db.runInteraction(
                 "create_port_table", create_port_table
             )
 
             # Step 2. Get tables.
             self.progress.set_state("Fetching tables")
-            sqlite_tables = yield self.sqlite_store.db.simple_select_onecol(
+            sqlite_tables = await self.sqlite_store.db.simple_select_onecol(
                 table="sqlite_master", keyvalues={"type": "table"}, retcol="name"
             )
 
-            postgres_tables = yield self.postgres_store.db.simple_select_onecol(
+            postgres_tables = await self.postgres_store.db.simple_select_onecol(
                 table="information_schema.tables",
                 keyvalues={},
                 retcol="distinct table_name",
@@ -576,28 +586,34 @@ class Porter(object):
 
             # Step 3. Figure out what still needs copying
             self.progress.set_state("Checking on port progress")
-            setup_res = yield defer.gatherResults(
-                [
-                    self.setup_table(table)
-                    for table in tables
-                    if table not in ["schema_version", "applied_schema_deltas"]
-                    and not table.startswith("sqlite_")
-                ],
-                consumeErrors=True,
+            setup_res = await make_deferred_yieldable(
+                defer.gatherResults(
+                    [
+                        run_in_background(self.setup_table, table)
+                        for table in tables
+                        if table not in ["schema_version", "applied_schema_deltas"]
+                        and not table.startswith("sqlite_")
+                    ],
+                    consumeErrors=True,
+                )
             )
 
             # Step 4. Do the copying.
             self.progress.set_state("Copying to postgres")
-            yield defer.gatherResults(
-                [self.handle_table(*res) for res in setup_res], consumeErrors=True
+            await make_deferred_yieldable(
+                defer.gatherResults(
+                    [run_in_background(self.handle_table, *res) for res in setup_res],
+                    consumeErrors=True,
+                )
             )
 
             # Step 5. Do final post-processing
-            yield self._setup_state_group_id_seq()
+            await self._setup_state_group_id_seq()
 
             self.progress.done()
-        except Exception:
+        except Exception as e:
             global end_error_exec_info
+            end_error = str(e)
             end_error_exec_info = sys.exc_info()
             logger.exception("")
         finally:
@@ -637,8 +653,7 @@ class Porter(object):
 
         return outrows
 
-    @defer.inlineCallbacks
-    def _setup_sent_transactions(self):
+    async def _setup_sent_transactions(self):
         # Only save things from the last day
         yesterday = int(time.time() * 1000) - 86400000
 
@@ -659,7 +674,7 @@ class Porter(object):
 
             return headers, [r for r in rows if r[ts_ind] < yesterday]
 
-        headers, rows = yield self.sqlite_store.db.runInteraction("select", r)
+        headers, rows = await self.sqlite_store.db.runInteraction("select", r)
 
         rows = self._convert_rows("sent_transactions", headers, rows)
 
@@ -672,7 +687,7 @@ class Porter(object):
                     txn, "sent_transactions", headers[1:], rows
                 )
 
-            yield self.postgres_store.execute(insert)
+            await self.postgres_store.execute(insert)
         else:
             max_inserted_rowid = 0
 
@@ -689,10 +704,10 @@ class Porter(object):
             else:
                 return 1
 
-        next_chunk = yield self.sqlite_store.execute(get_start_id)
+        next_chunk = await self.sqlite_store.execute(get_start_id)
         next_chunk = max(max_inserted_rowid + 1, next_chunk)
 
-        yield self.postgres_store.db.simple_insert(
+        await self.postgres_store.db.simple_insert(
             table="port_from_sqlite3",
             values={
                 "table_name": "sent_transactions",
@@ -708,46 +723,49 @@ class Porter(object):
             (size,) = txn.fetchone()
             return int(size)
 
-        remaining_count = yield self.sqlite_store.execute(get_sent_table_size)
+        remaining_count = await self.sqlite_store.execute(get_sent_table_size)
 
         total_count = remaining_count + inserted_rows
 
-        defer.returnValue((next_chunk, inserted_rows, total_count))
+        return next_chunk, inserted_rows, total_count
 
-    @defer.inlineCallbacks
-    def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
-        frows = yield self.sqlite_store.execute_sql(
+    async def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
+        frows = await self.sqlite_store.execute_sql(
             "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk
         )
 
-        brows = yield self.sqlite_store.execute_sql(
+        brows = await self.sqlite_store.execute_sql(
             "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk
         )
 
-        defer.returnValue(frows[0][0] + brows[0][0])
+        return frows[0][0] + brows[0][0]
 
-    @defer.inlineCallbacks
-    def _get_already_ported_count(self, table):
-        rows = yield self.postgres_store.execute_sql(
+    async def _get_already_ported_count(self, table):
+        rows = await self.postgres_store.execute_sql(
             "SELECT count(*) FROM %s" % (table,)
         )
 
-        defer.returnValue(rows[0][0])
+        return rows[0][0]
 
-    @defer.inlineCallbacks
-    def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
-        remaining, done = yield defer.gatherResults(
-            [
-                self._get_remaining_count_to_port(table, forward_chunk, backward_chunk),
-                self._get_already_ported_count(table),
-            ],
-            consumeErrors=True,
+    async def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
+        remaining, done = await make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(
+                        self._get_remaining_count_to_port,
+                        table,
+                        forward_chunk,
+                        backward_chunk,
+                    ),
+                    run_in_background(self._get_already_ported_count, table),
+                ],
+            )
         )
 
         remaining = int(remaining) if remaining else 0
         done = int(done) if done else 0
 
-        defer.returnValue((done, remaining + done))
+        return done, remaining + done
 
     def _setup_state_group_id_seq(self):
         def r(txn):
@@ -1013,7 +1031,12 @@ if __name__ == "__main__":
             hs_config=config,
         )
 
-        reactor.callWhenRunning(porter.run)
+        @defer.inlineCallbacks
+        def run():
+            with LoggingContext("synapse_port_db_run"):
+                yield defer.ensureDeferred(porter.run())
+
+        reactor.callWhenRunning(run)
 
         reactor.run()
 
@@ -1022,7 +1045,11 @@ if __name__ == "__main__":
     else:
         start()
 
-    if end_error_exec_info:
-        exc_type, exc_value, exc_traceback = end_error_exec_info
-        traceback.print_exception(exc_type, exc_value, exc_traceback)
+    if end_error:
+        if end_error_exec_info:
+            exc_type, exc_value, exc_traceback = end_error_exec_info
+            traceback.print_exception(exc_type, exc_value, exc_traceback)
+
+        sys.stderr.write(end_error)
+
         sys.exit(5)
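
The reactor glue in the final hunk above is a small reusable pattern: a native coroutine cannot be handed to callWhenRunning directly, so it is wrapped in a Deferred via defer.ensureDeferred, inside a LoggingContext so its log lines stay attributable. A minimal sketch, with a placeholder coroutine standing in for Porter.run():

    from twisted.internet import defer, reactor

    from synapse.logging.context import LoggingContext


    async def port_everything():
        # Stand-in for porter.run(): any awaitable work.
        pass


    @defer.inlineCallbacks
    def run():
        with LoggingContext("synapse_port_db_run"):
            # ensureDeferred turns the coroutine into a Deferred that
            # inlineCallbacks can yield on.
            yield defer.ensureDeferred(port_everything())
        reactor.stop()


    reactor.callWhenRunning(run)
    reactor.run()
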
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 0dd538d804..1c44ca0999 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.8.0"
+__version__ = "1.9.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index abbc7079a3..2cbfab2569 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -14,7 +14,6 @@
 # limitations under the License.
 
 import logging
-from typing import Dict, Tuple
 
 from six import itervalues
 
@@ -35,7 +34,7 @@ from synapse.api.errors import (
     ResourceLimitError,
 )
 from synapse.config.server import is_threepid_reserved
-from synapse.types import UserID
+from synapse.types import StateMap, UserID
 from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.metrics import Measure
@@ -509,10 +508,7 @@ class Auth(object):
         return self.store.is_server_admin(user)
 
     def compute_auth_events(
-        self,
-        event,
-        current_state_ids: Dict[Tuple[str, str], str],
-        for_verification: bool = False,
+        self, event, current_state_ids: StateMap[str], for_verification: bool = False,
     ):
         """Given an event and current state return the list of event IDs used
         to auth an event.
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 5853a54c95..1c9456e583 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -17,13 +17,15 @@
 """Contains exceptions and error codes."""
 
 import logging
-from typing import Dict
+from typing import Dict, List
 
 from six import iteritems
 from six.moves import http_client
 
 from canonicaljson import json
 
+from twisted.web import http
+
 logger = logging.getLogger(__name__)
 
 
@@ -80,6 +82,29 @@ class CodeMessageException(RuntimeError):
         self.msg = msg
 
 
+class RedirectException(CodeMessageException):
+    """A pseudo-error indicating that we want to redirect the client to a different
+    location
+
+    Attributes:
+        cookies: a list of Set-Cookie values to add to the response. For example:
+           b"sessionId=a3fWa; Expires=Wed, 21 Oct 2015 07:28:00 GMT"
+    """
+
+    def __init__(self, location: bytes, http_code: int = http.FOUND):
+        """
+
+        Args:
+            location: the URI to redirect to
+            http_code: the HTTP response code
+        """
+        msg = "Redirect to %s" % (location.decode("utf-8"),)
+        super().__init__(code=http_code, msg=msg)
+        self.location = location
+
+        self.cookies = []  # type: List[bytes]
+
+
 class SynapseError(CodeMessageException):
     """A base exception type for matrix errors which have an errcode and error
     message (as well as an HTTP status code).
@@ -158,12 +183,6 @@ class UserDeactivatedError(SynapseError):
         )
 
 
-class RegistrationError(SynapseError):
-    """An error raised when a registration event fails."""
-
-    pass
-
-
 class FederationDeniedError(SynapseError):
     """An error raised when the server tries to federate with a server which
     is not on its federation whitelist.
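
A usage sketch for the new RedirectException (the handler below is hypothetical; the cookie value is the one from the docstring). Raising it from request-handling code signals the HTTP layer to answer with a redirect response carrying any attached Set-Cookie headers:

    from synapse.api.errors import RedirectException


    def on_legacy_path(request):
        e = RedirectException(b"https://example.com/new_location")
        # Each entry is a complete Set-Cookie header value, emitted verbatim.
        e.cookies.append(b"sessionId=a3fWa; Expires=Wed, 21 Oct 2015 07:28:00 GMT")
        raise e
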
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 6eab1f13f0..8b64d0a285 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -15,6 +15,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import List
+
 from six import text_type
 
 import jsonschema
@@ -293,7 +295,7 @@ class Filter(object):
             room_id = None
             ev_type = "m.presence"
             contains_url = False
-            labels = []
+            labels = []  # type: List[str]
         else:
             sender = event.get("sender", None)
             if not sender:
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index 172841f595..7a049b3af7 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import collections
+from collections import OrderedDict
+from typing import Any, Optional, Tuple
 
 from synapse.api.errors import LimitExceededError
 
@@ -23,7 +24,9 @@ class Ratelimiter(object):
     """
 
     def __init__(self):
-        self.message_counts = collections.OrderedDict()
+        self.message_counts = (
+            OrderedDict()
+        )  # type: OrderedDict[Any, Tuple[float, int, Optional[float]]]
 
     def can_do_action(self, key, time_now_s, rate_hz, burst_count, update=True):
         """Can the entity (e.g. user or IP address) perform the action?
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 8e36bc57d3..1c7c6ec0c8 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -84,8 +84,7 @@ class AdminCmdServer(HomeServer):
 
 
 class AdminCmdReplicationHandler(ReplicationClientHandler):
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
+    async def on_rdata(self, stream_name, token, rows):
         pass
 
     def get_streams_to_replicate(self):
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index e82e0f11e3..2217d4a4fb 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -115,9 +115,8 @@ class ASReplicationHandler(ReplicationClientHandler):
         super(ASReplicationHandler, self).__init__(hs.get_datastore())
         self.appservice_handler = hs.get_application_service_handler()
 
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
-        yield super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
+    async def on_rdata(self, stream_name, token, rows):
+        await super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
 
         if stream_name == "events":
             max_stream_id = self.store.get_room_max_stream_ordering()
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 3edfe19567..ca96da6a4a 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -62,6 +62,9 @@ from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
 from synapse.rest.client.versions import VersionsRestServlet
 from synapse.server import HomeServer
+from synapse.storage.data_stores.main.monthly_active_users import (
+    MonthlyActiveUsersWorkerStore,
+)
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
@@ -85,6 +88,7 @@ class ClientReaderSlavedStore(
     SlavedTransactionStore,
     SlavedProfileStore,
     SlavedClientIpStore,
+    MonthlyActiveUsersWorkerStore,
     BaseSlavedStore,
 ):
     pass
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index d0ddbe38fc..58e5b354f6 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -56,6 +56,9 @@ from synapse.rest.client.v1.room import (
     RoomStateEventRestServlet,
 )
 from synapse.server import HomeServer
+from synapse.storage.data_stores.main.monthly_active_users import (
+    MonthlyActiveUsersWorkerStore,
+)
 from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
@@ -81,6 +84,7 @@ class EventCreatorSlavedStore(
     SlavedEventStore,
     SlavedRegistrationStore,
     RoomStore,
+    MonthlyActiveUsersWorkerStore,
     BaseSlavedStore,
 ):
     pass
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 311523e0ed..1f1cea1416 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -46,6 +46,9 @@ from synapse.replication.slave.storage.transactions import SlavedTransactionStor
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.server import HomeServer
+from synapse.storage.data_stores.main.monthly_active_users import (
+    MonthlyActiveUsersWorkerStore,
+)
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
@@ -66,6 +69,7 @@ class FederationReaderSlavedStore(
     RoomStore,
     DirectoryStore,
     SlavedTransactionStore,
+    MonthlyActiveUsersWorkerStore,
     BaseSlavedStore,
 ):
     pass
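
The recurring one-line change across these worker apps is the same fix: mix MonthlyActiveUsersWorkerStore into each slaved store so those processes can answer monthly-active-user queries. Reduced to its shape with toy classes (not the real hierarchy):

    class BaseSlavedStore:
        """Common base; conventionally listed last in the bases."""


    class SlavedTransactionStore(BaseSlavedStore):
        pass


    class MonthlyActiveUsersWorkerStore(BaseSlavedStore):
        pass


    class FederationReaderSlavedStore(
        SlavedTransactionStore,
        MonthlyActiveUsersWorkerStore,
        BaseSlavedStore,
    ):
        """A worker's datastore is the composition of the fragments it needs."""
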
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index 83c436229c..38d11fdd0f 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -145,9 +145,8 @@ class FederationSenderReplicationHandler(ReplicationClientHandler):
         super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
         self.send_handler = FederationSenderHandler(hs, self)
 
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
-        yield super(FederationSenderReplicationHandler, self).on_rdata(
+    async def on_rdata(self, stream_name, token, rows):
+        await super(FederationSenderReplicationHandler, self).on_rdata(
             stream_name, token, rows
         )
         self.send_handler.process_replication_rows(stream_name, token, rows)
@@ -159,6 +158,13 @@ class FederationSenderReplicationHandler(ReplicationClientHandler):
         args.update(self.send_handler.stream_positions())
         return args
 
+    def on_remote_server_up(self, server: str):
+        """Called when get a new REMOTE_SERVER_UP command."""
+
+        # Let's wake up the transaction queue for the server in case we have
+        # pending stuff to send to it.
+        self.send_handler.wake_destination(server)
+
 
 def start(config_options):
     try:
@@ -206,7 +212,7 @@ class FederationSenderHandler(object):
     to the federation sender.
     """
 
-    def __init__(self, hs, replication_client):
+    def __init__(self, hs: FederationSenderServer, replication_client):
         self.store = hs.get_datastore()
         self._is_mine_id = hs.is_mine_id
         self.federation_sender = hs.get_federation_sender()
@@ -227,6 +233,9 @@ class FederationSenderHandler(object):
             self.store.get_room_max_stream_ordering()
         )
 
+    def wake_destination(self, server: str):
+        self.federation_sender.wake_destination(server)
+
     def stream_positions(self):
         return {"federation": self.federation_position}
 
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index e5b44a5eed..c2a334a2b0 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -31,7 +31,7 @@ from prometheus_client import Gauge
 from twisted.application import service
 from twisted.internet import defer, reactor
 from twisted.python.failure import Failure
-from twisted.web.resource import EncodingResourceWrapper, NoResource
+from twisted.web.resource import EncodingResourceWrapper, IResource, NoResource
 from twisted.web.server import GzipEncoderFactory
 from twisted.web.static import File
 
@@ -109,7 +109,16 @@ class SynapseHomeServer(HomeServer):
         for path, resmodule in additional_resources.items():
             handler_cls, config = load_module(resmodule)
             handler = handler_cls(config, module_api)
-            resources[path] = AdditionalResource(self, handler.handle_request)
+            if IResource.providedBy(handler):
+                resource = handler
+            elif hasattr(handler, "handle_request"):
+                resource = AdditionalResource(self, handler.handle_request)
+            else:
+                raise ConfigError(
+                    "additional_resource %s does not implement a known interface"
+                    % (resmodule["module"],)
+                )
+            resources[path] = resource
 
         # try to find something useful to redirect '/' to
         if WEB_CLIENT_PREFIX in resources:
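
The loader above now accepts two shapes of additional_resource module: one whose handler is already a twisted IResource, and the legacy one exposing handle_request. A sketch of both, with illustrative names:

    from twisted.web.resource import Resource


    class DirectResource(Resource):
        """Subclasses Resource, so IResource.providedBy() is true; mounted as-is."""

        isLeaf = True

        def render_GET(self, request):
            return b"ok"


    class LegacyHandler:
        """No IResource; gets wrapped in AdditionalResource via handle_request."""

        def __init__(self, config, module_api):
            self._api = module_api

        async def handle_request(self, request):
            request.setResponseCode(200)
            request.write(b"ok")
            request.finish()
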
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 09e639040a..e46b6ac598 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -141,9 +141,8 @@ class PusherReplicationHandler(ReplicationClientHandler):
 
         self.pusher_pool = hs.get_pusherpool()
 
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
-        yield super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
+    async def on_rdata(self, stream_name, token, rows):
+        await super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
         run_in_background(self.poke_pushers, stream_name, token, rows)
 
     @defer.inlineCallbacks
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 03031ee34d..8982c0676e 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -54,6 +54,9 @@ from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
 from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
 from synapse.rest.client.v2_alpha import sync
 from synapse.server import HomeServer
+from synapse.storage.data_stores.main.monthly_active_users import (
+    MonthlyActiveUsersWorkerStore,
+)
 from synapse.storage.data_stores.main.presence import UserPresenceState
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
@@ -77,6 +80,7 @@ class SynchrotronSlavedStore(
     SlavedEventStore,
     SlavedClientIpStore,
     RoomStore,
+    MonthlyActiveUsersWorkerStore,
     BaseSlavedStore,
 ):
     pass
@@ -358,9 +362,8 @@ class SyncReplicationHandler(ReplicationClientHandler):
         self.presence_handler = hs.get_presence_handler()
         self.notifier = hs.get_notifier()
 
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
-        yield super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
+    async def on_rdata(self, stream_name, token, rows):
+        await super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
         run_in_background(self.process_and_notify, stream_name, token, rows)
 
     def get_streams_to_replicate(self):
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index 1257098f92..ba536d6f04 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -172,9 +172,8 @@ class UserDirectoryReplicationHandler(ReplicationClientHandler):
         super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
         self.user_directory = hs.get_user_directory_handler()
 
-    @defer.inlineCallbacks
-    def on_rdata(self, stream_name, token, rows):
-        yield super(UserDirectoryReplicationHandler, self).on_rdata(
+    async def on_rdata(self, stream_name, token, rows):
+        await super(UserDirectoryReplicationHandler, self).on_rdata(
             stream_name, token, rows
         )
         if stream_name == EventsStream.NAME:
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 35756bed87..74853f9faa 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -37,10 +37,12 @@ class EmailConfig(Config):
 
         self.email_enable_notifs = False
 
-        email_config = config.get("email", {})
+        email_config = config.get("email")
+        if email_config is None:
+            email_config = {}
 
-        self.email_smtp_host = email_config.get("smtp_host", None)
-        self.email_smtp_port = email_config.get("smtp_port", None)
+        self.email_smtp_host = email_config.get("smtp_host", "localhost")
+        self.email_smtp_port = email_config.get("smtp_port", 25)
         self.email_smtp_user = email_config.get("smtp_user", None)
         self.email_smtp_pass = email_config.get("smtp_pass", None)
         self.require_transport_security = email_config.get(
@@ -74,9 +76,9 @@ class EmailConfig(Config):
         self.email_template_dir = os.path.abspath(template_dir)
 
         self.email_enable_notifs = email_config.get("enable_notifs", False)
-        account_validity_renewal_enabled = config.get("account_validity", {}).get(
-            "renew_at"
-        )
+
+        account_validity_config = config.get("account_validity") or {}
+        account_validity_renewal_enabled = account_validity_config.get("renew_at")
 
         self.threepid_behaviour_email = (
             # Have Synapse handle the email sending if account_threepid_delegates.email
@@ -278,7 +280,9 @@ class EmailConfig(Config):
             self.email_notif_for_new_users = email_config.get(
                 "notif_for_new_users", True
             )
-            self.email_riot_base_url = email_config.get("riot_base_url", None)
+            self.email_riot_base_url = email_config.get(
+                "client_base_url", email_config.get("riot_base_url", None)
+            )
 
         if account_validity_renewal_enabled:
             self.email_expiry_template_html = email_config.get(
@@ -294,107 +298,111 @@ class EmailConfig(Config):
                     raise ConfigError("Unable to find email template file %s" % (p,))
 
     def generate_config_section(self, config_dir_path, server_name, **kwargs):
-        return """
-        # Enable sending emails for password resets, notification events or
-        # account expiry notices
-        #
-        # If your SMTP server requires authentication, the optional smtp_user &
-        # smtp_pass variables should be used
-        #
-        #email:
-        #   enable_notifs: false
-        #   smtp_host: "localhost"
-        #   smtp_port: 25 # SSL: 465, STARTTLS: 587
-        #   smtp_user: "exampleusername"
-        #   smtp_pass: "examplepassword"
-        #   require_transport_security: false
-        #
-        #   # notif_from defines the "From" address to use when sending emails.
-        #   # It must be set if email sending is enabled.
-        #   #
-        #   # The placeholder '%(app)s' will be replaced by the application name,
-        #   # which is normally 'app_name' (below), but may be overridden by the
-        #   # Matrix client application.
-        #   #
-        #   # Note that the placeholder must be written '%(app)s', including the
-        #   # trailing 's'.
-        #   #
-        #   notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
-        #
-        #   # app_name defines the default value for '%(app)s' in notif_from. It
-        #   # defaults to 'Matrix'.
-        #   #
-        #   #app_name: my_branded_matrix_server
-        #
-        #   # Enable email notifications by default
-        #   #
-        #   notif_for_new_users: true
-        #
-        #   # Defining a custom URL for Riot is only needed if email notifications
-        #   # should contain links to a self-hosted installation of Riot; when set
-        #   # the "app_name" setting is ignored
-        #   #
-        #   riot_base_url: "http://localhost/riot"
-        #
-        #   # Configure the time that a validation email or text message code
-        #   # will expire after sending
-        #   #
-        #   # This is currently used for password resets
-        #   #
-        #   #validation_token_lifetime: 1h
-        #
-        #   # Template directory. All template files should be stored within this
-        #   # directory. If not set, default templates from within the Synapse
-        #   # package will be used
-        #   #
-        #   # For the list of default templates, please see
-        #   # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
-        #   #
-        #   #template_dir: res/templates
-        #
-        #   # Templates for email notifications
-        #   #
-        #   notif_template_html: notif_mail.html
-        #   notif_template_text: notif_mail.txt
-        #
-        #   # Templates for account expiry notices
-        #   #
-        #   expiry_template_html: notice_expiry.html
-        #   expiry_template_text: notice_expiry.txt
-        #
-        #   # Templates for password reset emails sent by the homeserver
-        #   #
-        #   #password_reset_template_html: password_reset.html
-        #   #password_reset_template_text: password_reset.txt
-        #
-        #   # Templates for registration emails sent by the homeserver
-        #   #
-        #   #registration_template_html: registration.html
-        #   #registration_template_text: registration.txt
-        #
-        #   # Templates for validation emails sent by the homeserver when adding an email to
-        #   # your user account
-        #   #
-        #   #add_threepid_template_html: add_threepid.html
-        #   #add_threepid_template_text: add_threepid.txt
-        #
-        #   # Templates for password reset success and failure pages that a user
-        #   # will see after attempting to reset their password
-        #   #
-        #   #password_reset_template_success_html: password_reset_success.html
-        #   #password_reset_template_failure_html: password_reset_failure.html
-        #
-        #   # Templates for registration success and failure pages that a user
-        #   # will see after attempting to register using an email or phone
-        #   #
-        #   #registration_template_success_html: registration_success.html
-        #   #registration_template_failure_html: registration_failure.html
+        return """\
+        # Configuration for sending emails from Synapse.
         #
-        #   # Templates for success and failure pages that a user will see after attempting
-        #   # to add an email or phone to their account
-        #   #
-        #   #add_threepid_success_html: add_threepid_success.html
-        #   #add_threepid_failure_html: add_threepid_failure.html
+        email:
+          # The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
+          #
+          #smtp_host: mail.server
+
+          # The port on the mail server for outgoing SMTP. Defaults to 25.
+          #
+          #smtp_port: 587
+
+          # Username/password for authentication to the SMTP server. By default, no
+          # authentication is attempted.
+          #
+          #smtp_user: "exampleusername"
+          #smtp_pass: "examplepassword"
+
+          # Uncomment the following to require TLS transport security for SMTP.
+          # By default, Synapse will connect over plain text, and will then switch to
+          # TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
+          # Synapse will refuse to connect unless the server supports STARTTLS.
+          #
+          #require_transport_security: true
+
+          # Enable sending emails for messages that the user has missed
+          #
+          #enable_notifs: false
+
+          # notif_from defines the "From" address to use when sending emails.
+          # It must be set if email sending is enabled.
+          #
+          # The placeholder '%(app)s' will be replaced by the application name,
+          # which is normally 'app_name' (below), but may be overridden by the
+          # Matrix client application.
+          #
+          # Note that the placeholder must be written '%(app)s', including the
+          # trailing 's'.
+          #
+          #notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
+
+          # app_name defines the default value for '%(app)s' in notif_from. It
+          # defaults to 'Matrix'.
+          #
+          #app_name: my_branded_matrix_server
+
+          # Uncomment the following to disable automatic subscription to email
+          # notifications for new users. Enabled by default.
+          #
+          #notif_for_new_users: false
+
+          # Custom URL for client links within the email notifications. By default
+          # links will be based on "https://matrix.to".
+          #
+          # (This setting used to be called riot_base_url; the old name is still
+          # supported for backwards-compatibility but is now deprecated.)
+          #
+          #client_base_url: "http://localhost/riot"
+
+          # Configure the time that a validation email will expire after sending.
+          # Defaults to 1h.
+          #
+          #validation_token_lifetime: 15m
+
+          # Directory in which Synapse will try to find the template files below.
+          # If not set, default templates from within the Synapse package will be used.
+          #
+          # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
+          # If you *do* uncomment it, you will need to make sure that all the templates
+          # below are in the directory.
+          #
+          # Synapse will look for the following templates in this directory:
+          #
+          # * The contents of email notifications of missed events: 'notif_mail.html' and
+          #   'notif_mail.txt'.
+          #
+          # * The contents of account expiry notice emails: 'notice_expiry.html' and
+          #   'notice_expiry.txt'.
+          #
+          # * The contents of password reset emails sent by the homeserver:
+          #   'password_reset.html' and 'password_reset.txt'
+          #
+          # * HTML pages for success and failure that a user will see when they follow
+          #   the link in the password reset email: 'password_reset_success.html' and
+          #   'password_reset_failure.html'
+          #
+          # * The contents of address verification emails sent during registration:
+          #   'registration.html' and 'registration.txt'
+          #
+          # * HTML pages for success and failure that a user will see when they follow
+          #   the link in an address verification email sent during registration:
+          #   'registration_success.html' and 'registration_failure.html'
+          #
+          # * The contents of address verification emails sent when an address is added
+          #   to a Matrix account: 'add_threepid.html' and 'add_threepid.txt'
+          #
+          # * HTML pages for success and failure that a user will see when they follow
+          #   the link in an address verification email sent when an address is added
+          #   to a Matrix account: 'add_threepid_success.html' and
+          #   'add_threepid_failure.html'
+          #
+          # You can see the default templates at:
+          # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
+          #
+          #template_dir: "res/templates"
         """
 
 
diff --git a/synapse/config/push.py b/synapse/config/push.py
index 0910958649..6f2b3a7faa 100644
--- a/synapse/config/push.py
+++ b/synapse/config/push.py
@@ -35,7 +35,7 @@ class PushConfig(Config):
 
         # Now check for the one in the 'email' section and honour it,
         # with a warning.
-        push_config = config.get("email", {})
+        push_config = config.get("email") or {}
         redact_content = push_config.get("redact_content")
         if redact_content is not None:
             print(
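
The repeated switch from config.get("email", {}) to config.get("email") or {} in these config hunks guards a subtly different case: a YAML file containing a bare "email:" key parses to an explicit None, and .get's default only applies when the key is absent entirely. Concretely:

    import yaml

    cfg = yaml.safe_load("email:\n")

    # The key exists with value None, so the .get default is *not* used:
    assert cfg.get("email", {}) is None

    # The "or {}" form normalises both the missing and explicit-None cases:
    assert (cfg.get("email") or {}) == {}
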
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index ee9614c5f7..9bb3beedbc 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -27,6 +27,9 @@ class AccountValidityConfig(Config):
     section = "accountvalidity"
 
     def __init__(self, config, synapse_config):
+        if config is None:
+            return
+        super(AccountValidityConfig, self).__init__()
         self.enabled = config.get("enabled", False)
         self.renew_by_email_enabled = "renew_at" in config
 
@@ -91,7 +94,7 @@ class RegistrationConfig(Config):
             )
 
         self.account_validity = AccountValidityConfig(
-            config.get("account_validity", {}), config
+            config.get("account_validity") or {}, config
         )
 
         self.registrations_require_3pid = config.get("registrations_require_3pid", [])
@@ -159,23 +162,6 @@ class RegistrationConfig(Config):
         # Optional account validity configuration. This allows for accounts to be denied
         # any request after a given period.
         #
-        # ``enabled`` defines whether the account validity feature is enabled. Defaults
-        # to False.
-        #
-        # ``period`` allows setting the period after which an account is valid
-        # after its registration. When renewing the account, its validity period
-        # will be extended by this amount of time. This parameter is required when using
-        # the account validity feature.
-        #
-        # ``renew_at`` is the amount of time before an account's expiry date at which
-        # Synapse will send an email to the account's email address with a renewal link.
-        # This needs the ``email`` and ``public_baseurl`` configuration sections to be
-        # filled.
-        #
-        # ``renew_email_subject`` is the subject of the email sent out with the renewal
-        # link. ``%%(app)s`` can be used as a placeholder for the ``app_name`` parameter
-        # from the ``email`` section.
-        #
         # Once this feature is enabled, Synapse will look for registered users without an
         # expiration date at startup and will add one to every account it found using the
         # current settings at that time.
@@ -186,21 +172,55 @@ class RegistrationConfig(Config):
         # date will be randomly selected within a range [now + period - d ; now + period],
         # where d is equal to 10%% of the validity period.
         #
-        #account_validity:
-        #  enabled: true
-        #  period: 6w
-        #  renew_at: 1w
-        #  renew_email_subject: "Renew your %%(app)s account"
-        #  # Directory in which Synapse will try to find the HTML files to serve to the
-        #  # user when trying to renew an account. Optional, defaults to
-        #  # synapse/res/templates.
-        #  template_dir: "res/templates"
-        #  # HTML to be displayed to the user after they successfully renewed their
-        #  # account. Optional.
-        #  account_renewed_html_path: "account_renewed.html"
-        #  # HTML to be displayed when the user tries to renew an account with an invalid
-        #  # renewal token. Optional.
-        #  invalid_token_html_path: "invalid_token.html"
+        account_validity:
+          # The account validity feature is disabled by default. Uncomment the
+          # following line to enable it.
+          #
+          #enabled: true
+
+          # The period after which an account is valid after its registration. When
+          # renewing the account, its validity period will be extended by this amount
+          # of time. This parameter is required when using the account validity
+          # feature.
+          #
+          #period: 6w
+
+          # The amount of time before an account's expiry date at which Synapse will
+          # send an email to the account's email address with a renewal link. By
+          # default, no such emails are sent.
+          #
+          # If you enable this setting, you will also need to fill out the 'email' and
+          # 'public_baseurl' configuration sections.
+          #
+          #renew_at: 1w
+
+          # The subject of the email sent out with the renewal link. '%%(app)s' can be
+          # used as a placeholder for the 'app_name' parameter from the 'email'
+          # section.
+          #
+          # Note that the placeholder must be written '%%(app)s', including the
+          # trailing 's'.
+          #
+          # If this is not set, a default value is used.
+          #
+          #renew_email_subject: "Renew your %%(app)s account"
+
+          # Directory in which Synapse will try to find templates for the HTML files to
+          # serve to the user when trying to renew an account. If not set, default
+          # templates from within the Synapse package will be used.
+          #
+          #template_dir: "res/templates"
+
+          # File within 'template_dir' giving the HTML to be displayed to the user after
+          # they successfully renewed their account. If not set, default text is used.
+          #
+          #account_renewed_html_path: "account_renewed.html"
+
+          # File within 'template_dir' giving the HTML to be displayed when the user
+          # tries to renew an account with an invalid renewal token. If not set,
+          # default text is used.
+          #
+          #invalid_token_html_path: "invalid_token.html"
 
         # Time that a user's session remains valid for, after they log in.
         #
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
index b91414aa35..423c158b11 100644
--- a/synapse/config/saml2_config.py
+++ b/synapse/config/saml2_config.py
@@ -121,6 +121,7 @@ class SAML2Config(Config):
         required_methods = [
             "get_saml_attributes",
             "saml_response_to_user_attributes",
+            "get_remote_user_id",
         ]
         missing_methods = [
             method
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 9ac112233b..0ec1b0fadd 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -294,6 +294,14 @@ class ServerConfig(Config):
             self.retention_default_min_lifetime = None
             self.retention_default_max_lifetime = None
 
+        if self.retention_enabled:
+            logger.info(
+                "Message retention policies support enabled with the following default"
+                " policy: min_lifetime = %s ; max_lifetime = %s",
+                self.retention_default_min_lifetime,
+                self.retention_default_max_lifetime,
+            )
+
         self.retention_allowed_lifetime_min = retention_config.get(
             "allowed_lifetime_min"
         )
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 1033e5e121..e3a1ba47a0 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -634,7 +634,7 @@ def get_public_keys(invite_event):
     return public_keys
 
 
-def auth_types_for_event(event) -> Set[Tuple[str]]:
+def auth_types_for_event(event) -> Set[Tuple[str, str]]:
     """Given an event, return a list of (EventType, StateKey) that may be
     needed to auth the event. The returned list may be a superset of what
     would actually be required depending on the full state of the room.
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index a44baea365..9ea85e93e6 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Dict, Optional, Tuple, Union
+from typing import Optional, Union
 
 from six import iteritems
 
@@ -23,6 +23,7 @@ from twisted.internet import defer
 
 from synapse.appservice import ApplicationService
 from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.types import StateMap
 
 
 @attr.s(slots=True)
@@ -106,13 +107,11 @@ class EventContext:
     _state_group = attr.ib(default=None, type=Optional[int])
     state_group_before_event = attr.ib(default=None, type=Optional[int])
     prev_group = attr.ib(default=None, type=Optional[int])
-    delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
+    delta_ids = attr.ib(default=None, type=Optional[StateMap[str]])
     app_service = attr.ib(default=None, type=Optional[ApplicationService])
 
-    _current_state_ids = attr.ib(
-        default=None, type=Optional[Dict[Tuple[str, str], str]]
-    )
-    _prev_state_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
+    _current_state_ids = attr.ib(default=None, type=Optional[StateMap[str]])
+    _prev_state_ids = attr.ib(default=None, type=Optional[StateMap[str]])
 
     @staticmethod
     def with_state(
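
StateMap, which these hunks substitute for the repeated Dict[Tuple[str, str], str] spelling, is a generic alias in synapse.types, roughly:

    from typing import Dict, Tuple, TypeVar

    T = TypeVar("T")

    # A map from (event type, state key) to some per-use value type.
    StateMap = Dict[Tuple[str, str], T]

    # e.g. mapping state entries to event IDs, as in EventContext above:
    current_state_ids = {
        ("m.room.create", ""): "$create-event-id",
        ("m.room.member", "@alice:example.com"): "$alice-join-id",
    }  # type: StateMap[str]
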
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index d7ce333822..8eddb3bf2c 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import Dict
 
 import six
 from six import iteritems
@@ -22,6 +23,7 @@ from six import iteritems
 from canonicaljson import json
 from prometheus_client import Counter
 
+from twisted.internet import defer
 from twisted.internet.abstract import isIPAddress
 from twisted.python import failure
 
@@ -41,7 +43,11 @@ from synapse.federation.federation_base import FederationBase, event_from_pdu_js
 from synapse.federation.persistence import TransactionActions
 from synapse.federation.units import Edu, Transaction
 from synapse.http.endpoint import parse_server_name
-from synapse.logging.context import nested_logging_context
+from synapse.logging.context import (
+    make_deferred_yieldable,
+    nested_logging_context,
+    run_in_background,
+)
 from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
 from synapse.logging.utils import log_function
 from synapse.replication.http.federation import (
@@ -49,7 +55,7 @@ from synapse.replication.http.federation import (
     ReplicationGetQueryRestServlet,
 )
 from synapse.types import get_domain_from_id
-from synapse.util import glob_to_regex
+from synapse.util import glob_to_regex, unwrapFirstError
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.caches.response_cache import ResponseCache
 
@@ -160,6 +166,43 @@ class FederationServer(FederationBase):
             )
             return 400, response
 
+        # We process PDUs and EDUs in parallel. This is important as we don't
+        # want to block things like to-device messages from reaching clients
+        # behind the potentially expensive handling of PDUs.
+        pdu_results, _ = await make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(
+                        self._handle_pdus_in_txn, origin, transaction, request_time
+                    ),
+                    run_in_background(self._handle_edus_in_txn, origin, transaction),
+                ],
+                consumeErrors=True,
+            ).addErrback(unwrapFirstError)
+        )
+
+        response = {"pdus": pdu_results}
+
+        logger.debug("Returning: %s", str(response))
+
+        await self.transaction_actions.set_response(origin, transaction, 200, response)
+        return 200, response
+
+    async def _handle_pdus_in_txn(
+        self, origin: str, transaction: Transaction, request_time: int
+    ) -> Dict[str, dict]:
+        """Process the PDUs in a received transaction.
+
+        Args:
+            origin: the server making the request
+            transaction: incoming transaction
+            request_time: timestamp that the HTTP request arrived at
+
+        Returns:
+            A map from event ID of a processed PDU to any errors we should
+            report back to the sending server.
+        """
+
         received_pdus_counter.inc(len(transaction.pdus))
 
         origin_host, _ = parse_server_name(origin)
@@ -250,20 +293,23 @@ class FederationServer(FederationBase):
             process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
         )
 
-        if hasattr(transaction, "edus"):
-            for edu in (Edu(**x) for x in transaction.edus):
-                await self.received_edu(origin, edu.edu_type, edu.content)
+        return pdu_results
 
-        response = {"pdus": pdu_results}
+    async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
+        """Process the EDUs in a received transaction.
+        """
 
-        logger.debug("Returning: %s", str(response))
+        async def _process_edu(edu_dict):
+            received_edus_counter.inc()
 
-        await self.transaction_actions.set_response(origin, transaction, 200, response)
-        return 200, response
+            edu = Edu(**edu_dict)
+            await self.registry.on_edu(edu.edu_type, origin, edu.content)
 
-    async def received_edu(self, origin, edu_type, content):
-        received_edus_counter.inc()
-        await self.registry.on_edu(edu_type, origin, content)
+        await concurrently_execute(
+            _process_edu,
+            getattr(transaction, "edus", []),
+            TRANSACTION_CONCURRENCY_LIMIT,
+        )
 
     async def on_context_state_request(self, origin, room_id, event_id):
         origin_host, _ = parse_server_name(origin)
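
The PDU/EDU fan-out added above is a standard Synapse idiom worth seeing in isolation: run_in_background gives each coroutine its own logcontext, defer.gatherResults collects the results, make_deferred_yieldable restores the caller's logcontext on the way back, and unwrapFirstError re-raises the underlying exception instead of twisted's FirstError wrapper. A stripped-down sketch with placeholder workers:

    from twisted.internet import defer

    from synapse.logging.context import make_deferred_yieldable, run_in_background
    from synapse.util import unwrapFirstError


    async def handle_pdus():
        return {"$event-id": {}}  # per-PDU results


    async def handle_edus():
        return None


    async def handle_transaction():
        pdu_results, _ = await make_deferred_yieldable(
            defer.gatherResults(
                [run_in_background(handle_pdus), run_in_background(handle_edus)],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )
        return pdu_results
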
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index ced4925a98..174f6e42be 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -259,7 +259,9 @@ class FederationRemoteSendQueue(object):
     def federation_ack(self, token):
         self._clear_queue_before_pos(token)
 
-    def get_replication_rows(self, from_token, to_token, limit, federation_ack=None):
+    async def get_replication_rows(
+        self, from_token, to_token, limit, federation_ack=None
+    ):
         """Get rows to be sent over federation between the two tokens
 
         Args:
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 4ebb0e8bc0..36c83c3027 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -21,6 +21,7 @@ from prometheus_client import Counter
 
 from twisted.internet import defer
 
+import synapse
 import synapse.metrics
 from synapse.federation.sender.per_destination_queue import PerDestinationQueue
 from synapse.federation.sender.transaction_manager import TransactionManager
@@ -54,7 +55,7 @@ sent_pdus_destination_dist_total = Counter(
 
 
 class FederationSender(object):
-    def __init__(self, hs):
+    def __init__(self, hs: "synapse.server.HomeServer"):
         self.hs = hs
         self.server_name = hs.hostname
 
@@ -482,7 +483,20 @@ class FederationSender(object):
 
     def send_device_messages(self, destination):
         if destination == self.server_name:
-            logger.info("Not sending device update to ourselves")
+            logger.warning("Not sending device update to ourselves")
+            return
+
+        self._get_per_destination_queue(destination).attempt_new_transaction()
+
+    def wake_destination(self, destination: str):
+        """Called when we want to retry sending transactions to a remote.
+
+        This is mainly useful if the remote server has been down and we think it
+        might have come back.
+        """
+
+        if destination == self.server_name:
+            logger.warning("Not waking up ourselves")
             return
 
         self._get_per_destination_queue(destination).attempt_new_transaction()
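
wake_destination gives callers, such as the REMOTE_SERVER_UP plumbing elsewhere in this change, an explicit way to nudge a destination's queue after an outage instead of waiting for fresh traffic. A hypothetical call site:

    def on_remote_server_up(federation_sender, origin: str):
        # Retry anything queued for that server now that it looks reachable.
        federation_sender.wake_destination(origin)
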
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index a5b36b1827..5012aaea35 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -31,6 +31,7 @@ from synapse.handlers.presence import format_user_presence_state
 from synapse.metrics import sent_transactions_counter
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.presence import UserPresenceState
+from synapse.types import StateMap
 from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
 
 # This is defined in the Matrix spec and enforced by the receiver.
@@ -77,7 +78,7 @@ class PerDestinationQueue(object):
         # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
         # based on their key (e.g. typing events by room_id)
         # Map of (edu_type, key) -> Edu
-        self._pending_edus_keyed = {}  # type: dict[tuple[str, str], Edu]
+        self._pending_edus_keyed = {}  # type: StateMap[Edu]
 
         # Map of user_id -> UserPresenceState of pending presence to be sent to this
         # destination
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index b4cbf23394..d8cf9ed299 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -44,6 +44,7 @@ from synapse.logging.opentracing import (
     tags,
     whitelisted_homeserver,
 )
+from synapse.server import HomeServer
 from synapse.types import ThirdPartyInstanceID, get_domain_from_id
 from synapse.util.ratelimitutils import FederationRateLimiter
 from synapse.util.versionstring import get_version_string
@@ -101,12 +102,17 @@ class NoAuthenticationError(AuthenticationError):
 
 
 class Authenticator(object):
-    def __init__(self, hs):
+    def __init__(self, hs: HomeServer):
         self._clock = hs.get_clock()
         self.keyring = hs.get_keyring()
         self.server_name = hs.hostname
         self.store = hs.get_datastore()
         self.federation_domain_whitelist = hs.config.federation_domain_whitelist
+        self.notifier = hs.get_notifier()
+
+        self.replication_client = None
+        if hs.config.worker.worker_app:
+            self.replication_client = hs.get_tcp_replication()
 
     # A method just so we can pass 'self' as the authenticator to the Servlets
     async def authenticate_request(self, request, content):
@@ -166,6 +172,17 @@ class Authenticator(object):
         try:
             logger.info("Marking origin %r as up", origin)
             await self.store.set_destination_retry_timings(origin, None, 0, 0)
+
+            # Inform the relevant places that the remote server is back up.
+            self.notifier.notify_remote_server_up(origin)
+            if self.replication_client:
+                # If we're on a worker we try and inform master about this. The
+                # replication client doesn't hook into the notifier to avoid
+                # infinite loops where we send a `REMOTE_SERVER_UP` command to
+                # master, which then echoes it back to us which in turn pokes
+                # the notifier.
+                self.replication_client.send_remote_server_up(origin)
+
         except Exception:
             logger.exception("Error resetting retry timings on %s", origin)
 
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 76d18a8ba8..9205865231 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -14,9 +14,11 @@
 # limitations under the License.
 
 import logging
+from typing import List
 
 from synapse.api.constants import Membership
-from synapse.types import RoomStreamToken
+from synapse.events import FrozenEvent
+from synapse.types import RoomStreamToken, StateMap
 from synapse.visibility import filter_events_for_client
 
 from ._base import BaseHandler
@@ -60,68 +62,6 @@ class AdminHandler(BaseHandler):
             ret["avatar_url"] = profile.avatar_url
         return ret
 
-    async def get_users(self):
-        """Function to retrieve a list of users in users table.
-
-        Args:
-        Returns:
-            defer.Deferred: resolves to list[dict[str, Any]]
-        """
-        ret = await self.store.get_users()
-
-        return ret
-
-    async def get_users_paginate(self, start, limit, name, guests, deactivated):
-        """Function to retrieve a paginated list of users from
-        users list. This will return a json list of users.
-
-        Args:
-            start (int): start number to begin the query from
-            limit (int): number of rows to retrieve
-            name (string): filter for user names
-            guests (bool): whether to in include guest users
-            deactivated (bool): whether to include deactivated users
-        Returns:
-            defer.Deferred: resolves to json list[dict[str, Any]]
-        """
-        ret = await self.store.get_users_paginate(
-            start, limit, name, guests, deactivated
-        )
-
-        return ret
-
-    async def search_users(self, term):
-        """Function to search users list for one or more users with
-        the matched term.
-
-        Args:
-            term (str): search term
-        Returns:
-            defer.Deferred: resolves to list[dict[str, Any]]
-        """
-        ret = await self.store.search_users(term)
-
-        return ret
-
-    def get_user_server_admin(self, user):
-        """
-        Get the admin bit on a user.
-
-        Args:
-            user_id (UserID): the (necessarily local) user to manipulate
-        """
-        return self.store.is_server_admin(user)
-
-    def set_user_server_admin(self, user, admin):
-        """
-        Set the admin bit on a user.
-
-        Args:
-            user_id (UserID): the (necessarily local) user to manipulate
-            admin (bool): whether or not the user should be an admin of this server
-        """
-        return self.store.set_server_admin(user, admin)
-
     async def export_user_data(self, user_id, writer):
         """Write all data we have on the user to the given writer.
 
@@ -134,7 +74,7 @@ class AdminHandler(BaseHandler):
             The returned value is that returned by `writer.finished()`.
         """
         # Get all rooms the user is in or has been in
-        rooms = await self.store.get_rooms_for_user_where_membership_is(
+        rooms = await self.store.get_rooms_for_local_user_where_membership_is(
             user_id,
             membership_list=(
                 Membership.JOIN,
@@ -259,35 +199,26 @@ class ExfiltrationWriter(object):
     """Interface used to specify how to write exported data.
     """
 
-    def write_events(self, room_id, events):
+    def write_events(self, room_id: str, events: List[FrozenEvent]):
         """Write a batch of events for a room.
-
-        Args:
-            room_id (str)
-            events (list[FrozenEvent])
         """
         pass
 
-    def write_state(self, room_id, event_id, state):
+    def write_state(self, room_id: str, event_id: str, state: StateMap[FrozenEvent]):
         """Write the state at the given event in the room.
 
         This only gets called for backward extremities rather than for each
         event.
-
-        Args:
-            room_id (str)
-            event_id (str)
-            state (dict[tuple[str, str], FrozenEvent])
         """
         pass
 
-    def write_invite(self, room_id, event, state):
+    def write_invite(self, room_id: str, event: FrozenEvent, state: StateMap[dict]):
         """Write an invite for the room, with associated invite state.
 
         Args:
-            room_id (str)
-            event (FrozenEvent)
-            state (dict[tuple[str, str], dict]): A subset of the state at the
+            room_id
+            event
+            state: A subset of the state at the
                invite, with a subset of the event keys (type, state_key,
                 content and sender)
         """
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 4426967f88..2afb390a92 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -140,7 +140,7 @@ class DeactivateAccountHandler(BaseHandler):
             user_id (str): The user ID to reject pending invites for.
         """
         user = UserID.from_string(user_id)
-        pending_invites = await self.store.get_invited_rooms_for_user(user_id)
+        pending_invites = await self.store.get_invited_rooms_for_local_user(user_id)
 
         for room in pending_invites:
             try:
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 61b6713c88..d4f9a792fc 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -64,7 +64,7 @@ from synapse.replication.http.federation import (
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
 from synapse.state import StateResolutionStore, resolve_events_with_store
 from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
-from synapse.types import UserID, get_domain_from_id
+from synapse.types import StateMap, UserID, get_domain_from_id
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.distributor import user_joined_room
 from synapse.util.retryutils import NotRetryingDestination
@@ -89,7 +89,7 @@ class _NewEventInfo:
 
     event = attr.ib(type=EventBase)
     state = attr.ib(type=Optional[Sequence[EventBase]], default=None)
-    auth_events = attr.ib(type=Optional[Dict[Tuple[str, str], EventBase]], default=None)
+    auth_events = attr.ib(type=Optional[StateMap[EventBase]], default=None)
 
 
 def shortstr(iterable, maxitems=5):
@@ -352,9 +352,7 @@ class FederationHandler(BaseHandler):
                     ours = await self.state_store.get_state_groups_ids(room_id, seen)
 
                     # state_maps is a list of mappings from (type, state_key) to event_id
-                    state_maps = list(
-                        ours.values()
-                    )  # type: list[dict[tuple[str, str], str]]
+                    state_maps = list(ours.values())  # type: list[StateMap[str]]
 
                     # we don't need this any more, let's delete it.
                     del ours
@@ -1912,7 +1910,7 @@ class FederationHandler(BaseHandler):
         origin: str,
         event: EventBase,
         state: Optional[Iterable[EventBase]],
-        auth_events: Optional[Dict[Tuple[str, str], EventBase]],
+        auth_events: Optional[StateMap[EventBase]],
         backfilled: bool,
     ):
         """
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 44ec3e66ae..2e6755f19c 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -101,7 +101,7 @@ class InitialSyncHandler(BaseHandler):
         if include_archived:
             memberships.append(Membership.LEAVE)
 
-        room_list = await self.store.get_rooms_for_user_where_membership_is(
+        room_list = await self.store.get_rooms_for_local_user_where_membership_is(
             user_id=user_id, membership_list=memberships
         )
 
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 00a6afc963..71d76202c9 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -88,6 +88,8 @@ class PaginationHandler(object):
         if hs.config.retention_enabled:
             # Run the purge jobs described in the configuration file.
             for job in hs.config.retention_purge_jobs:
+                logger.info("Setting up purge job with config: %s", job)
+
                 self.clock.looping_call(
                     run_as_background_process,
                     job["interval"],
@@ -130,11 +132,22 @@ class PaginationHandler(object):
         else:
             include_null = False
 
+        logger.info(
+            "[purge] Running purge job for %d < max_lifetime <= %d (include NULLs = %s)",
+            min_ms,
+            max_ms,
+            include_null,
+        )
+
         rooms = yield self.store.get_rooms_for_retention_period_in_range(
             min_ms, max_ms, include_null
         )
 
+        logger.debug("[purge] Rooms to purge: %s", rooms)
+
         for room_id, retention_policy in iteritems(rooms):
+            logger.info("[purge] Attempting to purge messages in room %s", room_id)
+
             if room_id in self._purges_in_progress_by_room:
                 logger.warning(
                     "[purge] not purging room %s as there's an ongoing purge running"
@@ -156,7 +169,7 @@ class PaginationHandler(object):
 
             stream_ordering = yield self.store.find_first_stream_ordering_after_ts(ts)
 
-            r = yield self.store.get_room_event_after_stream_ordering(
+            r = yield self.store.get_room_event_before_stream_ordering(
                 room_id, stream_ordering,
             )
             if not r:
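
The switch to get_room_event_before_stream_ordering corrects the purge
boundary: the token must point at the newest event that is already older than
max_lifetime, not the first event after that point. A simplified sketch of the
boundary calculation (hypothetical helper; the real job also handles rooms
with no matching event):

    async def purge_boundary(store, room_id, now_ms, max_lifetime_ms):
        ts = now_ms - max_lifetime_ms
        stream_ordering = await store.find_first_stream_ordering_after_ts(ts)
        # Take the last event *before* this ordering: purging up to and
        # including it cannot delete anything younger than max_lifetime.
        return await store.get_room_event_before_stream_ordering(
            room_id, stream_ordering
        )
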
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 8a7d965feb..7ffc194f0c 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -20,13 +20,7 @@ from twisted.internet import defer
 
 from synapse import types
 from synapse.api.constants import MAX_USERID_LENGTH, LoginType
-from synapse.api.errors import (
-    AuthError,
-    Codes,
-    ConsentNotGivenError,
-    RegistrationError,
-    SynapseError,
-)
+from synapse.api.errors import AuthError, Codes, ConsentNotGivenError, SynapseError
 from synapse.config.server import is_threepid_reserved
 from synapse.http.servlet import assert_params_in_dict
 from synapse.replication.http.login import RegisterDeviceReplicationServlet
@@ -165,7 +159,7 @@ class RegistrationHandler(BaseHandler):
         Returns:
             Deferred[str]: user_id
         Raises:
-            RegistrationError if there was a problem registering.
+            SynapseError if there was a problem registering.
         """
         yield self.check_registration_ratelimit(address)
 
@@ -174,7 +168,7 @@ class RegistrationHandler(BaseHandler):
         if password:
             password_hash = yield self._auth_handler.hash(password)
 
-        if localpart:
+        if localpart is not None:
             yield self.check_username(localpart, guest_access_token=guest_access_token)
 
             was_guest = guest_access_token is not None
@@ -182,7 +176,7 @@ class RegistrationHandler(BaseHandler):
             if not was_guest:
                 try:
                     int(localpart)
-                    raise RegistrationError(
+                    raise SynapseError(
                         400, "Numeric user IDs are reserved for guest users."
                     )
                 except ValueError:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 9cab2adbfb..9f50196ea7 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -32,7 +32,15 @@ from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, Syna
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.http.endpoint import parse_and_validate_server_name
 from synapse.storage.state import StateFilter
-from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
+from synapse.types import (
+    Requester,
+    RoomAlias,
+    RoomID,
+    RoomStreamToken,
+    StateMap,
+    StreamToken,
+    UserID,
+)
 from synapse.util import stringutils
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches.response_cache import ResponseCache
@@ -207,15 +215,19 @@ class RoomCreationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def _update_upgraded_room_pls(
-        self, requester, old_room_id, new_room_id, old_room_state,
+        self,
+        requester: Requester,
+        old_room_id: str,
+        new_room_id: str,
+        old_room_state: StateMap[str],
     ):
         """Send updated power levels in both rooms after an upgrade
 
         Args:
-            requester (synapse.types.Requester): the user requesting the upgrade
-            old_room_id (str): the id of the room to be replaced
-            new_room_id (str): the id of the replacement room
-            old_room_state (dict[tuple[str, str], str]): the state map for the old room
+            requester: the user requesting the upgrade
+            old_room_id: the id of the room to be replaced
+            new_room_id: the id of the replacement room
+            old_room_state: the state map for the old room
 
         Returns:
             Deferred
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 03bb52ccfb..15e8aa5249 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -690,7 +690,7 @@ class RoomMemberHandler(object):
 
     @defer.inlineCallbacks
     def _get_inviter(self, user_id, room_id):
-        invite = yield self.store.get_invite_for_user_in_room(
+        invite = yield self.store.get_invite_for_local_user_in_room(
             user_id=user_id, room_id=room_id
         )
         if invite:
diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py
index 0082f85c26..7f411b53b9 100644
--- a/synapse/handlers/saml_handler.py
+++ b/synapse/handlers/saml_handler.py
@@ -24,6 +24,7 @@ from saml2.client import Saml2Client
 from synapse.api.errors import SynapseError
 from synapse.config import ConfigError
 from synapse.http.servlet import parse_string
+from synapse.module_api import ModuleApi
 from synapse.rest.client.v1.login import SSOAuthHandler
 from synapse.types import (
     UserID,
@@ -31,6 +32,7 @@ from synapse.types import (
     mxid_localpart_allowed_characters,
 )
 from synapse.util.async_helpers import Linearizer
+from synapse.util.iterutils import chunk_seq
 
 logger = logging.getLogger(__name__)
 
@@ -59,7 +61,8 @@ class SamlHandler:
 
         # plugin to do custom mapping from saml response to mxid
         self._user_mapping_provider = hs.config.saml2_user_mapping_provider_class(
-            hs.config.saml2_user_mapping_provider_config
+            hs.config.saml2_user_mapping_provider_config,
+            ModuleApi(hs, hs.get_auth_handler()),
         )
 
         # identifier for the external_ids table
@@ -112,10 +115,10 @@ class SamlHandler:
         # the dict.
         self.expire_sessions()
 
-        user_id = await self._map_saml_response_to_user(resp_bytes)
+        user_id = await self._map_saml_response_to_user(resp_bytes, relay_state)
         self._sso_auth_handler.complete_sso_login(user_id, request, relay_state)
 
-    async def _map_saml_response_to_user(self, resp_bytes):
+    async def _map_saml_response_to_user(self, resp_bytes, client_redirect_url):
         try:
             saml2_auth = self._saml_client.parse_authn_request_response(
                 resp_bytes,
@@ -130,17 +133,28 @@ class SamlHandler:
             logger.warning("SAML2 response was not signed")
             raise SynapseError(400, "SAML2 response was not signed")
 
-        logger.info("SAML2 response: %s", saml2_auth.origxml)
-        logger.info("SAML2 mapped attributes: %s", saml2_auth.ava)
+        logger.debug("SAML2 response: %s", saml2_auth.origxml)
+        for assertion in saml2_auth.assertions:
+            # Kibana limits the length of a log field, but this is all rather
+            # useful, so split it up.
+            count = 0
+            for part in chunk_seq(str(assertion), 10000):
+                logger.info(
+                    "SAML2 assertion: %s%s", "(%i)..." % (count,) if count else "", part
+                )
+                count += 1
 
-        try:
-            remote_user_id = saml2_auth.ava["uid"][0]
-        except KeyError:
-            logger.warning("SAML2 response lacks a 'uid' attestation")
-            raise SynapseError(400, "'uid' not in SAML2 response")
+        logger.info("SAML2 mapped attributes: %s", saml2_auth.ava)
 
         self._outstanding_requests_dict.pop(saml2_auth.in_response_to, None)
 
+        remote_user_id = self._user_mapping_provider.get_remote_user_id(
+            saml2_auth, client_redirect_url
+        )
+
+        if not remote_user_id:
+            raise Exception("Failed to extract remote user id from SAML response")
+
         with (await self._mapping_lock.queue(self._auth_provider_id)):
             # first of all, check if we already have a mapping for this user
             logger.info(
@@ -183,7 +197,7 @@ class SamlHandler:
             # Map saml response to user attributes using the configured mapping provider
             for i in range(1000):
                 attribute_dict = self._user_mapping_provider.saml_response_to_user_attributes(
-                    saml2_auth, i
+                    saml2_auth, i, client_redirect_url=client_redirect_url,
                 )
 
                 logger.debug(
@@ -216,6 +230,8 @@ class SamlHandler:
                     500, "Unable to generate a Matrix ID from the SAML response"
                 )
 
+            logger.info("Mapped SAML user to local part %s", localpart)
+
             registered_user_id = await self._registration_handler.register_user(
                 localpart=localpart, default_display_name=displayname
             )
@@ -265,17 +281,35 @@ class SamlConfig(object):
 class DefaultSamlMappingProvider(object):
     __version__ = "0.0.1"
 
-    def __init__(self, parsed_config: SamlConfig):
+    def __init__(self, parsed_config: SamlConfig, module_api: ModuleApi):
         """The default SAML user mapping provider
 
         Args:
             parsed_config: Module configuration
+            module_api: module api proxy
         """
         self._mxid_source_attribute = parsed_config.mxid_source_attribute
         self._mxid_mapper = parsed_config.mxid_mapper
 
+        self._grandfathered_mxid_source_attribute = (
+            module_api._hs.config.saml2_grandfathered_mxid_source_attribute
+        )
+
+    def get_remote_user_id(
+        self, saml_response: saml2.response.AuthnResponse, client_redirect_url: str
+    ):
+        """Extracts the remote user id from the SAML response"""
+        try:
+            return saml_response.ava["uid"][0]
+        except KeyError:
+            logger.warning("SAML2 response lacks a 'uid' attestation")
+            raise SynapseError(400, "'uid' not in SAML2 response")
+
     def saml_response_to_user_attributes(
-        self, saml_response: saml2.response.AuthnResponse, failures: int = 0,
+        self,
+        saml_response: saml2.response.AuthnResponse,
+        failures: int,
+        client_redirect_url: str,
     ) -> dict:
         """Maps some text from a SAML response to attributes of a new user
 
@@ -285,6 +319,8 @@ class DefaultSamlMappingProvider(object):
             failures: How many times a call to this function with this
                 saml_response has resulted in a failure
 
+            client_redirect_url: where the client wants to redirect to
+
         Returns:
             dict: A dict containing new user attributes. Possible keys:
                 * mxid_localpart (str): Required. The localpart of the user's mxid
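
For comparison, a hypothetical third-party mapping provider implementing the
extended interface (keying on an "email" attribute instead of "uid"; purely
illustrative, not part of this changeset) might look like:

    class EmailSamlMappingProvider:
        def __init__(self, parsed_config, module_api):
            self._module_api = module_api

        def get_remote_user_id(self, saml_response, client_redirect_url):
            try:
                return saml_response.ava["email"][0]
            except KeyError:
                raise Exception("'email' not in SAML2 response")

        def saml_response_to_user_attributes(
            self, saml_response, failures, client_redirect_url
        ):
            localpart = saml_response.ava["email"][0].split("@", 1)[0]
            if failures:
                # Append a counter to dodge mxid collisions on retry.
                localpart += str(failures)
            return {"mxid_localpart": localpart, "displayname": None}
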
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index ef750d1497..110097eab9 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -179,7 +179,7 @@ class SearchHandler(BaseHandler):
         search_filter = Filter(filter_dict)
 
         # TODO: Search through left rooms too
-        rooms = yield self.store.get_rooms_for_user_where_membership_is(
+        rooms = yield self.store.get_rooms_for_local_user_where_membership_is(
             user.to_string(),
             membership_list=[Membership.JOIN],
             # membership_list=[Membership.JOIN, Membership.LEAVE, Membership.Ban],
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 2d3b8ba73c..cd95f85e3f 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1662,7 +1662,7 @@ class SyncHandler(object):
             Membership.BAN,
         )
 
-        room_list = await self.store.get_rooms_for_user_where_membership_is(
+        room_list = await self.store.get_rooms_for_local_user_where_membership_is(
             user_id=user_id, membership_list=membership_list
         )
 
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index b635c339ed..d5ca9cb07b 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -257,7 +257,7 @@ class TypingHandler(object):
             "typing_key", self._latest_room_serial, rooms=[member.room_id]
         )
 
-    def get_all_typing_updates(self, last_id, current_id):
+    async def get_all_typing_updates(self, last_id, current_id):
         if last_id == current_id:
             return []
 
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 943d12c907..04bc2385a2 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -14,8 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import cgi
 import collections
+import html
 import http.client
 import logging
 import types
@@ -36,6 +36,7 @@ import synapse.metrics
 from synapse.api.errors import (
     CodeMessageException,
     Codes,
+    RedirectException,
     SynapseError,
     UnrecognizedRequestError,
 )
@@ -153,14 +154,18 @@ def _return_html_error(f, request):
 
     Args:
         f (twisted.python.failure.Failure):
-        request (twisted.web.iweb.IRequest):
+        request (twisted.web.server.Request):
     """
     if f.check(CodeMessageException):
         cme = f.value
         code = cme.code
         msg = cme.msg
 
-        if isinstance(cme, SynapseError):
+        if isinstance(cme, RedirectException):
+            logger.info("%s redirect to %s", request, cme.location)
+            request.setHeader(b"location", cme.location)
+            request.cookies.extend(cme.cookies)
+        elif isinstance(cme, SynapseError):
             logger.info("%s SynapseError: %s - %s", request, code, msg)
         else:
             logger.error(
@@ -178,7 +183,7 @@ def _return_html_error(f, request):
             exc_info=(f.type, f.value, f.getTracebackObject()),
         )
 
-    body = HTML_ERROR_TEMPLATE.format(code=code, msg=cgi.escape(msg)).encode("utf-8")
+    body = HTML_ERROR_TEMPLATE.format(code=code, msg=html.escape(msg)).encode("utf-8")
     request.setResponseCode(code)
     request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
     request.setHeader(b"Content-Length", b"%i" % (len(body),))
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 9f2d035fa0..911251c0bc 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -88,7 +88,7 @@ class SynapseRequest(Request):
     def get_redacted_uri(self):
         uri = self.uri
         if isinstance(uri, bytes):
-            uri = self.uri.decode("ascii")
+            uri = self.uri.decode("ascii", errors="replace")
         return redact_uri(uri)
 
     def get_method(self):
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 33b322209d..1b940842f6 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -571,6 +571,9 @@ def run_in_background(f, *args, **kwargs):
     yield or await on (for instance because you want to pass it to
     deferred.gatherResults()).
 
+    If f returns a Coroutine object, it will be wrapped into a Deferred (which will have
+    the side effect of executing the coroutine).
+
     Note that if you completely discard the result, you should make sure that
     `f` doesn't raise any deferred exceptions, otherwise a scary-looking
     CRITICAL error about an unhandled error will be logged without much
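
An illustrative consequence of the coroutine support documented above (sketch
only, not from this changeset): an async function can be handed straight to
run_in_background, and the resulting Deferred gathered or awaited as usual.

    async def fetch_thing():
        return 42  # ordinary coroutine; no @defer.inlineCallbacks needed

    d = run_in_background(fetch_thing)  # coroutine wrapped into a Deferred
    result = yield make_deferred_yieldable(d)
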
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 305b9b0178..d680ee95e1 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,18 +17,26 @@ import logging
 
 from twisted.internet import defer
 
+from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.types import UserID
 
+"""
+This package defines the 'stable' API which can be used by extension modules which
+are loaded into Synapse.
+"""
+
+__all__ = ["errors", "make_deferred_yieldable", "run_in_background", "ModuleApi"]
+
 logger = logging.getLogger(__name__)
 
 
 class ModuleApi(object):
-    """A proxy object that gets passed to password auth providers so they
+    """A proxy object that gets passed to various plugin modules so they
     can register new users etc. if necessary.
     """
 
     def __init__(self, hs, auth_handler):
-        self.hs = hs
+        self._hs = hs
 
         self._store = hs.get_datastore()
         self._auth = hs.get_auth()
@@ -64,7 +73,7 @@ class ModuleApi(object):
         """
         if username.startswith("@"):
             return username
-        return UserID(username, self.hs.hostname).to_string()
+        return UserID(username, self._hs.hostname).to_string()
 
     def check_user_exists(self, user_id):
         """Check if user exists.
@@ -111,10 +120,14 @@ class ModuleApi(object):
             displayname (str|None): The displayname of the new user.
             emails (List[str]): Emails to bind to the new user.
 
+        Raises:
+            SynapseError if there is an error performing the registration. Check the
+                'errcode' property for more information on the reason for failure
+
         Returns:
             Deferred[str]: user_id
         """
-        return self.hs.get_registration_handler().register_user(
+        return self._hs.get_registration_handler().register_user(
             localpart=localpart, default_display_name=displayname, bind_emails=emails
         )
 
@@ -131,12 +144,34 @@ class ModuleApi(object):
         Returns:
             defer.Deferred[tuple[str, str]]: Tuple of device ID and access token
         """
-        return self.hs.get_registration_handler().register_device(
+        return self._hs.get_registration_handler().register_device(
             user_id=user_id,
             device_id=device_id,
             initial_display_name=initial_display_name,
         )
 
+    def record_user_external_id(
+        self, auth_provider_id: str, remote_user_id: str, registered_user_id: str
+    ) -> defer.Deferred:
+        """Record a mapping from an external user id to a mxid
+
+        Args:
+            auth_provider_id: identifier for the remote auth provider
+            remote_user_id: id on that system
+            registered_user_id: complete mxid that it is mapped to
+        """
+        return self._store.record_user_external_id(
+            auth_provider_id, remote_user_id, registered_user_id
+        )
+
+    def generate_short_term_login_token(
+        self, user_id: str, duration_in_ms: int = (2 * 60 * 1000)
+    ) -> str:
+        """Generate a login token suitable for m.login.token authentication"""
+        return self._hs.get_macaroon_generator().generate_short_term_login_token(
+            user_id, duration_in_ms
+        )
+
     @defer.inlineCallbacks
     def invalidate_access_token(self, access_token):
         """Invalidate an access token for a user
@@ -157,7 +192,7 @@ class ModuleApi(object):
         user_id = user_info["user"].to_string()
         if device_id:
             # delete the device, which will also delete its access tokens
-            yield self.hs.get_device_handler().delete_device(user_id, device_id)
+            yield self._hs.get_device_handler().delete_device(user_id, device_id)
         else:
             # no associated device. Just delete the access token.
             yield self._auth_handler.delete_access_token(access_token)
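
Taken together, the new helpers let an SSO-style module register a user,
record the external identity, and mint a token for m.login.token. A
hypothetical flow (illustrative provider name "my_idp"):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def complete_external_login(module_api, remote_user_id):
        user_id = yield module_api.register_user(localpart="alice")
        yield module_api.record_user_external_id(
            "my_idp", remote_user_id, user_id
        )
        # Short-lived token the client redeems via m.login.token.
        token = module_api.generate_short_term_login_token(user_id)
        return token
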
diff --git a/synapse/module_api/errors.py b/synapse/module_api/errors.py
new file mode 100644
index 0000000000..b15441772c
--- /dev/null
+++ b/synapse/module_api/errors.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Exception types which are exposed as part of the stable module API"""
+
+from synapse.api.errors import RedirectException, SynapseError  # noqa: F401
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 5f5f765bea..6132727cbd 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -15,11 +15,13 @@
 
 import logging
 from collections import namedtuple
+from typing import Callable, List
 
 from prometheus_client import Counter
 
 from twisted.internet import defer
 
+import synapse.server
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError
 from synapse.handlers.presence import format_user_presence_state
@@ -154,7 +156,7 @@ class Notifier(object):
 
     UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000
 
-    def __init__(self, hs):
+    def __init__(self, hs: "synapse.server.HomeServer"):
         self.user_to_user_stream = {}
         self.room_to_user_streams = {}
 
@@ -164,7 +166,12 @@ class Notifier(object):
         self.store = hs.get_datastore()
         self.pending_new_room_events = []
 
-        self.replication_callbacks = []
+        # Called when there are new things to stream over replication
+        self.replication_callbacks = []  # type: List[Callable[[], None]]
+
+        # Called when remote servers have come back online after having been
+        # down.
+        self.remote_server_up_callbacks = []  # type: List[Callable[[str], None]]
 
         self.clock = hs.get_clock()
         self.appservice_handler = hs.get_application_service_handler()
@@ -205,7 +212,7 @@ class Notifier(object):
             "synapse_notifier_users", "", [], lambda: len(self.user_to_user_stream)
         )
 
-    def add_replication_callback(self, cb):
+    def add_replication_callback(self, cb: Callable[[], None]):
         """Add a callback that will be called when some new data is available.
         Callback is not given any arguments. It should *not* return a Deferred - if
         it needs to do any asynchronous work, a background thread should be started and
@@ -213,6 +220,12 @@ class Notifier(object):
         """
         self.replication_callbacks.append(cb)
 
+    def add_remote_server_up_callback(self, cb: Callable[[str], None]):
+        """Add a callback that will be called when synapse detects a server
+        has been
+        """
+        self.remote_server_up_callbacks.append(cb)
+
     def on_new_room_event(
         self, event, room_stream_id, max_room_stream_id, extra_users=[]
     ):
@@ -522,3 +535,15 @@ class Notifier(object):
         """Notify the any replication listeners that there's a new event"""
         for cb in self.replication_callbacks:
             cb()
+
+    def notify_remote_server_up(self, server: str):
+        """Notify any replication that a remote server has come back up
+        """
+        # We call federation_sender directly rather than registering as a
+        # callback as a) we already have a reference to it and b) it introduces
+        # circular dependencies.
+        if self.federation_sender:
+            self.federation_sender.wake_destination(server)
+
+        for cb in self.remote_server_up_callbacks:
+            cb(server)
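
Usage sketch for the new hook (illustrative callback): callbacks receive the
server name and, like replication callbacks, must be plain synchronous
functions.

    def _flush_for_server(server: str) -> None:
        logger.info("remote %s is back up, flushing queued work", server)

    notifier.add_remote_server_up_callback(_flush_for_server)
    notifier.notify_remote_server_up("remote.example.com")
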
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index de5c101a58..5dae4648c0 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -21,7 +21,7 @@ from synapse.storage import Storage
 
 @defer.inlineCallbacks
 def get_badge_count(store, user_id):
-    invites = yield store.get_invited_rooms_for_user(user_id)
+    invites = yield store.get_invited_rooms_for_local_user(user_id)
     joins = yield store.get_rooms_for_user(user_id)
 
     my_receipts_by_room = yield store.get_receipts_for_user(user_id, "m.read")
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index c8056b0c0c..444eb7b7f4 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -16,6 +16,7 @@
 import abc
 import logging
 import re
+from typing import Dict, List, Tuple
 
 from six import raise_from
 from six.moves import urllib
@@ -78,9 +79,8 @@ class ReplicationEndpoint(object):
 
     __metaclass__ = abc.ABCMeta
 
-    NAME = abc.abstractproperty()
-    PATH_ARGS = abc.abstractproperty()
-
+    NAME = abc.abstractproperty()  # type: str  # type: ignore
+    PATH_ARGS = abc.abstractproperty()  # type: Tuple[str, ...]  # type: ignore
     METHOD = "POST"
     CACHE = True
     RETRY_ON_TIMEOUT = True
@@ -171,7 +171,7 @@ class ReplicationEndpoint(object):
                 # have a good idea that the request has either succeeded or failed on
                 # the master, and so whether we should clean up or not.
                 while True:
-                    headers = {}
+                    headers = {}  # type: Dict[bytes, List[bytes]]
                     inject_active_span_byte_dict(headers, None, check_destination=False)
                     try:
                         result = yield request_func(uri, data, headers=headers)
@@ -207,7 +207,7 @@ class ReplicationEndpoint(object):
         method = self.METHOD
 
         if self.CACHE:
-            handler = self._cached_handler
+            handler = self._cached_handler  # type: ignore
             url_args.append("txn_id")
 
         args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args)
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index b91a528245..f45cbd37a0 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 import logging
-from typing import Dict
+from typing import Dict, Optional
 
 import six
 
@@ -41,7 +41,7 @@ class BaseSlavedStore(SQLBaseStore):
         if isinstance(self.database_engine, PostgresEngine):
             self._cache_id_gen = SlavedIdTracker(
                 db_conn, "cache_invalidation_stream", "stream_id"
-            )
+            )  # type: Optional[SlavedIdTracker]
         else:
             self._cache_id_gen = None
 
@@ -62,14 +62,20 @@ class BaseSlavedStore(SQLBaseStore):
 
     def process_replication_rows(self, stream_name, token, rows):
         if stream_name == "caches":
-            self._cache_id_gen.advance(token)
+            if self._cache_id_gen:
+                self._cache_id_gen.advance(token)
             for row in rows:
                 if row.cache_func == CURRENT_STATE_CACHE_NAME:
+                    if row.keys is None:
+                        raise Exception(
+                            "Can't send an 'invalidate all' for current state cache"
+                        )
+
                     room_id = row.keys[0]
                     members_changed = set(row.keys[1:])
                     self._invalidate_state_caches(room_id, members_changed)
                 else:
-                    self._attempt_to_invalidate_cache(row.cache_func, tuple(row.keys))
+                    self._attempt_to_invalidate_cache(row.cache_func, row.keys)
 
     def _invalidate_cache_and_stream(self, txn, cache_func, keys):
         txn.call_after(cache_func.invalidate, keys)
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index 29f35b9915..3aa6cb8b96 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -152,7 +152,7 @@ class SlavedEventStore(
 
         if etype == EventTypes.Member:
             self._membership_stream_cache.entity_has_changed(state_key, stream_ordering)
-            self.get_invited_rooms_for_user.invalidate((state_key,))
+            self.get_invited_rooms_for_local_user.invalidate((state_key,))
 
         if relates_to:
             self.get_relations_for_event.invalidate_many((relates_to,))
diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py
index f552e7c972..ad8f0c15a9 100644
--- a/synapse/replication/slave/storage/presence.py
+++ b/synapse/replication/slave/storage/presence.py
@@ -29,7 +29,7 @@ class SlavedPresenceStore(BaseSlavedStore):
 
         self._presence_on_startup = self._get_active_presence(db_conn)
 
-        self.presence_stream_cache = self.presence_stream_cache = StreamChangeCache(
+        self.presence_stream_cache = StreamChangeCache(
             "PresenceStreamChangeCache", self._presence_id_gen.get_current_token()
         )
 
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index bbcb84646c..fc06a7b053 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -16,7 +16,7 @@
 """
 
 import logging
-from typing import Dict
+from typing import Dict, List, Optional
 
 from twisted.internet import defer
 from twisted.internet.protocol import ReconnectingClientFactory
@@ -28,6 +28,7 @@ from synapse.replication.tcp.protocol import (
 )
 
 from .commands import (
+    Command,
     FederationAckCommand,
     InvalidateCacheCommand,
     RemovePusherCommand,
@@ -89,15 +90,15 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
 
         # Any pending commands to be sent once a new connection has been
         # established
-        self.pending_commands = []
+        self.pending_commands = []  # type: List[Command]
 
        # Map from string -> deferred, to wake up when receiving a SYNC with
         # the given string.
         # Used for tests.
-        self.awaiting_syncs = {}
+        self.awaiting_syncs = {}  # type: Dict[str, defer.Deferred]
 
         # The factory used to create connections.
-        self.factory = None
+        self.factory = None  # type: Optional[ReplicationClientFactory]
 
     def start_replication(self, hs):
         """Helper method to start a replication connection to the remote server
@@ -109,7 +110,7 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
         port = hs.config.worker_replication_port
         hs.get_reactor().connectTCP(host, port, self.factory)
 
-    def on_rdata(self, stream_name, token, rows):
+    async def on_rdata(self, stream_name, token, rows):
         """Called to handle a batch of replication data with a given stream token.
 
         By default this just pokes the slave store. Can be overridden in subclasses to
@@ -120,20 +121,17 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
             token (int): stream token for this batch of rows
             rows (list): a list of Stream.ROW_TYPE objects as returned by
                 Stream.parse_row.
-
-        Returns:
-            Deferred|None
         """
         logger.debug("Received rdata %s -> %s", stream_name, token)
-        return self.store.process_replication_rows(stream_name, token, rows)
+        self.store.process_replication_rows(stream_name, token, rows)
 
-    def on_position(self, stream_name, token):
+    async def on_position(self, stream_name, token):
         """Called when we get new position data. By default this just pokes
         the slave store.
 
        Can be overridden in subclasses to handle more.
         """
-        return self.store.process_replication_rows(stream_name, token, [])
+        self.store.process_replication_rows(stream_name, token, [])
 
     def on_sync(self, data):
         """When we received a SYNC we wake up any deferreds that were waiting
@@ -145,6 +143,9 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
         if d:
             d.callback(data)
 
+    def on_remote_server_up(self, server: str):
+        """Called when get a new REMOTE_SERVER_UP command."""
+
     def get_streams_to_replicate(self) -> Dict[str, int]:
         """Called when a new connection has been established and we need to
         subscribe to streams.
@@ -235,4 +236,5 @@ class ReplicationClientHandler(AbstractReplicationClientHandler):
         # We don't reset the delay any earlier as otherwise if there is a
         # problem during start up we'll end up tight looping connecting to the
         # server.
-        self.factory.resetDelay()
+        if self.factory:
+            self.factory.resetDelay()
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 0ff2a7199f..451671412d 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -20,15 +20,16 @@ allowed to be sent by which side.
 
 import logging
 import platform
+from typing import Tuple, Type
 
 if platform.python_implementation() == "PyPy":
     import json
 
     _json_encoder = json.JSONEncoder()
 else:
-    import simplejson as json
+    import simplejson as json  # type: ignore[no-redef]  # noqa: F821
 
-    _json_encoder = json.JSONEncoder(namedtuple_as_object=False)
+    _json_encoder = json.JSONEncoder(namedtuple_as_object=False)  # type: ignore[call-arg]  # noqa: F821
 
 logger = logging.getLogger(__name__)
 
@@ -44,7 +45,7 @@ class Command(object):
     The default implementation creates a command of form `<NAME> <data>`
     """
 
-    NAME = None
+    NAME = None  # type: str
 
     def __init__(self, data):
         self.data = data
@@ -386,25 +387,39 @@ class UserIpCommand(Command):
         )
 
 
+class RemoteServerUpCommand(Command):
+    """Sent when a worker has detected that a remote server is no longer
+    "down" and retry timings should be reset.
+
+    If sent from a client the server will relay to all other workers.
+
+    Format::
+
+        REMOTE_SERVER_UP <server>
+    """
+
+    NAME = "REMOTE_SERVER_UP"
+
+
+_COMMANDS = (
+    ServerCommand,
+    RdataCommand,
+    PositionCommand,
+    ErrorCommand,
+    PingCommand,
+    NameCommand,
+    ReplicateCommand,
+    UserSyncCommand,
+    FederationAckCommand,
+    SyncCommand,
+    RemovePusherCommand,
+    InvalidateCacheCommand,
+    UserIpCommand,
+    RemoteServerUpCommand,
+)  # type: Tuple[Type[Command], ...]
+
 # Map of command name to command type.
-COMMAND_MAP = {
-    cmd.NAME: cmd
-    for cmd in (
-        ServerCommand,
-        RdataCommand,
-        PositionCommand,
-        ErrorCommand,
-        PingCommand,
-        NameCommand,
-        ReplicateCommand,
-        UserSyncCommand,
-        FederationAckCommand,
-        SyncCommand,
-        RemovePusherCommand,
-        InvalidateCacheCommand,
-        UserIpCommand,
-    )
-}
+COMMAND_MAP = {cmd.NAME: cmd for cmd in _COMMANDS}
 
 # The commands the server is allowed to send
 VALID_SERVER_COMMANDS = (
@@ -414,6 +429,7 @@ VALID_SERVER_COMMANDS = (
     ErrorCommand.NAME,
     PingCommand.NAME,
     SyncCommand.NAME,
+    RemoteServerUpCommand.NAME,
 )
 
 # The commands the client is allowed to send
@@ -427,4 +443,5 @@ VALID_CLIENT_COMMANDS = (
     InvalidateCacheCommand.NAME,
     UserIpCommand.NAME,
     ErrorCommand.NAME,
+    RemoteServerUpCommand.NAME,
 )
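
A quick sketch of the resulting framing, relying on Command's default
to_line/from_line (which carry the payload verbatim after the command name):

    cmd = RemoteServerUpCommand("down.example.com")
    assert cmd.NAME == "REMOTE_SERVER_UP"
    assert cmd.to_line() == "down.example.com"
    # On the wire this is sent as: REMOTE_SERVER_UP down.example.com

    parsed = COMMAND_MAP["REMOTE_SERVER_UP"].from_line("down.example.com")
    assert parsed.data == "down.example.com"
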
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index afaf002fe6..131e5acb09 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -53,6 +53,7 @@ import fcntl
 import logging
 import struct
 from collections import defaultdict
+from typing import Any, DefaultDict, Dict, List, Set, Tuple
 
 from six import iteritems, iterkeys
 
@@ -65,24 +66,26 @@ from twisted.python.failure import Failure
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.util import Clock
-from synapse.util.stringutils import random_string
-
-from .commands import (
+from synapse.replication.tcp.commands import (
     COMMAND_MAP,
     VALID_CLIENT_COMMANDS,
     VALID_SERVER_COMMANDS,
+    Command,
     ErrorCommand,
     NameCommand,
     PingCommand,
     PositionCommand,
     RdataCommand,
+    RemoteServerUpCommand,
     ReplicateCommand,
     ServerCommand,
     SyncCommand,
     UserSyncCommand,
 )
-from .streams import STREAMS_MAP
+from synapse.replication.tcp.streams import STREAMS_MAP
+from synapse.types import Collection
+from synapse.util import Clock
+from synapse.util.stringutils import random_string
 
 connection_close_counter = Counter(
     "synapse_replication_tcp_protocol_close_reason", "", ["reason_type"]
@@ -124,8 +127,11 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
 
     delimiter = b"\n"
 
-    VALID_INBOUND_COMMANDS = []  # Valid commands we expect to receive
-    VALID_OUTBOUND_COMMANDS = []  # Valid commans we can send
+    # Valid commands we expect to receive
+    VALID_INBOUND_COMMANDS = []  # type: Collection[str]
+
+    # Valid commands we can send
+    VALID_OUTBOUND_COMMANDS = []  # type: Collection[str]
 
     max_line_buffer = 10000
 
@@ -144,13 +150,13 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         self.conn_id = random_string(5)  # To dedupe in case of name clashes.
 
         # List of pending commands to send once we've established the connection
-        self.pending_commands = []
+        self.pending_commands = []  # type: List[Command]
 
         # The LoopingCall for sending pings.
         self._send_ping_loop = None
 
-        self.inbound_commands_counter = defaultdict(int)
-        self.outbound_commands_counter = defaultdict(int)
+        self.inbound_commands_counter = defaultdict(int)  # type: DefaultDict[str, int]
+        self.outbound_commands_counter = defaultdict(int)  # type: DefaultDict[str, int]
 
     def connectionMade(self):
         logger.info("[%s] Connection established", self.id())
@@ -235,19 +241,16 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
             "replication-" + cmd.get_logcontext_id(), self.handle_command, cmd
         )
 
-    def handle_command(self, cmd):
+    async def handle_command(self, cmd: Command):
         """Handle a command we have received over the replication stream.
 
-        By default delegates to on_<COMMAND>
+        By default delegates to on_<COMMAND>, which should return an awaitable.
 
         Args:
-            cmd (synapse.replication.tcp.commands.Command): received command
-
-        Returns:
-            Deferred
+            cmd: received command
         """
         handler = getattr(self, "on_%s" % (cmd.NAME,))
-        return handler(cmd)
+        await handler(cmd)
 
     def close(self):
         logger.warning("[%s] Closing connection", self.id())
@@ -320,10 +323,10 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
         for cmd in pending:
             self.send_command(cmd)
 
-    def on_PING(self, line):
+    async def on_PING(self, line):
         self.received_ping = True
 
-    def on_ERROR(self, cmd):
+    async def on_ERROR(self, cmd):
         logger.error("[%s] Remote reported error: %r", self.id(), cmd.data)
 
     def pauseProducing(self):
@@ -409,30 +412,30 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
         self.streamer = streamer
 
         # The streams the client has subscribed to and is up to date with
-        self.replication_streams = set()
+        self.replication_streams = set()  # type: Set[str]
 
         # The streams the client is currently subscribing to.
-        self.connecting_streams = set()
+        self.connecting_streams = set()  # type: Set[str]
 
         # Map from stream name to list of updates to send once we've finished
         # subscribing the client to the stream.
-        self.pending_rdata = {}
+        self.pending_rdata = {}  # type: Dict[str, List[Tuple[int, Any]]]
 
     def connectionMade(self):
         self.send_command(ServerCommand(self.server_name))
         BaseReplicationStreamProtocol.connectionMade(self)
         self.streamer.new_connection(self)
 
-    def on_NAME(self, cmd):
+    async def on_NAME(self, cmd):
         logger.info("[%s] Renamed to %r", self.id(), cmd.data)
         self.name = cmd.data
 
-    def on_USER_SYNC(self, cmd):
-        return self.streamer.on_user_sync(
+    async def on_USER_SYNC(self, cmd):
+        await self.streamer.on_user_sync(
             self.conn_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms
         )
 
-    def on_REPLICATE(self, cmd):
+    async def on_REPLICATE(self, cmd):
         stream_name = cmd.stream_name
         token = cmd.token
 
@@ -443,23 +446,26 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
                 for stream in iterkeys(self.streamer.streams_by_name)
             ]
 
-            return make_deferred_yieldable(
+            await make_deferred_yieldable(
                 defer.gatherResults(deferreds, consumeErrors=True)
             )
         else:
-            return self.subscribe_to_stream(stream_name, token)
+            await self.subscribe_to_stream(stream_name, token)
 
-    def on_FEDERATION_ACK(self, cmd):
-        return self.streamer.federation_ack(cmd.token)
+    async def on_FEDERATION_ACK(self, cmd):
+        self.streamer.federation_ack(cmd.token)
 
-    def on_REMOVE_PUSHER(self, cmd):
-        return self.streamer.on_remove_pusher(cmd.app_id, cmd.push_key, cmd.user_id)
+    async def on_REMOVE_PUSHER(self, cmd):
+        await self.streamer.on_remove_pusher(cmd.app_id, cmd.push_key, cmd.user_id)
 
-    def on_INVALIDATE_CACHE(self, cmd):
-        return self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys)
+    async def on_INVALIDATE_CACHE(self, cmd):
+        self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys)
 
-    def on_USER_IP(self, cmd):
-        return self.streamer.on_user_ip(
+    async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand):
+        self.streamer.on_remote_server_up(cmd.data)
+
+    async def on_USER_IP(self, cmd):
+        self.streamer.on_user_ip(
             cmd.user_id,
             cmd.access_token,
             cmd.ip,
@@ -468,8 +474,7 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
             cmd.last_seen,
         )
 
-    @defer.inlineCallbacks
-    def subscribe_to_stream(self, stream_name, token):
+    async def subscribe_to_stream(self, stream_name, token):
         """Subscribe the remote to a stream.
 
        This involves checking if they've missed anything and sending those
@@ -481,7 +486,7 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
 
         try:
             # Get missing updates
-            updates, current_token = yield self.streamer.get_stream_updates(
+            updates, current_token = await self.streamer.get_stream_updates(
                 stream_name, token
             )
 
@@ -554,6 +559,9 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
     def send_sync(self, data):
         self.send_command(SyncCommand(data))
 
+    def send_remote_server_up(self, server: str):
+        self.send_command(RemoteServerUpCommand(server))
+
     def on_connection_closed(self):
         BaseReplicationStreamProtocol.on_connection_closed(self)
         self.streamer.lost_connection(self)
@@ -566,7 +574,7 @@ class AbstractReplicationClientHandler(metaclass=abc.ABCMeta):
     """
 
     @abc.abstractmethod
-    def on_rdata(self, stream_name, token, rows):
+    async def on_rdata(self, stream_name, token, rows):
         """Called to handle a batch of replication data with a given stream token.
 
         Args:
@@ -574,14 +582,11 @@ class AbstractReplicationClientHandler(metaclass=abc.ABCMeta):
             token (int): stream token for this batch of rows
             rows (list): a list of Stream.ROW_TYPE objects as returned by
                 Stream.parse_row.
-
-        Returns:
-            Deferred|None
         """
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def on_position(self, stream_name, token):
+    async def on_position(self, stream_name, token):
         """Called when we get new position data."""
         raise NotImplementedError()
 
@@ -591,6 +596,11 @@ class AbstractReplicationClientHandler(metaclass=abc.ABCMeta):
         raise NotImplementedError()
 
     @abc.abstractmethod
+    async def on_remote_server_up(self, server: str):
+        """Called when get a new REMOTE_SERVER_UP command."""
+        raise NotImplementedError()
+
+    @abc.abstractmethod
     def get_streams_to_replicate(self):
         """Called when a new connection has been established and we need to
         subscribe to streams.
@@ -642,11 +652,11 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
        # Set of stream names that have been subscribed to, but haven't yet
         # caught up with. This is used to track when the client has been fully
         # connected to the remote.
-        self.streams_connecting = set()
+        self.streams_connecting = set()  # type: Set[str]
 
         # Map of stream to batched updates. See RdataCommand for info on how
         # batching works.
-        self.pending_batches = {}
+        self.pending_batches = {}  # type: Dict[str, Any]
 
     def connectionMade(self):
         self.send_command(NameCommand(self.client_name))
@@ -670,12 +680,12 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
         if not self.streams_connecting:
             self.handler.finished_connecting()
 
-    def on_SERVER(self, cmd):
+    async def on_SERVER(self, cmd):
         if cmd.data != self.server_name:
             logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data)
             self.send_error("Wrong remote")
 
-    def on_RDATA(self, cmd):
+    async def on_RDATA(self, cmd):
         stream_name = cmd.stream_name
         inbound_rdata_count.labels(stream_name).inc()
 
@@ -695,19 +705,22 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
             # Check if this is the last of a batch of updates
             rows = self.pending_batches.pop(stream_name, [])
             rows.append(row)
-            return self.handler.on_rdata(stream_name, cmd.token, rows)
+            await self.handler.on_rdata(stream_name, cmd.token, rows)
 
-    def on_POSITION(self, cmd):
+    async def on_POSITION(self, cmd):
         # When we get a `POSITION` command it means we've finished getting
         # missing updates for the given stream, and are now up to date.
         self.streams_connecting.discard(cmd.stream_name)
         if not self.streams_connecting:
             self.handler.finished_connecting()
 
-        return self.handler.on_position(cmd.stream_name, cmd.token)
+        await self.handler.on_position(cmd.stream_name, cmd.token)
+
+    async def on_SYNC(self, cmd):
+        self.handler.on_sync(cmd.data)
 
-    def on_SYNC(self, cmd):
-        return self.handler.on_sync(cmd.data)
+    async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand):
+        self.handler.on_remote_server_up(cmd.data)
 
     def replicate(self, stream_name, token):
         """Send the subscription request to the server
@@ -766,7 +779,7 @@ def transport_kernel_read_buffer_size(protocol, read=True):
             op = SIOCINQ
         else:
             op = SIOCOUTQ
-        size = struct.unpack("I", fcntl.ioctl(fileno, op, "\0\0\0\0"))[0]
+        size = struct.unpack("I", fcntl.ioctl(fileno, op, b"\0\0\0\0"))[0]
         return size
     return 0
 
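
A self-contained sketch of the dispatch convention handle_command now relies
on, with async on_<NAME> methods resolved via getattr (illustrative; asyncio
is used here for brevity rather than Twisted):

    import asyncio

    class PingCommand:
        NAME = "PING"

        def __init__(self, data):
            self.data = data

    class MiniProtocol:
        async def handle_command(self, cmd):
            handler = getattr(self, "on_%s" % (cmd.NAME,))
            await handler(cmd)

        async def on_PING(self, cmd):
            print("got ping at", cmd.data)

    asyncio.run(MiniProtocol().handle_command(PingCommand("1579700000000")))
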
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index d1e98428bc..6ebf944f66 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -17,12 +17,12 @@
 
 import logging
 import random
+from typing import List
 
 from six import itervalues
 
 from prometheus_client import Counter
 
-from twisted.internet import defer
 from twisted.internet.protocol import Factory
 
 from synapse.metrics import LaterGauge
@@ -79,7 +79,7 @@ class ReplicationStreamer(object):
         self._replication_torture_level = hs.config.replication_torture_level
 
         # Current connections.
-        self.connections = []
+        self.connections = []  # type: List[ServerReplicationStreamProtocol]
 
         LaterGauge(
             "synapse_replication_tcp_resource_total_connections",
@@ -120,6 +120,7 @@ class ReplicationStreamer(object):
             self.federation_sender = hs.get_federation_sender()
 
         self.notifier.add_replication_callback(self.on_notifier_poke)
+        self.notifier.add_remote_server_up_callback(self.send_remote_server_up)
 
         # Keeps track of whether we are currently checking for updates
         self.is_looping = False
@@ -154,8 +155,7 @@ class ReplicationStreamer(object):
 
         run_as_background_process("replication_notifier", self._run_notifier_loop)
 
-    @defer.inlineCallbacks
-    def _run_notifier_loop(self):
+    async def _run_notifier_loop(self):
         self.is_looping = True
 
         try:
@@ -184,7 +184,7 @@ class ReplicationStreamer(object):
                             continue
 
                         if self._replication_torture_level:
-                            yield self.clock.sleep(
+                            await self.clock.sleep(
                                 self._replication_torture_level / 1000.0
                             )
 
@@ -195,7 +195,7 @@ class ReplicationStreamer(object):
                             stream.upto_token,
                         )
                         try:
-                            updates, current_token = yield stream.get_updates()
+                            updates, current_token = await stream.get_updates()
                         except Exception:
                             logger.info("Failed to handle stream %s", stream.NAME)
                             raise
@@ -232,7 +232,7 @@ class ReplicationStreamer(object):
             self.is_looping = False
 
     @measure_func("repl.get_stream_updates")
-    def get_stream_updates(self, stream_name, token):
+    async def get_stream_updates(self, stream_name, token):
         """For a given stream get all updates since token. This is called when
         a client first subscribes to a stream.
         """
@@ -240,7 +240,7 @@ class ReplicationStreamer(object):
         if not stream:
             raise Exception("unknown stream %s", stream_name)
 
-        return stream.get_updates_since(token)
+        return await stream.get_updates_since(token)
 
     @measure_func("repl.federation_ack")
     def federation_ack(self, token):
@@ -251,22 +251,20 @@ class ReplicationStreamer(object):
             self.federation_sender.federation_ack(token)
 
     @measure_func("repl.on_user_sync")
-    @defer.inlineCallbacks
-    def on_user_sync(self, conn_id, user_id, is_syncing, last_sync_ms):
+    async def on_user_sync(self, conn_id, user_id, is_syncing, last_sync_ms):
         """A client has started/stopped syncing on a worker.
         """
         user_sync_counter.inc()
-        yield self.presence_handler.update_external_syncs_row(
+        await self.presence_handler.update_external_syncs_row(
             conn_id, user_id, is_syncing, last_sync_ms
         )
 
     @measure_func("repl.on_remove_pusher")
-    @defer.inlineCallbacks
-    def on_remove_pusher(self, app_id, push_key, user_id):
+    async def on_remove_pusher(self, app_id, push_key, user_id):
         """A client has asked us to remove a pusher
         """
         remove_pusher_counter.inc()
-        yield self.store.delete_pusher_by_app_id_pushkey_user_id(
+        await self.store.delete_pusher_by_app_id_pushkey_user_id(
             app_id=app_id, pushkey=push_key, user_id=user_id
         )
 
@@ -280,15 +278,24 @@ class ReplicationStreamer(object):
         getattr(self.store, cache_func).invalidate(tuple(keys))
 
     @measure_func("repl.on_user_ip")
-    @defer.inlineCallbacks
-    def on_user_ip(self, user_id, access_token, ip, user_agent, device_id, last_seen):
+    async def on_user_ip(
+        self, user_id, access_token, ip, user_agent, device_id, last_seen
+    ):
         """The client saw a user request
         """
         user_ip_cache_counter.inc()
-        yield self.store.insert_client_ip(
+        await self.store.insert_client_ip(
             user_id, access_token, ip, user_agent, device_id, last_seen
         )
-        yield self._server_notices_sender.on_user_ip(user_id)
+        await self._server_notices_sender.on_user_ip(user_id)
+
+    @measure_func("repl.on_remote_server_up")
+    def on_remote_server_up(self, server: str):
+        self.notifier.notify_remote_server_up(server)
+
+    def send_remote_server_up(self, server: str):
+        for conn in self.connections:
+            conn.send_remote_server_up(server)
 
     def send_sync_to_all_connections(self, data):
         """Sends a SYNC command to all clients.
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 8512923eae..a8d568b14a 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -14,12 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import itertools
 import logging
 from collections import namedtuple
+from typing import Any, List, Optional
 
-from twisted.internet import defer
+import attr
 
 logger = logging.getLogger(__name__)
 
@@ -67,10 +67,24 @@ PushersStreamRow = namedtuple(
     "PushersStreamRow",
     ("user_id", "app_id", "pushkey", "deleted"),  # str  # str  # str  # bool
 )
-CachesStreamRow = namedtuple(
-    "CachesStreamRow",
-    ("cache_func", "keys", "invalidation_ts"),  # str  # list(str)  # int
-)
+
+
+@attr.s
+class CachesStreamRow:
+    """Stream to inform workers they should invalidate their cache.
+
+    Attributes:
+        cache_func: Name of the cached function.
+        keys: The entry in the cache to invalidate. If None then the
+            entire cache will be invalidated.
+        invalidation_ts: Timestamp of when the invalidation took place.
+    """
+
+    cache_func = attr.ib(type=str)
+    keys = attr.ib(type=Optional[List[Any]])
+    invalidation_ts = attr.ib(type=int)
+
+
 PublicRoomsStreamRow = namedtuple(
     "PublicRoomsStreamRow",
     (
@@ -104,8 +118,9 @@ class Stream(object):
     time it was called up until the point `advance_current_token` was called.
     """
 
-    NAME = None  # The name of the stream
-    ROW_TYPE = None  # The type of the row. Used by the default impl of parse_row.
+    NAME = None  # type: str  # The name of the stream
+    # The type of the row. Used by the default impl of parse_row.
+    ROW_TYPE = None  # type: Any
     _LIMITED = True  # Whether the update function takes a limit
 
     @classmethod
@@ -143,8 +158,7 @@ class Stream(object):
         self.upto_token = self.current_token()
         self.last_token = self.upto_token
 
-    @defer.inlineCallbacks
-    def get_updates(self):
+    async def get_updates(self):
         """Gets all updates since the last time this function was called (or
         since the stream was constructed if it hadn't been called before),
         until the `upto_token`
@@ -155,13 +169,12 @@ class Stream(object):
                 list of ``(token, row)`` entries. ``row`` will be json-serialised and
                 sent over the replication stream.
         """
-        updates, current_token = yield self.get_updates_since(self.last_token)
+        updates, current_token = await self.get_updates_since(self.last_token)
         self.last_token = current_token
 
         return updates, current_token
 
-    @defer.inlineCallbacks
-    def get_updates_since(self, from_token):
+    async def get_updates_since(self, from_token):
         """Like get_updates except allows specifying from when we should
         stream updates
 
@@ -181,15 +194,16 @@ class Stream(object):
         if from_token == current_token:
             return [], current_token
 
+        logger.info("get_updates_since: %s", self.__class__)
         if self._LIMITED:
-            rows = yield self.update_function(
+            rows = await self.update_function(
                 from_token, current_token, limit=MAX_EVENTS_BEHIND + 1
             )
 
             # never turn more than MAX_EVENTS_BEHIND + 1 into updates.
             rows = itertools.islice(rows, MAX_EVENTS_BEHIND + 1)
         else:
-            rows = yield self.update_function(from_token, current_token)
+            rows = await self.update_function(from_token, current_token)
 
         updates = [(row[0], row[1:]) for row in rows]
 
@@ -231,8 +245,8 @@ class BackfillStream(Stream):
 
     def __init__(self, hs):
         store = hs.get_datastore()
-        self.current_token = store.get_current_backfill_token
-        self.update_function = store.get_all_new_backfill_event_rows
+        self.current_token = store.get_current_backfill_token  # type: ignore
+        self.update_function = store.get_all_new_backfill_event_rows  # type: ignore
 
         super(BackfillStream, self).__init__(hs)
 
@@ -246,8 +260,8 @@ class PresenceStream(Stream):
         store = hs.get_datastore()
         presence_handler = hs.get_presence_handler()
 
-        self.current_token = store.get_current_presence_token
-        self.update_function = presence_handler.get_all_presence_updates
+        self.current_token = store.get_current_presence_token  # type: ignore
+        self.update_function = presence_handler.get_all_presence_updates  # type: ignore
 
         super(PresenceStream, self).__init__(hs)
 
@@ -260,8 +274,8 @@ class TypingStream(Stream):
     def __init__(self, hs):
         typing_handler = hs.get_typing_handler()
 
-        self.current_token = typing_handler.get_current_token
-        self.update_function = typing_handler.get_all_typing_updates
+        self.current_token = typing_handler.get_current_token  # type: ignore
+        self.update_function = typing_handler.get_all_typing_updates  # type: ignore
 
         super(TypingStream, self).__init__(hs)
 
@@ -273,8 +287,8 @@ class ReceiptsStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_max_receipt_stream_id
-        self.update_function = store.get_all_updated_receipts
+        self.current_token = store.get_max_receipt_stream_id  # type: ignore
+        self.update_function = store.get_all_updated_receipts  # type: ignore
 
         super(ReceiptsStream, self).__init__(hs)
 
@@ -294,9 +308,8 @@ class PushRulesStream(Stream):
         push_rules_token, _ = self.store.get_push_rules_stream_token()
         return push_rules_token
 
-    @defer.inlineCallbacks
-    def update_function(self, from_token, to_token, limit):
-        rows = yield self.store.get_all_push_rule_updates(from_token, to_token, limit)
+    async def update_function(self, from_token, to_token, limit):
+        rows = await self.store.get_all_push_rule_updates(from_token, to_token, limit)
         return [(row[0], row[2]) for row in rows]
 
 
@@ -310,8 +323,8 @@ class PushersStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_pushers_stream_token
-        self.update_function = store.get_all_updated_pushers_rows
+        self.current_token = store.get_pushers_stream_token  # type: ignore
+        self.update_function = store.get_all_updated_pushers_rows  # type: ignore
 
         super(PushersStream, self).__init__(hs)
 
@@ -327,8 +340,8 @@ class CachesStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_cache_stream_token
-        self.update_function = store.get_all_updated_caches
+        self.current_token = store.get_cache_stream_token  # type: ignore
+        self.update_function = store.get_all_updated_caches  # type: ignore
 
         super(CachesStream, self).__init__(hs)
 
@@ -343,8 +356,8 @@ class PublicRoomsStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_current_public_room_stream_id
-        self.update_function = store.get_all_new_public_rooms
+        self.current_token = store.get_current_public_room_stream_id  # type: ignore
+        self.update_function = store.get_all_new_public_rooms  # type: ignore
 
         super(PublicRoomsStream, self).__init__(hs)
 
@@ -360,8 +373,8 @@ class DeviceListsStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_device_stream_token
-        self.update_function = store.get_all_device_list_changes_for_remotes
+        self.current_token = store.get_device_stream_token  # type: ignore
+        self.update_function = store.get_all_device_list_changes_for_remotes  # type: ignore
 
         super(DeviceListsStream, self).__init__(hs)
 
@@ -376,8 +389,8 @@ class ToDeviceStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_to_device_stream_token
-        self.update_function = store.get_all_new_device_messages
+        self.current_token = store.get_to_device_stream_token  # type: ignore
+        self.update_function = store.get_all_new_device_messages  # type: ignore
 
         super(ToDeviceStream, self).__init__(hs)
 
@@ -392,8 +405,8 @@ class TagAccountDataStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_max_account_data_stream_id
-        self.update_function = store.get_all_updated_tags
+        self.current_token = store.get_max_account_data_stream_id  # type: ignore
+        self.update_function = store.get_all_updated_tags  # type: ignore
 
         super(TagAccountDataStream, self).__init__(hs)
 
@@ -408,13 +421,12 @@ class AccountDataStream(Stream):
     def __init__(self, hs):
         self.store = hs.get_datastore()
 
-        self.current_token = self.store.get_max_account_data_stream_id
+        self.current_token = self.store.get_max_account_data_stream_id  # type: ignore
 
         super(AccountDataStream, self).__init__(hs)
 
-    @defer.inlineCallbacks
-    def update_function(self, from_token, to_token, limit):
-        global_results, room_results = yield self.store.get_all_updated_account_data(
+    async def update_function(self, from_token, to_token, limit):
+        global_results, room_results = await self.store.get_all_updated_account_data(
             from_token, from_token, to_token, limit
         )
 
@@ -434,8 +446,8 @@ class GroupServerStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_group_stream_token
-        self.update_function = store.get_all_groups_changes
+        self.current_token = store.get_group_stream_token  # type: ignore
+        self.update_function = store.get_all_groups_changes  # type: ignore
 
         super(GroupServerStream, self).__init__(hs)
 
@@ -451,7 +463,7 @@ class UserSignatureStream(Stream):
     def __init__(self, hs):
         store = hs.get_datastore()
 
-        self.current_token = store.get_device_stream_token
-        self.update_function = store.get_all_user_signature_changes_for_remotes
+        self.current_token = store.get_device_stream_token  # type: ignore
+        self.update_function = store.get_all_user_signature_changes_for_remotes  # type: ignore
 
         super(UserSignatureStream, self).__init__(hs)
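
`CachesStreamRow` above swaps a namedtuple for an attrs class so the fields
can carry types and the None-means-everything convention is documented. The
same pattern in isolation (runnable sketch with a hypothetical row class):

    from typing import Any, List, Optional

    import attr

    @attr.s
    class ToyCachesStreamRow:
        cache_func = attr.ib(type=str)
        # None is the sentinel for "invalidate the whole cache".
        keys = attr.ib(type=Optional[List[Any]])
        invalidation_ts = attr.ib(type=int)

    row = ToyCachesStreamRow("get_user_by_id", None, 1579700000000)
    assert row.keys is None  # whole-cache invalidation
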
diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py
index d97669c886..b3afabb8cd 100644
--- a/synapse/replication/tcp/streams/events.py
+++ b/synapse/replication/tcp/streams/events.py
@@ -13,12 +13,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import heapq
+from typing import Tuple, Type
 
 import attr
 
-from twisted.internet import defer
-
 from ._base import Stream
 
 
@@ -63,7 +63,8 @@ class BaseEventsStreamRow(object):
     Specifies how to identify, serialize and deserialize the different types.
     """
 
-    TypeId = None  # Unique string that ids the type. Must be overriden in sub classes.
+    # Unique string that ids the type. Must be overridden in subclasses.
+    TypeId = None  # type: str
 
     @classmethod
     def from_data(cls, data):
@@ -99,9 +100,12 @@ class EventsStreamCurrentStateRow(BaseEventsStreamRow):
     event_id = attr.ib()  # str, optional
 
 
-TypeToRow = {
-    Row.TypeId: Row for Row in (EventsStreamEventRow, EventsStreamCurrentStateRow)
-}
+_EventRows = (
+    EventsStreamEventRow,
+    EventsStreamCurrentStateRow,
+)  # type: Tuple[Type[BaseEventsStreamRow], ...]
+
+TypeToRow = {Row.TypeId: Row for Row in _EventRows}
 
 
 class EventsStream(Stream):
@@ -112,20 +116,19 @@ class EventsStream(Stream):
 
     def __init__(self, hs):
         self._store = hs.get_datastore()
-        self.current_token = self._store.get_current_events_token
+        self.current_token = self._store.get_current_events_token  # type: ignore
 
         super(EventsStream, self).__init__(hs)
 
-    @defer.inlineCallbacks
-    def update_function(self, from_token, current_token, limit=None):
-        event_rows = yield self._store.get_all_new_forward_event_rows(
+    async def update_function(self, from_token, current_token, limit=None):
+        event_rows = await self._store.get_all_new_forward_event_rows(
             from_token, current_token, limit
         )
         event_updates = (
             (row[0], EventsStreamEventRow.TypeId, row[1:]) for row in event_rows
         )
 
-        state_rows = yield self._store.get_all_updated_current_state_deltas(
+        state_rows = await self._store.get_all_updated_current_state_deltas(
             from_token, current_token, limit
         )
         state_updates = (
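
`EventsStream.update_function` builds two independently token-ordered
generators (event rows and current-state rows). Since `heapq` is imported at
the top of this module, the natural way to combine them, and presumably what
the truncated remainder of this hunk does, is a lazy token-ordered merge:

    import heapq

    event_updates = ((tok, "ev", ()) for tok in (1, 3, 5))
    state_updates = ((tok, "state", ()) for tok in (2, 4))

    # heapq.merge interleaves the two sorted generators without
    # materialising either one.
    merged = list(heapq.merge(event_updates, state_updates))
    assert [tok for tok, *_ in merged] == [1, 2, 3, 4, 5]
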
diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py
index dc2484109d..615f3dc9ac 100644
--- a/synapse/replication/tcp/streams/federation.py
+++ b/synapse/replication/tcp/streams/federation.py
@@ -37,7 +37,7 @@ class FederationStream(Stream):
     def __init__(self, hs):
         federation_sender = hs.get_federation_sender()
 
-        self.current_token = federation_sender.get_current_token
-        self.update_function = federation_sender.get_replication_rows
+        self.current_token = federation_sender.get_current_token  # type: ignore
+        self.update_function = federation_sender.get_replication_rows  # type: ignore
 
         super(FederationStream, self).__init__(hs)
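
The `# type: ignore` comments sprinkled through these stream classes silence
mypy where a callable attribute is rebound per instance, presumably because
the base `Stream` declares `current_token`/`update_function` as regular
methods that subclasses then overwrite in `__init__`. A condensed
illustration with hypothetical classes:

    class Base:
        def current_token(self) -> int:
            raise NotImplementedError

    class Source:
        def get_current_token(self) -> int:
            return 42

    class Sub(Base):
        def __init__(self, source: Source):
            # mypy rejects this ("Cannot assign to a method"), hence the
            # per-line ignores in the streams above.
            self.current_token = source.get_current_token  # type: ignore

    assert Sub(Source()).current_token() == 42
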
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index a10b4a9b72..42cc2b062a 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -29,7 +29,7 @@ from synapse.rest.admin._base import (
 from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
 from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
 from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet
-from synapse.rest.admin.rooms import ShutdownRoomRestServlet
+from synapse.rest.admin.rooms import ListRoomRestServlet, ShutdownRoomRestServlet
 from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
 from synapse.rest.admin.users import (
     AccountValidityRenewServlet,
@@ -107,7 +107,7 @@ class PurgeHistoryRestServlet(RestServlet):
 
             stream_ordering = await self.store.find_first_stream_ordering_after_ts(ts)
 
-            r = await self.store.get_room_event_after_stream_ordering(
+            r = await self.store.get_room_event_before_stream_ordering(
                 room_id, stream_ordering
             )
             if not r:
@@ -188,6 +188,7 @@ def register_servlets(hs, http_server):
     Register all the admin servlets.
     """
     register_servlets_for_client_rest_resource(hs, http_server)
+    ListRoomRestServlet(hs).register(http_server)
     PurgeRoomServlet(hs).register(http_server)
     SendServerNoticeServlet(hs).register(http_server)
     VersionServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py
index afd0647205..459482eb6d 100644
--- a/synapse/rest/admin/_base.py
+++ b/synapse/rest/admin/_base.py
@@ -40,6 +40,21 @@ def historical_admin_path_patterns(path_regex):
     )
 
 
+def admin_patterns(path_regex: str):
+    """Returns the list of patterns for an admin endpoint
+
+    Args:
+        path_regex: The regex string to match. This should NOT have a ^
+            as this will be prefixed.
+
+    Returns:
+        A list of regex patterns.
+    """
+    admin_prefix = "^/_synapse/admin/v1"
+    patterns = [re.compile(admin_prefix + path_regex)]
+    return patterns
+
+
 async def assert_requester_is_admin(auth, request):
     """Verify that the requester is an admin user
 
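
A usage sketch for the new helper: a servlet sets
`PATTERNS = admin_patterns("/rooms")` (as `ListRoomRestServlet` does later in
this commit) and the resulting compiled regex matches requests under the v1
admin prefix. Illustrative only, with a stand-in for the helper:

    import re

    def admin_patterns_sketch(path_regex: str):
        # Same shape as the helper above.
        return [re.compile("^/_synapse/admin/v1" + path_regex)]

    (pattern,) = admin_patterns_sketch("/rooms$")
    assert pattern.match("/_synapse/admin/v1/rooms")
    assert pattern.match("/_matrix/client/r0/rooms") is None
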
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index fa833e54cf..ee75095c0e 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -32,16 +32,24 @@ class QuarantineMediaInRoom(RestServlet):
     this server.
     """
 
-    PATTERNS = historical_admin_path_patterns("/quarantine_media/(?P<room_id>[^/]+)")
+    PATTERNS = (
+        historical_admin_path_patterns("/room/(?P<room_id>[^/]+)/media/quarantine")
+        +
+        # This path is kept around for legacy reasons
+        historical_admin_path_patterns("/quarantine_media/(?P<room_id>[^/]+)")
+    )
 
     def __init__(self, hs):
         self.store = hs.get_datastore()
         self.auth = hs.get_auth()
 
-    async def on_POST(self, request, room_id):
+    async def on_POST(self, request, room_id: str):
         requester = await self.auth.get_user_by_req(request)
         await assert_user_is_admin(self.auth, requester.user)
 
+        logging.info("Quarantining room: %s", room_id)
+
+        # Quarantine all media in this room
         num_quarantined = await self.store.quarantine_media_ids_in_room(
             room_id, requester.user.to_string()
         )
@@ -49,6 +57,60 @@ class QuarantineMediaInRoom(RestServlet):
         return 200, {"num_quarantined": num_quarantined}
 
 
+class QuarantineMediaByUser(RestServlet):
+    """Quarantines all local media by a given user so that no one can download it via
+    this server.
+    """
+
+    PATTERNS = historical_admin_path_patterns(
+        "/user/(?P<user_id>[^/]+)/media/quarantine"
+    )
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+
+    async def on_POST(self, request, user_id: str):
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        logging.info("Quarantining local media by user: %s", user_id)
+
+        # Quarantine all media this user has uploaded
+        num_quarantined = await self.store.quarantine_media_ids_by_user(
+            user_id, requester.user.to_string()
+        )
+
+        return 200, {"num_quarantined": num_quarantined}
+
+
+class QuarantineMediaByID(RestServlet):
+    """Quarantines local or remote media by a given ID so that no one can download
+    it via this server.
+    """
+
+    PATTERNS = historical_admin_path_patterns(
+        "/media/quarantine/(?P<server_name>[^/]+)/(?P<media_id>[^/]+)"
+    )
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+
+    async def on_POST(self, request, server_name: str, media_id: str):
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        logging.info("Quarantining local media by ID: %s/%s", server_name, media_id)
+
+        # Quarantine this media id
+        await self.store.quarantine_media_by_id(
+            server_name, media_id, requester.user.to_string()
+        )
+
+        return 200, {}
+
+
 class ListMediaInRoom(RestServlet):
     """Lists all of the media in a given room.
     """
@@ -94,4 +156,6 @@ def register_servlets_for_media_repo(hs, http_server):
     """
     PurgeMediaCacheRestServlet(hs).register(http_server)
     QuarantineMediaInRoom(hs).register(http_server)
+    QuarantineMediaByID(hs).register(http_server)
+    QuarantineMediaByUser(hs).register(http_server)
     ListMediaInRoom(hs).register(http_server)
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index f7cc5e9be9..f9b8c0a4f0 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -15,15 +15,20 @@
 import logging
 
 from synapse.api.constants import Membership
+from synapse.api.errors import Codes, SynapseError
 from synapse.http.servlet import (
     RestServlet,
     assert_params_in_dict,
+    parse_integer,
     parse_json_object_from_request,
+    parse_string,
 )
 from synapse.rest.admin._base import (
+    admin_patterns,
     assert_user_is_admin,
     historical_admin_path_patterns,
 )
+from synapse.storage.data_stores.main.room import RoomSortOrder
 from synapse.types import create_requester
 from synapse.util.async_helpers import maybe_awaitable
 
@@ -155,3 +160,80 @@ class ShutdownRoomRestServlet(RestServlet):
                 "new_room_id": new_room_id,
             },
         )
+
+
+class ListRoomRestServlet(RestServlet):
+    """
+    List all rooms that are known to the homeserver. Results are returned
+    in a dictionary containing room information. Supports pagination.
+    """
+
+    PATTERNS = admin_patterns("/rooms")
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+        self.admin_handler = hs.get_handlers().admin_handler
+
+    async def on_GET(self, request):
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        # Extract query parameters
+        start = parse_integer(request, "from", default=0)
+        limit = parse_integer(request, "limit", default=100)
+        order_by = parse_string(request, "order_by", default="alphabetical")
+        if order_by not in (
+            RoomSortOrder.ALPHABETICAL.value,
+            RoomSortOrder.SIZE.value,
+        ):
+            raise SynapseError(
+                400,
+                "Unknown value for order_by: %s" % (order_by,),
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        search_term = parse_string(request, "search_term")
+        if search_term == "":
+            raise SynapseError(
+                400,
+                "search_term cannot be an empty string",
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        direction = parse_string(request, "dir", default="f")
+        if direction not in ("f", "b"):
+            raise SynapseError(
+                400, "Unknown direction: %s" % (direction,), errcode=Codes.INVALID_PARAM
+            )
+
+        reverse_order = direction == "b"
+
+        # Return list of rooms according to parameters
+        rooms, total_rooms = await self.store.get_rooms_paginate(
+            start, limit, order_by, reverse_order, search_term
+        )
+        response = {
+            # next_token should be opaque, so return a value the client can parse
+            "offset": start,
+            "rooms": rooms,
+            "total_rooms": total_rooms,
+        }
+
+        # Are there more rooms to paginate through after this?
+        if (start + limit) < total_rooms:
+            # There are. Calculate where the query should start from next time
+            # to get the next part of the list
+            response["next_batch"] = start + limit
+
+        # Is it possible to paginate backwards? Check if we currently have an
+        # offset
+        if start > 0:
+            if start > limit:
+                # Going back one iteration won't take us to the start.
+                # Calculate new offset
+                response["prev_batch"] = start - limit
+            else:
+                response["prev_batch"] = 0
+
+        return 200, response
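
The next_batch/prev_batch bookkeeping above is plain offset arithmetic. A
worked sketch of the same logic as a hypothetical helper:

    def batch_tokens(start: int, limit: int, total: int) -> dict:
        tokens = {}
        if start + limit < total:
            # More rooms remain after this page.
            tokens["next_batch"] = start + limit
        if start > 0:
            # Stepping back a full page would overshoot the start, so clamp.
            tokens["prev_batch"] = start - limit if start > limit else 0
        return tokens

    # 250 rooms, second page of 100:
    assert batch_tokens(100, 100, 250) == {"next_batch": 200, "prev_batch": 0}
    # Final page: no next_batch.
    assert batch_tokens(200, 100, 250) == {"prev_batch": 100}
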
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 574cb90c74..927e9ca9ee 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -45,6 +45,7 @@ class UsersRestServlet(RestServlet):
 
     def __init__(self, hs):
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
         self.admin_handler = hs.get_handlers().admin_handler
 
@@ -55,7 +56,7 @@ class UsersRestServlet(RestServlet):
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "Can only users a local user")
 
-        ret = await self.admin_handler.get_users()
+        ret = await self.store.get_users()
 
         return 200, ret
 
@@ -80,6 +81,7 @@ class UsersRestServletV2(RestServlet):
 
     def __init__(self, hs):
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
         self.admin_handler = hs.get_handlers().admin_handler
 
@@ -92,7 +94,7 @@ class UsersRestServletV2(RestServlet):
         guests = parse_boolean(request, "guests", default=True)
         deactivated = parse_boolean(request, "deactivated", default=False)
 
-        users = await self.admin_handler.get_users_paginate(
+        users = await self.store.get_users_paginate(
             start, limit, user_id, guests, deactivated
         )
         ret = {"users": users}
@@ -193,8 +195,8 @@ class UserRestServletV2(RestServlet):
                     raise SynapseError(400, "Invalid password")
                 else:
                     new_password = body["password"]
-                    await self._set_password_handler.set_password(
-                        target_user, new_password, requester
+                    await self.set_password_handler.set_password(
+                        target_user.to_string(), new_password, requester
                     )
 
             if "deactivated" in body:
@@ -338,21 +340,22 @@ class UserRegisterServlet(RestServlet):
 
         got_mac = body["mac"]
 
-        want_mac = hmac.new(
+        want_mac_builder = hmac.new(
             key=self.hs.config.registration_shared_secret.encode(),
             digestmod=hashlib.sha1,
         )
-        want_mac.update(nonce.encode("utf8"))
-        want_mac.update(b"\x00")
-        want_mac.update(username)
-        want_mac.update(b"\x00")
-        want_mac.update(password)
-        want_mac.update(b"\x00")
-        want_mac.update(b"admin" if admin else b"notadmin")
+        want_mac_builder.update(nonce.encode("utf8"))
+        want_mac_builder.update(b"\x00")
+        want_mac_builder.update(username)
+        want_mac_builder.update(b"\x00")
+        want_mac_builder.update(password)
+        want_mac_builder.update(b"\x00")
+        want_mac_builder.update(b"admin" if admin else b"notadmin")
         if user_type:
-            want_mac.update(b"\x00")
-            want_mac.update(user_type.encode("utf8"))
-        want_mac = want_mac.hexdigest()
+            want_mac_builder.update(b"\x00")
+            want_mac_builder.update(user_type.encode("utf8"))
+
+        want_mac = want_mac_builder.hexdigest()
 
         if not hmac.compare_digest(want_mac.encode("ascii"), got_mac.encode("ascii")):
             raise SynapseError(403, "HMAC incorrect")
@@ -515,8 +518,8 @@ class SearchUsersRestServlet(RestServlet):
     PATTERNS = historical_admin_path_patterns("/search_users/(?P<target_user_id>[^/]*)")
 
     def __init__(self, hs):
-        self.store = hs.get_datastore()
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
         self.handlers = hs.get_handlers()
 
@@ -539,7 +542,7 @@ class SearchUsersRestServlet(RestServlet):
         term = parse_string(request, "term", required=True)
         logger.info("term: %s ", term)
 
-        ret = await self.handlers.admin_handler.search_users(term)
+        ret = await self.store.search_users(term)
         return 200, ret
 
 
@@ -573,8 +576,8 @@ class UserAdminServlet(RestServlet):
 
     def __init__(self, hs):
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
-        self.handlers = hs.get_handlers()
 
     async def on_GET(self, request, user_id):
         await assert_requester_is_admin(self.auth, request)
@@ -584,8 +587,7 @@ class UserAdminServlet(RestServlet):
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "Only local users can be admins of this homeserver")
 
-        is_admin = await self.handlers.admin_handler.get_user_server_admin(target_user)
-        is_admin = bool(is_admin)
+        is_admin = await self.store.is_server_admin(target_user)
 
         return 200, {"admin": is_admin}
 
@@ -608,8 +610,6 @@ class UserAdminServlet(RestServlet):
         if target_user == auth_user and not set_admin_to:
             raise SynapseError(400, "You may not demote yourself.")
 
-        await self.handlers.admin_handler.set_user_server_admin(
-            target_user, set_admin_to
-        )
+        await self.store.set_user_server_admin(target_user, set_admin_to)
 
         return 200, {}
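
For reference, the MAC that the renamed `want_mac_builder` verifies can be
reproduced client-side as follows. This is a sketch: the field order and NUL
separators are taken from the handler above, and `username` and `password`
are assumed to already be bytes:

    import hashlib
    import hmac
    from typing import Optional

    def registration_mac(
        shared_secret: bytes,
        nonce: str,
        username: bytes,
        password: bytes,
        admin: bool = False,
        user_type: Optional[str] = None,
    ) -> str:
        mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
        mac.update(nonce.encode("utf8"))
        mac.update(b"\x00")
        mac.update(username)
        mac.update(b"\x00")
        mac.update(password)
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        if user_type:
            mac.update(b"\x00")
            mac.update(user_type.encode("utf8"))
        return mac.hexdigest()
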
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index 4beb617733..25effd0261 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -70,7 +70,6 @@ class EventStreamRestServlet(RestServlet):
         return 200, {}
 
 
-# TODO: Unit test gets, with and without auth, with different kinds of events.
 class EventRestServlet(RestServlet):
     PATTERNS = client_patterns("/events/(?P<event_id>[^/]*)$", v1=True)
 
@@ -78,6 +77,7 @@ class EventRestServlet(RestServlet):
         super(EventRestServlet, self).__init__()
         self.clock = hs.get_clock()
         self.event_handler = hs.get_event_handler()
+        self.auth = hs.get_auth()
         self._event_serializer = hs.get_event_client_serializer()
 
     async def on_GET(self, request, event_id):
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index ff9c978fe7..1294e080dc 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -514,7 +514,7 @@ class CasTicketServlet(RestServlet):
             if user is None:
                 raise Exception("CAS response does not contain user")
         except Exception:
-            logger.error("Error parsing CAS response", exc_info=1)
+            logger.exception("Error parsing CAS response")
             raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED)
         if not success:
             raise LoginError(
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 711d4ad304..5aef8238b8 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -16,6 +16,7 @@
 
 """ This module contains REST servlets to do with rooms: /rooms/<paths> """
 import logging
+from typing import List, Optional
 
 from six.moves.urllib import parse as urlparse
 
@@ -207,7 +208,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
                 requester, event_dict, txn_id=txn_id
             )
 
-        ret = {}
+        ret = {}  # type: dict
         if event:
             set_tag("event_id", event.event_id)
             ret = {"event_id": event.event_id}
@@ -285,7 +286,7 @@ class JoinRoomAliasServlet(TransactionRestServlet):
             try:
                 remote_room_hosts = [
                     x.decode("ascii") for x in request.args[b"server_name"]
-                ]
+                ]  # type: Optional[List[str]]
             except Exception:
                 remote_room_hosts = None
         elif RoomAlias.is_valid(room_identifier):
@@ -375,7 +376,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
         server = parse_string(request, "server", default=None)
         content = parse_json_object_from_request(request)
 
-        limit = int(content.get("limit", 100))
+        limit = int(content.get("limit", 100))  # type: Optional[int]
         since_token = content.get("since", None)
         search_filter = content.get("filter", None)
 
@@ -504,11 +505,16 @@ class RoomMessageListRestServlet(RestServlet):
         filter_bytes = parse_string(request, b"filter", encoding=None)
         if filter_bytes:
             filter_json = urlparse.unquote(filter_bytes.decode("UTF-8"))
-            event_filter = Filter(json.loads(filter_json))
-            if event_filter.filter_json.get("event_format", "client") == "federation":
+            event_filter = Filter(json.loads(filter_json))  # type: Optional[Filter]
+            if (
+                event_filter
+                and event_filter.filter_json.get("event_format", "client")
+                == "federation"
+            ):
                 as_client_event = False
         else:
             event_filter = None
+
         msgs = await self.pagination_handler.get_messages(
             room_id=room_id,
             requester=requester,
@@ -611,7 +617,7 @@ class RoomEventContextServlet(RestServlet):
         filter_bytes = parse_string(request, "filter")
         if filter_bytes:
             filter_json = urlparse.unquote(filter_bytes)
-            event_filter = Filter(json.loads(filter_json))
+            event_filter = Filter(json.loads(filter_json))  # type: Optional[Filter]
         else:
             event_filter = None
 
diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py
index 2a3f4dd58f..bc11b4dda4 100644
--- a/synapse/rest/client/v2_alpha/_base.py
+++ b/synapse/rest/client/v2_alpha/_base.py
@@ -32,7 +32,7 @@ def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
 
     Args:
         path_regex (str): The regex string to match. This should NOT have a ^
-        as this will be prefixed.
+            as this will be prefixed.
     Returns:
         SRE_Pattern
     """
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index 7a256b6ecb..50e080673b 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -206,10 +206,6 @@ class AuthRestServlet(RestServlet):
 
             return None
         elif stagetype == LoginType.TERMS:
-            if ("session" not in request.args or len(request.args["session"])) == 0:
-                raise SynapseError(400, "No session supplied")
-
-            session = request.args["session"][0]
             authdict = {"session": session}
 
             success = await self.auth_handler.add_oob_auth(
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 66de16a1fa..1bda9aec7e 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -21,6 +21,7 @@ from typing import List, Union
 from six import string_types
 
 import synapse
+import synapse.api.auth
 import synapse.types
 from synapse.api.constants import LoginType
 from synapse.api.errors import (
@@ -405,7 +406,7 @@ class RegisterRestServlet(RestServlet):
             return ret
         elif kind != b"user":
             raise UnrecognizedRequestError(
-                "Do not understand membership kind: %s" % (kind,)
+                "Do not understand membership kind: %s" % (kind.decode("utf8"),)
             )
 
         # we do basic sanity checks here because the auth layer will store these
diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py
index 501b52fb6c..db829f3098 100644
--- a/synapse/rest/client/v2_alpha/sendtodevice.py
+++ b/synapse/rest/client/v2_alpha/sendtodevice.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Tuple
 
 from synapse.http import servlet
 from synapse.http.servlet import parse_json_object_from_request
@@ -60,7 +61,7 @@ class SendToDeviceRestServlet(servlet.RestServlet):
             sender_user_id, message_type, content["messages"]
         )
 
-        response = (200, {})
+        response = (200, {})  # type: Tuple[int, dict]
         return response
 
 
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 2a477ad22e..3d0fefb4df 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -71,6 +71,8 @@ class VersionsRestServlet(RestServlet):
                     # Implements support for label-based filtering as described in
                     # MSC2326.
                     "org.matrix.label_based_filtering": True,
+                    # Implements support for cross signing as described in MSC1756
+                    "org.matrix.e2e_cross_signing": True,
                 },
             },
         )
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index e7fc3f0431..9d6813a047 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import logging
+from typing import Dict, Set
 
 from canonicaljson import encode_canonical_json, json
 from signedjson.sign import sign_json
@@ -103,7 +104,7 @@ class RemoteKey(DirectServeResource):
     async def _async_render_GET(self, request):
         if len(request.postpath) == 1:
             (server,) = request.postpath
-            query = {server.decode("ascii"): {}}
+            query = {server.decode("ascii"): {}}  # type: dict
         elif len(request.postpath) == 2:
             server, key_id = request.postpath
             minimum_valid_until_ts = parse_integer(request, "minimum_valid_until_ts")
@@ -148,7 +149,7 @@ class RemoteKey(DirectServeResource):
 
         time_now_ms = self.clock.time_msec()
 
-        cache_misses = dict()
+        cache_misses = dict()  # type: Dict[str, Set[str]]
         for (server_name, key_id, from_server), results in cached.items():
             results = [(result["ts_added_ms"], result) for result in results]
 
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index bd9186fe50..490b1b45a8 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -18,6 +18,7 @@ import errno
 import logging
 import os
 import shutil
+from typing import Dict, Tuple
 
 from six import iteritems
 
@@ -605,7 +606,7 @@ class MediaRepository(object):
 
         # We deduplicate the thumbnail sizes by ignoring the cropped versions if
          # they have the same dimensions as a scaled one.
-        thumbnails = {}
+        thumbnails = {}  # type: Dict[Tuple[int, int, str], str]
         for r_width, r_height, r_method, r_type in requirements:
             if r_method == "crop":
                 thumbnails.setdefault((r_width, r_height, r_type), r_method)
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 6b978be876..07e395cfd1 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -23,6 +23,7 @@ import re
 import shutil
 import sys
 import traceback
+from typing import Dict, Optional
 
 import six
 from six import string_types
@@ -237,8 +238,8 @@ class PreviewUrlResource(DirectServeResource):
             # If we don't find a match, we'll look at the HTTP Content-Type, and
             # if that doesn't exist, we'll fall back to UTF-8.
             if not encoding:
-                match = _content_type_match.match(media_info["media_type"])
-                encoding = match.group(1) if match else "utf-8"
+                content_match = _content_type_match.match(media_info["media_type"])
+                encoding = content_match.group(1) if content_match else "utf-8"
 
             og = decode_and_calc_og(body, media_info["uri"], encoding)
 
@@ -518,7 +519,7 @@ def _calc_og(tree, media_uri):
     # "og:video:height" : "720",
     # "og:video:secure_url": "https://www.youtube.com/v/LXDBoHyjmtw?version=3",
 
-    og = {}
+    og = {}  # type: Dict[str, Optional[str]]
     for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
         if "content" in tag.attrib:
             # if we've got more than 50 tags, someone is taking the piss
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index 931ce79be8..d57480f761 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -296,8 +296,8 @@ class ThumbnailResource(DirectServeResource):
         d_h = desired_height
 
         if desired_method.lower() == "crop":
-            info_list = []
-            info_list2 = []
+            crop_info_list = []
+            crop_info_list2 = []
             for info in thumbnail_infos:
                 t_w = info["thumbnail_width"]
                 t_h = info["thumbnail_height"]
@@ -309,7 +309,7 @@ class ThumbnailResource(DirectServeResource):
                     type_quality = desired_type != info["thumbnail_type"]
                     length_quality = info["thumbnail_length"]
                     if t_w >= d_w or t_h >= d_h:
-                        info_list.append(
+                        crop_info_list.append(
                             (
                                 aspect_quality,
                                 min_quality,
@@ -320,7 +320,7 @@ class ThumbnailResource(DirectServeResource):
                             )
                         )
                     else:
-                        info_list2.append(
+                        crop_info_list2.append(
                             (
                                 aspect_quality,
                                 min_quality,
@@ -330,10 +330,10 @@ class ThumbnailResource(DirectServeResource):
                                 info,
                             )
                         )
-            if info_list:
-                return min(info_list)[-1]
+            if crop_info_list:
+                return min(crop_info_list)[-1]
             else:
-                return min(info_list2)[-1]
+                return min(crop_info_list2)[-1]
         else:
             info_list = []
             info_list2 = []
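
The renamed lists hold tuples whose fields are ordered from most to least
important, so Python's lexicographic tuple comparison lets `min()` pick the
best candidate: aspect match first, then the other quality measures, with the
thumbnail info dict riding along in the last slot. A toy illustration:

    candidates = [
        # (aspect_quality, min_quality, size_quality, type_quality, length, info)
        (1, 0, 100, False, 2048, {"id": "a"}),
        (0, 0, 150, False, 4096, {"id": "b"}),
    ]
    # Lexicographic comparison: the second tuple wins on the first field.
    assert min(candidates)[-1] == {"id": "b"}
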
diff --git a/synapse/server.pyi b/synapse/server.pyi
index b5e0b57095..0731403047 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -1,3 +1,5 @@
+import twisted.internet
+
 import synapse.api.auth
 import synapse.config.homeserver
 import synapse.federation.sender
@@ -9,10 +11,12 @@ import synapse.handlers.deactivate_account
 import synapse.handlers.device
 import synapse.handlers.e2e_keys
 import synapse.handlers.message
+import synapse.handlers.presence
 import synapse.handlers.room
 import synapse.handlers.room_member
 import synapse.handlers.set_password
 import synapse.http.client
+import synapse.notifier
 import synapse.rest.media.v1.media_repository
 import synapse.server_notices.server_notices_manager
 import synapse.server_notices.server_notices_sender
@@ -85,3 +89,11 @@ class HomeServer(object):
         self,
     ) -> synapse.server_notices.server_notices_sender.ServerNoticesSender:
         pass
+    def get_notifier(self) -> synapse.notifier.Notifier:
+        pass
+    def get_presence_handler(self) -> synapse.handlers.presence.PresenceHandler:
+        pass
+    def get_clock(self) -> synapse.util.Clock:
+        pass
+    def get_reactor(self) -> twisted.internet.base.ReactorBase:
+        pass
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index 2dac90578c..f7432c8d2f 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -105,7 +105,7 @@ class ServerNoticesManager(object):
 
         assert self._is_mine_id(user_id), "Cannot send server notices to remote users"
 
-        rooms = yield self._store.get_rooms_for_user_where_membership_is(
+        rooms = yield self._store.get_rooms_for_local_user_where_membership_is(
             user_id, [Membership.INVITE, Membership.JOIN]
         )
         system_mxid = self._config.server_notices_mxid
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 5accc071ab..cacd0c0c2b 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -16,7 +16,7 @@
 
 import logging
 from collections import namedtuple
-from typing import Dict, Iterable, List, Optional, Tuple
+from typing import Dict, Iterable, List, Optional
 
 from six import iteritems, itervalues
 
@@ -33,6 +33,7 @@ from synapse.events.snapshot import EventContext
 from synapse.logging.utils import log_function
 from synapse.state import v1, v2
 from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
+from synapse.types import StateMap
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches import get_cache_factor_for
 from synapse.util.caches.expiringcache import ExpiringCache
@@ -594,7 +595,7 @@ def _make_state_cache_entry(new_state, state_groups_ids):
 def resolve_events_with_store(
     room_id: str,
     room_version: str,
-    state_sets: List[Dict[Tuple[str, str], str]],
+    state_sets: List[StateMap[str]],
     event_map: Optional[Dict[str, EventBase]],
     state_res_store: "StateResolutionStore",
 ):
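
`StateMap` (imported from synapse.types) is presumably a generic alias for
the mapping these signatures previously spelled out by hand, along these
lines:

    from typing import Dict, Tuple, TypeVar

    T = TypeVar("T")

    # Keyed by (event_type, state_key); StateMap[str] maps to event IDs.
    StateMap = Dict[Tuple[str, str], T]
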
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index b2f9865f39..d6c34ce3b7 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -15,7 +15,7 @@
 
 import hashlib
 import logging
-from typing import Callable, Dict, List, Optional, Tuple
+from typing import Callable, Dict, List, Optional
 
 from six import iteritems, iterkeys, itervalues
 
@@ -26,6 +26,7 @@ from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
 from synapse.api.room_versions import RoomVersions
 from synapse.events import EventBase
+from synapse.types import StateMap
 
 logger = logging.getLogger(__name__)
 
@@ -36,7 +37,7 @@ POWER_KEY = (EventTypes.PowerLevels, "")
 @defer.inlineCallbacks
 def resolve_events_with_store(
     room_id: str,
-    state_sets: List[Dict[Tuple[str, str], str]],
+    state_sets: List[StateMap[str]],
     event_map: Optional[Dict[str, EventBase]],
     state_map_factory: Callable,
 ):
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 72fb8a6317..6216fdd204 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -16,7 +16,7 @@
 import heapq
 import itertools
 import logging
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, List, Optional
 
 from six import iteritems, itervalues
 
@@ -27,6 +27,7 @@ from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
 from synapse.events import EventBase
+from synapse.types import StateMap
 
 logger = logging.getLogger(__name__)
 
@@ -35,7 +36,7 @@ logger = logging.getLogger(__name__)
 def resolve_events_with_store(
     room_id: str,
     room_version: str,
-    state_sets: List[Dict[Tuple[str, str], str]],
+    state_sets: List[StateMap[str]],
     event_map: Optional[Dict[str, EventBase]],
     state_res_store: "synapse.state.StateResolutionStore",
 ):
@@ -393,12 +394,12 @@ def _iterative_auth_checks(
         room_id (str)
         room_version (str)
         event_ids (list[str]): Ordered list of events to apply auth checks to
-        base_state (dict[tuple[str, str], str]): The set of state to start with
+        base_state (StateMap[str]): The set of state to start with
         event_map (dict[str,FrozenEvent])
         state_res_store (StateResolutionStore)
 
     Returns:
-        Deferred[dict[tuple[str, str], str]]: Returns the final updated state
+        Deferred[StateMap[str]]: Returns the final updated state
     """
     resolved_state = base_state.copy()
 
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 3bb9381663..da3b99f93d 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -17,6 +17,7 @@
 import logging
 import random
 from abc import ABCMeta
+from typing import Any, Optional
 
 from six import PY2
 from six.moves import builtins
@@ -26,7 +27,7 @@ from canonicaljson import json
 from synapse.storage.database import LoggingTransaction  # noqa: F401
 from synapse.storage.database import make_in_list_sql_clause  # noqa: F401
 from synapse.storage.database import Database
-from synapse.types import get_domain_from_id
+from synapse.types import Collection, get_domain_from_id
 
 logger = logging.getLogger(__name__)
 
@@ -63,17 +64,24 @@ class SQLBaseStore(metaclass=ABCMeta):
         self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
         self._attempt_to_invalidate_cache("get_current_state_ids", (room_id,))
 
-    def _attempt_to_invalidate_cache(self, cache_name, key):
+    def _attempt_to_invalidate_cache(
+        self, cache_name: str, key: Optional[Collection[Any]]
+    ):
         """Attempts to invalidate the cache of the given name, ignoring if the
         cache doesn't exist. Mainly used for invalidating caches on workers,
         where they may not have the cache.
 
         Args:
-            cache_name (str)
-            key (tuple)
+            cache_name
+            key: Entry to invalidate. If None then invalidates the entire
+                cache.
         """
+
         try:
-            getattr(self, cache_name).invalidate(key)
+            if key is None:
+                getattr(self, cache_name).invalidate_all()
+            else:
+                getattr(self, cache_name).invalidate(tuple(key))
         except AttributeError:
             # We probably haven't pulled in the cache in this worker,
             # which is fine.
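
The reworked invalidation helper now has two paths: a None key wipes the
whole cache, anything else is normalised to a tuple and invalidated as a
single entry. A self-contained sketch of the dispatch, using a toy cache in
place of the real descriptor caches:

    from typing import Any, Iterable, Optional

    class ToyCache:
        def __init__(self):
            self._data = {("@alice:example.com",): 1}

        def invalidate(self, key):
            self._data.pop(key, None)

        def invalidate_all(self):
            self._data.clear()

    def attempt_invalidate(cache: ToyCache, key: Optional[Iterable[Any]]):
        if key is None:
            cache.invalidate_all()  # no key: drop everything
        else:
            cache.invalidate(tuple(key))  # normalise, e.g. list -> tuple

    cache = ToyCache()
    attempt_invalidate(cache, ["@alice:example.com"])
    assert cache._data == {}
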
diff --git a/synapse/storage/data_stores/__init__.py b/synapse/storage/data_stores/__init__.py
index 092e803799..e1d03429ca 100644
--- a/synapse/storage/data_stores/__init__.py
+++ b/synapse/storage/data_stores/__init__.py
@@ -47,7 +47,7 @@ class DataStores(object):
             with make_conn(database_config, engine) as db_conn:
                 logger.info("Preparing database %r...", db_name)
 
-                engine.check_database(db_conn.cursor())
+                engine.check_database(db_conn)
                 prepare_database(
                     db_conn, engine, hs.config, data_stores=database_config.data_stores,
                 )
diff --git a/synapse/storage/data_stores/main/cache.py b/synapse/storage/data_stores/main/cache.py
index 54ed8574c4..afa2b41c98 100644
--- a/synapse/storage/data_stores/main/cache.py
+++ b/synapse/storage/data_stores/main/cache.py
@@ -16,12 +16,13 @@
 
 import itertools
 import logging
+from typing import Any, Iterable, Optional
 
 from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.engines import PostgresEngine
-from synapse.util import batch_iter
+from synapse.util.iterutils import batch_iter
 
 logger = logging.getLogger(__name__)
 
@@ -43,6 +44,14 @@ class CacheInvalidationStore(SQLBaseStore):
         txn.call_after(cache_func.invalidate, keys)
         self._send_invalidation_to_replication(txn, cache_func.__name__, keys)
 
+    def _invalidate_all_cache_and_stream(self, txn, cache_func):
+        """Invalidates the entire cache and adds it to the cache stream so slaves
+        will know to invalidate their caches.
+        """
+
+        txn.call_after(cache_func.invalidate_all)
+        self._send_invalidation_to_replication(txn, cache_func.__name__, None)
+
     def _invalidate_state_caches_and_stream(self, txn, room_id, members_changed):
         """Special case invalidation of caches based on current state.
 
@@ -73,17 +82,24 @@ class CacheInvalidationStore(SQLBaseStore):
                 txn, CURRENT_STATE_CACHE_NAME, [room_id]
             )
 
-    def _send_invalidation_to_replication(self, txn, cache_name, keys):
+    def _send_invalidation_to_replication(
+        self, txn, cache_name: str, keys: Optional[Iterable[Any]]
+    ):
         """Notifies replication that given cache has been invalidated.
 
         Note that this does *not* invalidate the cache locally.
 
         Args:
             txn
-            cache_name (str)
-            keys (iterable[str])
+            cache_name
+            keys: Entry to invalidate. If None then the entire cache will
+                be invalidated.
         """
 
+        if cache_name == CURRENT_STATE_CACHE_NAME and keys is None:
+            raise Exception(
+                "Can't stream invalidate all with magic current state cache"
+            )
+
         if isinstance(self.database_engine, PostgresEngine):
             # get_next() returns a context manager which is designed to wrap
             # the transaction. However, we want to only get an ID when we want
@@ -95,13 +111,16 @@ class CacheInvalidationStore(SQLBaseStore):
             txn.call_after(ctx.__exit__, None, None, None)
             txn.call_after(self.hs.get_notifier().on_new_replication_data)
 
+            if keys is not None:
+                keys = list(keys)
+
             self.db.simple_insert_txn(
                 txn,
                 table="cache_invalidation_stream",
                 values={
                     "stream_id": stream_id,
                     "cache_func": cache_name,
-                    "keys": list(keys),
+                    "keys": keys,
                     "invalidation_ts": self.clock.time_msec(),
                 },
             )
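
The `get_next()` trick described in the comments above, as a runnable toy (`TxnStub` and the generator are invented stand-ins): the context manager is entered by hand so an ID is only taken when one is actually needed, and its `__exit__` is deferred until the transaction completes.

    from contextlib import contextmanager

    class TxnStub:
        """Hypothetical stand-in for LoggingTransaction: records callbacks."""
        def __init__(self):
            self.after_callbacks = []
        def call_after(self, fn, *args):
            self.after_callbacks.append((fn, args))
        def complete(self):
            for fn, args in self.after_callbacks:
                fn(*args)

    @contextmanager
    def get_next(_counter=[0]):  # toy stream-ID generator
        _counter[0] += 1
        yield _counter[0]
        print("stream id", _counter[0], "marked as persisted")

    txn = TxnStub()
    ctx = get_next()
    stream_id = ctx.__enter__()                     # take the ID only when needed
    txn.call_after(ctx.__exit__, None, None, None)  # close it after the txn
    txn.complete()
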
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py
index 9a828231c4..f0a7962dd0 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/data_stores/main/devices.py
@@ -33,13 +33,13 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.database import Database
 from synapse.types import get_verify_key_from_cross_signing_key
-from synapse.util import batch_iter
 from synapse.util.caches.descriptors import (
     Cache,
     cached,
     cachedInlineCallbacks,
     cachedList,
 )
+from synapse.util.iterutils import batch_iter
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index 58f35d7f56..596daf8909 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -19,6 +19,7 @@ import itertools
 import logging
 from collections import Counter as c_counter, OrderedDict, namedtuple
 from functools import wraps
+from typing import Dict, List, Tuple
 
 from six import iteritems, text_type
 from six.moves import range
@@ -41,11 +42,12 @@ from synapse.storage._base import make_in_list_sql_clause
 from synapse.storage.data_stores.main.event_federation import EventFederationStore
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.data_stores.main.state import StateGroupWorkerStore
-from synapse.storage.database import Database
-from synapse.types import RoomStreamToken, get_domain_from_id
-from synapse.util import batch_iter
+from synapse.storage.database import Database, LoggingTransaction
+from synapse.storage.persist_events import DeltaState
+from synapse.types import RoomStreamToken, StateMap, get_domain_from_id
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 from synapse.util.frozenutils import frozendict_json_encoder
+from synapse.util.iterutils import batch_iter
 
 logger = logging.getLogger(__name__)
 
@@ -128,6 +130,7 @@ class EventsStore(
             hs.get_clock().looping_call(_censor_redactions, 5 * 60 * 1000)
 
         self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
+        self.is_mine_id = hs.is_mine_id
 
     @defer.inlineCallbacks
     def _read_forward_extremities(self):
@@ -147,30 +150,26 @@ class EventsStore(
     @defer.inlineCallbacks
     def _persist_events_and_state_updates(
         self,
-        events_and_contexts,
-        current_state_for_room,
-        state_delta_for_room,
-        new_forward_extremeties,
-        backfilled=False,
-        delete_existing=False,
+        events_and_contexts: List[Tuple[EventBase, EventContext]],
+        current_state_for_room: Dict[str, StateMap[str]],
+        state_delta_for_room: Dict[str, DeltaState],
+        new_forward_extremeties: Dict[str, List[str]],
+        backfilled: bool = False,
+        delete_existing: bool = False,
     ):
         """Persist a set of events alongside updates to the current state and
         forward extremities tables.
 
         Args:
-            events_and_contexts (list[(EventBase, EventContext)]):
-            current_state_for_room (dict[str, dict]): Map from room_id to the
-                current state of the room based on forward extremities
-            state_delta_for_room (dict[str, tuple]): Map from room_id to tuple
-                of `(to_delete, to_insert)` where to_delete is a list
-                of type/state keys to remove from current state, and to_insert
-                is a map (type,key)->event_id giving the state delta in each
-                room.
-            new_forward_extremities (dict[str, list[str]]): Map from room_id
-                to list of event IDs that are the new forward extremities of
-                the room.
-            backfilled (bool)
-            delete_existing (bool):
+            events_and_contexts:
+            current_state_for_room: Map from room_id to the current state of
+                the room based on forward extremities
+            state_delta_for_room: Map from room_id to the delta to apply to
+                room state
+            new_forward_extremeties: Map from room_id to list of event IDs
+                that are the new forward extremities of the room.
+            backfilled
+            delete_existing
 
         Returns:
             Deferred: resolves when the events have been persisted
@@ -351,12 +350,12 @@ class EventsStore(
     @log_function
     def _persist_events_txn(
         self,
-        txn,
-        events_and_contexts,
-        backfilled,
-        delete_existing=False,
-        state_delta_for_room={},
-        new_forward_extremeties={},
+        txn: LoggingTransaction,
+        events_and_contexts: List[Tuple[EventBase, EventContext]],
+        backfilled: bool,
+        delete_existing: bool = False,
+        state_delta_for_room: Dict[str, DeltaState] = {},
+        new_forward_extremeties: Dict[str, List[str]] = {},
     ):
         """Insert some number of room events into the necessary database tables.
 
@@ -365,21 +364,16 @@ class EventsStore(
         whether the event was rejected.
 
         Args:
-            txn (twisted.enterprise.adbapi.Connection): db connection
-            events_and_contexts (list[(EventBase, EventContext)]):
-                events to persist
-            backfilled (bool): True if the events were backfilled
-            delete_existing (bool): True to purge existing table rows for the
-                events from the database. This is useful when retrying due to
+            txn
+            events_and_contexts: events to persist
+            backfilled: True if the events were backfilled
+            delete_existing: True to purge existing table rows for the events
+                from the database. This is useful when retrying due to
                 IntegrityError.
-            state_delta_for_room (dict[str, (list, dict)]):
-                The current-state delta for each room. For each room, a tuple
-                (to_delete, to_insert), being a list of type/state keys to be
-                removed from the current state, and a state set to be added to
-                the current state.
-            new_forward_extremeties (dict[str, list[str]]):
-                The new forward extremities for each room. For each room, a
-                list of the event ids which are the forward extremities.
+            state_delta_for_room: The current-state delta for each room.
+            new_forward_extremeties: The new forward extremities for each room.
+                For each room, a list of the event ids which are the forward
+                extremities.
 
         """
         all_events_and_contexts = events_and_contexts
@@ -464,9 +458,15 @@ class EventsStore(
         # room_memberships, where applicable.
         self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)
 
-    def _update_current_state_txn(self, txn, state_delta_by_room, stream_id):
-        for room_id, current_state_tuple in iteritems(state_delta_by_room):
-            to_delete, to_insert = current_state_tuple
+    def _update_current_state_txn(
+        self,
+        txn: LoggingTransaction,
+        state_delta_by_room: Dict[str, DeltaState],
+        stream_id: int,
+    ):
+        for room_id, delta_state in iteritems(state_delta_by_room):
+            to_delete = delta_state.to_delete
+            to_insert = delta_state.to_insert
 
             # First we add entries to the current_state_delta_stream. We
             # do this before updating the current_state_events table so
@@ -547,6 +547,34 @@ class EventsStore(
                 ],
             )
 
+            # Note: Do we really want to delete rows here (that we do not
+            # subsequently reinsert below)? While technically correct it means
+            # we have no record of the fact the user *was* a member of the
+            # room but got, say, state reset out of it.
+            if to_delete or to_insert:
+                txn.executemany(
+                    "DELETE FROM local_current_membership"
+                    " WHERE room_id = ? AND user_id = ?",
+                    (
+                        (room_id, state_key)
+                        for etype, state_key in itertools.chain(to_delete, to_insert)
+                        if etype == EventTypes.Member and self.is_mine_id(state_key)
+                    ),
+                )
+
+            if to_insert:
+                txn.executemany(
+                    """INSERT INTO local_current_membership
+                        (room_id, user_id, event_id, membership)
+                    VALUES (?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
+                    """,
+                    [
+                        (room_id, key[1], ev_id, ev_id)
+                        for key, ev_id in to_insert.items()
+                        if key[0] == EventTypes.Member and self.is_mine_id(key[1])
+                    ],
+                )
+
             txn.call_after(
                 self._curr_state_delta_stream_cache.entity_has_changed,
                 room_id,
@@ -1724,6 +1752,7 @@ class EventsStore(
             "local_invites",
             "room_account_data",
             "room_tags",
+            "local_current_membership",
         ):
             logger.info("[purge] removing %s from %s", room_id, table)
             txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,))
diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py
index 0cce5232f5..3b93e0597a 100644
--- a/synapse/storage/data_stores/main/events_worker.py
+++ b/synapse/storage/data_stores/main/events_worker.py
@@ -37,8 +37,8 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
 from synapse.storage.database import Database
 from synapse.types import get_domain_from_id
-from synapse.util import batch_iter
 from synapse.util.caches.descriptors import Cache
+from synapse.util.iterutils import batch_iter
 from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/storage/data_stores/main/keys.py b/synapse/storage/data_stores/main/keys.py
index 6b12f5a75f..ba89c68c9f 100644
--- a/synapse/storage/data_stores/main/keys.py
+++ b/synapse/storage/data_stores/main/keys.py
@@ -23,8 +23,8 @@ from signedjson.key import decode_verify_key_bytes
 
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.keys import FetchKeyResult
-from synapse.util import batch_iter
 from synapse.util.caches.descriptors import cached, cachedList
+from synapse.util.iterutils import batch_iter
 
 logger = logging.getLogger(__name__)
 
diff --git a/synapse/storage/data_stores/main/monthly_active_users.py b/synapse/storage/data_stores/main/monthly_active_users.py
index 27158534cb..89a41542a3 100644
--- a/synapse/storage/data_stores/main/monthly_active_users.py
+++ b/synapse/storage/data_stores/main/monthly_active_users.py
@@ -27,12 +27,76 @@ logger = logging.getLogger(__name__)
 LAST_SEEN_GRANULARITY = 60 * 60 * 1000
 
 
-class MonthlyActiveUsersStore(SQLBaseStore):
+class MonthlyActiveUsersWorkerStore(SQLBaseStore):
     def __init__(self, database: Database, db_conn, hs):
-        super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs)
+        super(MonthlyActiveUsersWorkerStore, self).__init__(database, db_conn, hs)
         self._clock = hs.get_clock()
         self.hs = hs
+
+    @cached(num_args=0)
+    def get_monthly_active_count(self):
+        """Generates current count of monthly active users
+
+        Returns:
+            Deferred[int]: Number of current monthly active users
+        """
+
+        def _count_users(txn):
+            sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users"
+
+            txn.execute(sql)
+            (count,) = txn.fetchone()
+            return count
+
+        return self.db.runInteraction("count_users", _count_users)
+
+    @defer.inlineCallbacks
+    def get_registered_reserved_users(self):
+        """Of the reserved threepids defined in config, which are associated
+        with registered users?
+
+        Returns:
+            Deferred[list]: Real reserved users
+        """
+        users = []
+
+        for tp in self.hs.config.mau_limits_reserved_threepids[
+            : self.hs.config.max_mau_value
+        ]:
+            user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
+                tp["medium"], tp["address"]
+            )
+            if user_id:
+                users.append(user_id)
+
+        return users
+
+    @cached(num_args=1)
+    def user_last_seen_monthly_active(self, user_id):
+        """
+            Checks if a given user is part of the monthly active user group
+            Arguments:
+                user_id (str): user to add/update
+            Return:
+                Deferred[int] : timestamp since last seen, None if never seen
+
+        """
+
+        return self.db.simple_select_one_onecol(
+            table="monthly_active_users",
+            keyvalues={"user_id": user_id},
+            retcol="timestamp",
+            allow_none=True,
+            desc="user_last_seen_monthly_active",
+        )
+
+
+class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
+    def __init__(self, database: Database, db_conn, hs):
+        super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs)
+
         # Do not add more reserved users than the total allowable number
         self.db.new_transaction(
             db_conn,
             "initialise_mau_threepids",
@@ -146,57 +210,22 @@ class MonthlyActiveUsersStore(SQLBaseStore):
 
                     txn.execute(sql, query_args)
 
+            # It seems poor to invalidate the whole cache; Postgres supports
+            # RETURNING, which would allow us to invalidate only the specific
+            # users, but sqlite has no way to do this, and instead we would
+            # need to SELECT and then DELETE, which is racy without locking.
+            # We have resolved to invalidate the whole cache for now, and to
+            # do something about it if and when the perf becomes significant.
+            self._invalidate_all_cache_and_stream(
+                txn, self.user_last_seen_monthly_active
+            )
+            self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ())
+
         reserved_users = yield self.get_registered_reserved_users()
         yield self.db.runInteraction(
             "reap_monthly_active_users", _reap_users, reserved_users
         )
-        # It seems poor to invalidate the whole cache, Postgres supports
-        # 'Returning' which would allow me to invalidate only the
-        # specific users, but sqlite has no way to do this and instead
-        # I would need to SELECT and the DELETE which without locking
-        # is racy.
-        # Have resolved to invalidate the whole cache for now and do
-        # something about it if and when the perf becomes significant
-        self.user_last_seen_monthly_active.invalidate_all()
-        self.get_monthly_active_count.invalidate_all()
-
-    @cached(num_args=0)
-    def get_monthly_active_count(self):
-        """Generates current count of monthly active users
-
-        Returns:
-            Defered[int]: Number of current monthly active users
-        """
-
-        def _count_users(txn):
-            sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users"
-
-            txn.execute(sql)
-            (count,) = txn.fetchone()
-            return count
-
-        return self.db.runInteraction("count_users", _count_users)
-
-    @defer.inlineCallbacks
-    def get_registered_reserved_users(self):
-        """Of the reserved threepids defined in config, which are associated
-        with registered users?
-
-        Returns:
-            Defered[list]: Real reserved users
-        """
-        users = []
-
-        for tp in self.hs.config.mau_limits_reserved_threepids[
-            : self.hs.config.max_mau_value
-        ]:
-            user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
-                tp["medium"], tp["address"]
-            )
-            if user_id:
-                users.append(user_id)
-
-        return users
 
     @defer.inlineCallbacks
     def upsert_monthly_active_user(self, user_id):
@@ -222,23 +251,9 @@ class MonthlyActiveUsersStore(SQLBaseStore):
             "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id
         )
 
-        user_in_mau = self.user_last_seen_monthly_active.cache.get(
-            (user_id,), None, update_metrics=False
-        )
-        if user_in_mau is None:
-            self.get_monthly_active_count.invalidate(())
-
-        self.user_last_seen_monthly_active.invalidate((user_id,))
-
     def upsert_monthly_active_user_txn(self, txn, user_id):
         """Updates or inserts monthly active user member
 
-        Note that, after calling this method, it will generally be necessary
-        to invalidate the caches on user_last_seen_monthly_active and
-        get_monthly_active_count. We can't do that here, because we are running
-        in a database thread rather than the main thread, and we can't call
-        txn.call_after because txn may not be a LoggingTransaction.
-
         We consciously do not call is_support_txn from this method because it
         is not possible to cache the response. is_support_txn will be false in
         almost all cases, so it seems reasonable to call it only for
@@ -269,27 +284,13 @@ class MonthlyActiveUsersStore(SQLBaseStore):
             values={"timestamp": int(self._clock.time_msec())},
         )
 
-        return is_insert
-
-    @cached(num_args=1)
-    def user_last_seen_monthly_active(self, user_id):
-        """
-            Checks if a given user is part of the monthly active user group
-            Arguments:
-                user_id (str): user to add/update
-            Return:
-                Deferred[int] : timestamp since last seen, None if never seen
-
-        """
-
-        return self.db.simple_select_one_onecol(
-            table="monthly_active_users",
-            keyvalues={"user_id": user_id},
-            retcol="timestamp",
-            allow_none=True,
-            desc="user_last_seen_monthly_active",
+        self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ())
+        self._invalidate_cache_and_stream(
+            txn, self.user_last_seen_monthly_active, (user_id,)
         )
 
+        return is_insert
+
     @defer.inlineCallbacks
     def populate_monthly_active_users(self, user_id):
         """Checks on the state of monthly active user limits and optionally
diff --git a/synapse/storage/data_stores/main/presence.py b/synapse/storage/data_stores/main/presence.py
index a2c83e0867..604c8b7ddd 100644
--- a/synapse/storage/data_stores/main/presence.py
+++ b/synapse/storage/data_stores/main/presence.py
@@ -17,8 +17,8 @@ from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
 from synapse.storage.presence import UserPresenceState
-from synapse.util import batch_iter
 from synapse.util.caches.descriptors import cached, cachedList
+from synapse.util.iterutils import batch_iter
 
 
 class PresenceStore(SQLBaseStore):
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index cb4b2b39a0..49306642ed 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -291,7 +291,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="is_server_admin",
         )
 
-        return res if res else False
+        return bool(res) if res else False
 
     def set_server_admin(self, user, admin):
         """Sets whether a user is an admin of this homeserver.
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index 8636d75030..d968803ad2 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -18,7 +18,8 @@ import collections
 import logging
 import re
 from abc import abstractmethod
-from typing import Optional, Tuple
+from enum import Enum
+from typing import Any, Dict, List, Optional, Tuple
 
 from six import integer_types
 
@@ -46,6 +47,18 @@ RatelimitOverride = collections.namedtuple(
 )
 
 
+class RoomSortOrder(Enum):
+    """
+    Enum to define the sorting method used when returning rooms with get_rooms_paginate
+
+    ALPHABETICAL = sort rooms alphabetically by name
+    SIZE = sort rooms by membership size, highest to lowest
+    """
+
+    ALPHABETICAL = "alphabetical"
+    SIZE = "size"
+
+
 class RoomWorkerStore(SQLBaseStore):
     def __init__(self, database: Database, db_conn, hs):
         super(RoomWorkerStore, self).__init__(database, db_conn, hs)
@@ -281,6 +294,116 @@ class RoomWorkerStore(SQLBaseStore):
             desc="is_room_blocked",
         )
 
+    async def get_rooms_paginate(
+        self,
+        start: int,
+        limit: int,
+        order_by: RoomSortOrder,
+        reverse_order: bool,
+        search_term: Optional[str],
+    ) -> Tuple[List[Dict[str, Any]], int]:
+        """Function to retrieve a paginated list of rooms as json.
+
+        Args:
+            start: offset in the list
+            limit: maximum amount of rooms to retrieve
+            order_by: the sort order of the returned list
+            reverse_order: whether to reverse the room list
+            search_term: a string to filter room names by
+        Returns:
+            A list of room dicts and an integer representing the total number of
+            rooms that exist given this query
+        """
+        # Filter room names by a string
+        where_statement = ""
+        if search_term:
+            where_statement = "WHERE state.name LIKE ?"
+
+            # Our postgres db driver converts ? -> %s in SQL strings as that's the
+            # placeholder for postgres.
+            # HOWEVER, if you put a % into your SQL then everything goes wibbly.
+            # To get around this, we're going to surround search_term with %'s
+            # before giving it to the database in python instead
+            search_term = "%" + search_term + "%"
+
+        # Set ordering
+        if RoomSortOrder(order_by) == RoomSortOrder.SIZE:
+            order_by_column = "curr.joined_members"
+            order_by_asc = False
+        elif RoomSortOrder(order_by) == RoomSortOrder.ALPHABETICAL:
+            # Sort alphabetically
+            order_by_column = "state.name"
+            order_by_asc = True
+        else:
+            raise StoreError(
+                500, "Incorrect value for order_by provided: %s" % order_by
+            )
+
+        # Whether to return the list in reverse order
+        if reverse_order:
+            # Flip the boolean
+            order_by_asc = not order_by_asc
+
+        # Create one query for getting the limited number of rooms that the user
+        # asked for, and another query for getting the total number of rooms that
+        # could be returned, which lets us see if there are more rooms to
+        # paginate through.
+        info_sql = """
+            SELECT state.room_id, state.name, state.canonical_alias, curr.joined_members
+            FROM room_stats_state state
+            INNER JOIN room_stats_current curr USING (room_id)
+            %s
+            ORDER BY %s %s
+            LIMIT ?
+            OFFSET ?
+        """ % (
+            where_statement,
+            order_by_column,
+            "ASC" if order_by_asc else "DESC",
+        )
+
+        # Use a nested SELECT statement as SQL can't count(*) with an OFFSET
+        count_sql = """
+            SELECT count(*) FROM (
+              SELECT room_id FROM room_stats_state state
+              %s
+            ) AS get_room_ids
+        """ % (
+            where_statement,
+        )
+
+        def _get_rooms_paginate_txn(txn):
+            # Execute the data query
+            sql_values = (limit, start)
+            if search_term:
+                # Add the search term into the WHERE clause
+                sql_values = (search_term,) + sql_values
+            txn.execute(info_sql, sql_values)
+
+            # Refactor room query data into a structured dictionary
+            rooms = []
+            for room in txn:
+                rooms.append(
+                    {
+                        "room_id": room[0],
+                        "name": room[1],
+                        "canonical_alias": room[2],
+                        "joined_members": room[3],
+                    }
+                )
+
+            # Execute the count query
+
+            # Add the search term into the WHERE clause if present
+            sql_values = (search_term,) if search_term else ()
+            txn.execute(count_sql, sql_values)
+
+            room_count = txn.fetchone()
+            return rooms, room_count[0]
+
+        return await self.db.runInteraction(
+            "get_rooms_paginate", _get_rooms_paginate_txn,
+        )
+
     @cachedInlineCallbacks(max_entries=10000)
     def get_ratelimit_for_user(self, user_id):
         """Check if there are any overrides for ratelimiting for the given
@@ -399,6 +522,8 @@ class RoomWorkerStore(SQLBaseStore):
         the associated media
         """
 
+        logger.info("Quarantining media in room: %s", room_id)
+
         def _quarantine_media_in_room_txn(txn):
             local_mxcs, remote_mxcs = self._get_media_mxcs_in_room_txn(txn, room_id)
             total_media_quarantined = 0
@@ -494,6 +619,118 @@ class RoomWorkerStore(SQLBaseStore):
 
         return local_media_mxcs, remote_media_mxcs
 
+    def quarantine_media_by_id(
+        self, server_name: str, media_id: str, quarantined_by: str,
+    ):
+        """quarantines a single local or remote media id
+
+        Args:
+            server_name: The name of the server that holds this media
+            media_id: The ID of the media to be quarantined
+            quarantined_by: The user ID that initiated the quarantine request
+        """
+        logger.info("Quarantining media: %s/%s", server_name, media_id)
+        is_local = server_name == self.config.server_name
+
+        def _quarantine_media_by_id_txn(txn):
+            local_mxcs = [media_id] if is_local else []
+            remote_mxcs = [(server_name, media_id)] if not is_local else []
+
+            return self._quarantine_media_txn(
+                txn, local_mxcs, remote_mxcs, quarantined_by
+            )
+
+        return self.db.runInteraction(
+            "quarantine_media_by_user", _quarantine_media_by_id_txn
+        )
+
+    def quarantine_media_ids_by_user(self, user_id: str, quarantined_by: str):
+        """quarantines all local media associated with a single user
+
+        Args:
+            user_id: The ID of the user to quarantine media of
+            quarantined_by: The ID of the user who made the quarantine request
+        """
+
+        def _quarantine_media_by_user_txn(txn):
+            local_media_ids = self._get_media_ids_by_user_txn(txn, user_id)
+            return self._quarantine_media_txn(txn, local_media_ids, [], quarantined_by)
+
+        return self.db.runInteraction(
+            "quarantine_media_by_user", _quarantine_media_by_user_txn
+        )
+
+    def _get_media_ids_by_user_txn(self, txn, user_id: str, filter_quarantined=True):
+        """Retrieves local media IDs by a given user
+
+        Args:
+            txn (cursor)
+            user_id: The ID of the user to retrieve media IDs of
+            filter_quarantined: If True, exclude media that has already been
+                quarantined
+
+        Returns:
+            A list of local media IDs.
+        """
+        # Local media
+        sql = """
+            SELECT media_id
+            FROM local_media_repository
+            WHERE user_id = ?
+            """
+        if filter_quarantined:
+            sql += "AND quarantined_by IS NULL"
+        txn.execute(sql, (user_id,))
+
+        local_media_ids = [row[0] for row in txn]
+
+        # TODO: Figure out all remote media a user has referenced in a message
+
+        return local_media_ids
+
+    def _quarantine_media_txn(
+        self,
+        txn,
+        local_mxcs: List[str],
+        remote_mxcs: List[Tuple[str, str]],
+        quarantined_by: str,
+    ) -> int:
+        """Quarantine local and remote media items
+
+        Args:
+            txn (cursor)
+            local_mxcs: A list of local mxc URLs
+            remote_mxcs: A list of (remote server, media id) tuples representing
+                remote mxc URLs
+            quarantined_by: The ID of the user who initiated the quarantine request
+        Returns:
+            The total number of media items quarantined
+        """
+        total_media_quarantined = 0
+
+        # Update all the tables to set the quarantined_by flag
+        txn.executemany(
+            """
+            UPDATE local_media_repository
+            SET quarantined_by = ?
+            WHERE media_id = ?
+        """,
+            ((quarantined_by, media_id) for media_id in local_mxcs),
+        )
+
+        txn.executemany(
+            """
+                UPDATE remote_media_cache
+                SET quarantined_by = ?
+                WHERE media_origin = ? AND media_id = ?
+            """,
+            ((quarantined_by, origin, media_id) for origin, media_id in remote_mxcs),
+        )
+
+        total_media_quarantined += len(local_mxcs)
+        total_media_quarantined += len(remote_mxcs)
+
+        return total_media_quarantined
+
 
 class RoomBackgroundUpdateStore(SQLBaseStore):
     REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
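
A hedged usage sketch for the new pagination method (the caller and the `store` object are assumptions, not part of this change):

    async def list_biggest_rooms(store):
        # "size" is parsed by RoomSortOrder(order_by) inside the store method
        rooms, total = await store.get_rooms_paginate(
            start=0,
            limit=10,
            order_by="size",
            reverse_order=False,
            search_term=None,
        )
        # `rooms` is a list of dicts with room_id / name / canonical_alias /
        # joined_members; `total` tells the caller whether more pages exist.
        return rooms, total
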
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index 70ff5751b6..9acef7c950 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -297,19 +297,22 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         return {row[0]: row[1] for row in txn}
 
     @cached()
-    def get_invited_rooms_for_user(self, user_id):
-        """ Get all the rooms the user is invited to
+    def get_invited_rooms_for_local_user(self, user_id):
+        """ Get all the rooms the *local* user is invited to
+
         Args:
             user_id (str): The user ID.
         Returns:
             A deferred list of RoomsForUser.
         """
 
-        return self.get_rooms_for_user_where_membership_is(user_id, [Membership.INVITE])
+        return self.get_rooms_for_local_user_where_membership_is(
+            user_id, [Membership.INVITE]
+        )
 
     @defer.inlineCallbacks
-    def get_invite_for_user_in_room(self, user_id, room_id):
-        """Gets the invite for the given user and room
+    def get_invite_for_local_user_in_room(self, user_id, room_id):
+        """Gets the invite for the given *local* user and room
 
         Args:
             user_id (str)
@@ -319,15 +322,15 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             Deferred: Resolves to either a RoomsForUser or None if no invite was
                 found.
         """
-        invites = yield self.get_invited_rooms_for_user(user_id)
+        invites = yield self.get_invited_rooms_for_local_user(user_id)
         for invite in invites:
             if invite.room_id == room_id:
                 return invite
         return None
 
     @defer.inlineCallbacks
-    def get_rooms_for_user_where_membership_is(self, user_id, membership_list):
-        """ Get all the rooms for this user where the membership for this user
+    def get_rooms_for_local_user_where_membership_is(self, user_id, membership_list):
+        """ Get all the rooms for this *local* user where the membership for this user
         matches one in the membership list.
 
         Filters out forgotten rooms.
@@ -344,8 +347,8 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             return defer.succeed(None)
 
         rooms = yield self.db.runInteraction(
-            "get_rooms_for_user_where_membership_is",
-            self._get_rooms_for_user_where_membership_is_txn,
+            "get_rooms_for_local_user_where_membership_is",
+            self._get_rooms_for_local_user_where_membership_is_txn,
             user_id,
             membership_list,
         )
@@ -354,76 +357,42 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         forgotten_rooms = yield self.get_forgotten_rooms_for_user(user_id)
         return [room for room in rooms if room.room_id not in forgotten_rooms]
 
-    def _get_rooms_for_user_where_membership_is_txn(
+    def _get_rooms_for_local_user_where_membership_is_txn(
         self, txn, user_id, membership_list
     ):
+        # Paranoia check.
+        if not self.hs.is_mine_id(user_id):
+            raise Exception(
+                "Cannot call 'get_rooms_for_local_user_where_membership_is' on non-local user %r"
+                % (user_id,),
+            )
 
-        do_invite = Membership.INVITE in membership_list
-        membership_list = [m for m in membership_list if m != Membership.INVITE]
-
-        results = []
-        if membership_list:
-            if self._current_state_events_membership_up_to_date:
-                clause, args = make_in_list_sql_clause(
-                    self.database_engine, "c.membership", membership_list
-                )
-                sql = """
-                    SELECT room_id, e.sender, c.membership, event_id, e.stream_ordering
-                    FROM current_state_events AS c
-                    INNER JOIN events AS e USING (room_id, event_id)
-                    WHERE
-                        c.type = 'm.room.member'
-                        AND state_key = ?
-                        AND %s
-                """ % (
-                    clause,
-                )
-            else:
-                clause, args = make_in_list_sql_clause(
-                    self.database_engine, "m.membership", membership_list
-                )
-                sql = """
-                    SELECT room_id, e.sender, m.membership, event_id, e.stream_ordering
-                    FROM current_state_events AS c
-                    INNER JOIN room_memberships AS m USING (room_id, event_id)
-                    INNER JOIN events AS e USING (room_id, event_id)
-                    WHERE
-                        c.type = 'm.room.member'
-                        AND state_key = ?
-                        AND %s
-                """ % (
-                    clause,
-                )
-
-            txn.execute(sql, (user_id, *args))
-            results = [RoomsForUser(**r) for r in self.db.cursor_to_dict(txn)]
+        clause, args = make_in_list_sql_clause(
+            self.database_engine, "c.membership", membership_list
+        )
 
-        if do_invite:
-            sql = (
-                "SELECT i.room_id, inviter, i.event_id, e.stream_ordering"
-                " FROM local_invites as i"
-                " INNER JOIN events as e USING (event_id)"
-                " WHERE invitee = ? AND locally_rejected is NULL"
-                " AND replaced_by is NULL"
-            )
+        sql = """
+            SELECT room_id, e.sender, c.membership, event_id, e.stream_ordering
+            FROM local_current_membership AS c
+            INNER JOIN events AS e USING (room_id, event_id)
+            WHERE
+                user_id = ?
+                AND %s
+        """ % (
+            clause,
+        )
 
-            txn.execute(sql, (user_id,))
-            results.extend(
-                RoomsForUser(
-                    room_id=r["room_id"],
-                    sender=r["inviter"],
-                    event_id=r["event_id"],
-                    stream_ordering=r["stream_ordering"],
-                    membership=Membership.INVITE,
-                )
-                for r in self.db.cursor_to_dict(txn)
-            )
+        txn.execute(sql, (user_id, *args))
+        results = [RoomsForUser(**r) for r in self.db.cursor_to_dict(txn)]
 
         return results
 
-    @cachedInlineCallbacks(max_entries=500000, iterable=True)
+    @cached(max_entries=500000, iterable=True)
     def get_rooms_for_user_with_stream_ordering(self, user_id):
-        """Returns a set of room_ids the user is currently joined to
+        """Returns a set of room_ids the user is currently joined to.
+
+        If a remote user, only returns rooms this server is currently
+        participating in.
 
         Args:
             user_id (str)
@@ -433,17 +402,49 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             the rooms the user is in currently, along with the stream ordering
             of the most recent join for that user and room.
         """
-        rooms = yield self.get_rooms_for_user_where_membership_is(
-            user_id, membership_list=[Membership.JOIN]
-        )
-        return frozenset(
-            GetRoomsForUserWithStreamOrdering(r.room_id, r.stream_ordering)
-            for r in rooms
+        return self.db.runInteraction(
+            "get_rooms_for_user_with_stream_ordering",
+            self._get_rooms_for_user_with_stream_ordering_txn,
+            user_id,
         )
 
+    def _get_rooms_for_user_with_stream_ordering_txn(self, txn, user_id):
+        # We use `current_state_events` here and not `local_current_membership`
+        # as a) this gets called with remote users and b) this only gets called
+        # for rooms the server is participating in.
+        if self._current_state_events_membership_up_to_date:
+            sql = """
+                SELECT room_id, e.stream_ordering
+                FROM current_state_events AS c
+                INNER JOIN events AS e USING (room_id, event_id)
+                WHERE
+                    c.type = 'm.room.member'
+                    AND state_key = ?
+                    AND c.membership = ?
+            """
+        else:
+            sql = """
+                SELECT room_id, e.stream_ordering
+                FROM current_state_events AS c
+                INNER JOIN room_memberships AS m USING (room_id, event_id)
+                INNER JOIN events AS e USING (room_id, event_id)
+                WHERE
+                    c.type = 'm.room.member'
+                    AND state_key = ?
+                    AND m.membership = ?
+            """
+
+        txn.execute(sql, (user_id, Membership.JOIN))
+        results = frozenset(GetRoomsForUserWithStreamOrdering(*row) for row in txn)
+
+        return results
+
     @defer.inlineCallbacks
     def get_rooms_for_user(self, user_id, on_invalidate=None):
-        """Returns a set of room_ids the user is currently joined to
+        """Returns a set of room_ids the user is currently joined to.
+
+        If a remote user, only returns rooms this server is currently
+        participating in.
         """
         rooms = yield self.get_rooms_for_user_with_stream_ordering(
             user_id, on_invalidate=on_invalidate
@@ -1022,7 +1023,7 @@ class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore):
                 event.internal_metadata.stream_ordering,
             )
             txn.call_after(
-                self.get_invited_rooms_for_user.invalidate, (event.state_key,)
+                self.get_invited_rooms_for_local_user.invalidate, (event.state_key,)
             )
 
             # We update the local_invites table only if the event is "current",
@@ -1064,6 +1065,27 @@ class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore):
                         ),
                     )
 
+                # We also update the `local_current_membership` table with
+                # the latest invite info. This will usually get updated by the
+                # `current_state_events` handling, unless it's an outlier.
+                if event.internal_metadata.is_outlier():
+                    # This should only happen for out of band memberships, so
+                    # we add a paranoia check.
+                    assert event.internal_metadata.is_out_of_band_membership()
+
+                    self.db.simple_upsert_txn(
+                        txn,
+                        table="local_current_membership",
+                        keyvalues={
+                            "room_id": event.room_id,
+                            "user_id": event.state_key,
+                        },
+                        values={
+                            "event_id": event.event_id,
+                            "membership": event.membership,
+                        },
+                    )
+
     @defer.inlineCallbacks
     def locally_reject_invite(self, user_id, room_id):
         sql = (
@@ -1075,6 +1097,15 @@ class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore):
         def f(txn, stream_ordering):
             txn.execute(sql, (stream_ordering, True, room_id, user_id))
 
+            # We also clear this entry from `local_current_membership`.
+            # Ideally we'd point to a leave event, but we don't have one, so
+            # never mind.
+            self.db.simple_delete_txn(
+                txn,
+                table="local_current_membership",
+                keyvalues={"room_id": room_id, "user_id": user_id},
+            )
+
         with self._stream_id_gen.get_next() as stream_ordering:
             yield self.db.runInteraction("locally_reject_invite", f, stream_ordering)
 
diff --git a/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py b/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py
new file mode 100644
index 0000000000..63b5acdcf7
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# We create a new table called `local_current_membership` that stores the latest
+# membership state of local users in rooms, which helps track leaves/bans/etc
+# even if the server has left the room (and so has deleted the room from
+# `current_state_events`). This will also include outstanding invites for local
+# users for rooms the server isn't in.
+#
+# If the server isn't and hasn't been in the room then it will only include
+# outstanding invites, and not e.g. pre-emptive bans of local users.
+#
+# If the server later rejoins a room, `local_current_membership` can simply be
+# replaced with the new current state of the room (which results in the
+# equivalent behaviour as if the server had remained in the room).
+
+
+def run_upgrade(cur, database_engine, config, *args, **kwargs):
+    # We need to do the insert in the `run_upgrade` section, as we don't have
+    # access to `config` in `run_create`.
+
+    # This upgrade may take a bit of time for large servers (e.g. one minute for
+    # matrix.org) but means we avoid a lot of bookkeeping required to do it as
+    # a background update.
+
+    # We check if the `current_state_events.membership` is up to date by
+    # checking if the relevant background update has finished. If it has
+    # finished we can avoid doing a join against `room_memberships`, which
+    # speeds things up.
+    cur.execute(
+        """SELECT 1 FROM background_updates
+            WHERE update_name = 'current_state_events_membership'
+        """
+    )
+    current_state_membership_up_to_date = not bool(cur.fetchone())
+
+    # Cheekily drop and recreate indices, as that is faster.
+    cur.execute("DROP INDEX local_current_membership_idx")
+    cur.execute("DROP INDEX local_current_membership_room_idx")
+
+    if current_state_membership_up_to_date:
+        sql = """
+            INSERT INTO local_current_membership (room_id, user_id, event_id, membership)
+                SELECT c.room_id, state_key AS user_id, event_id, c.membership
+                FROM current_state_events AS c
+                WHERE type = 'm.room.member' AND c.membership IS NOT NULL AND state_key LIKE ?
+        """
+    else:
+        # We can't rely on the membership column, so we need to join against
+        # `room_memberships`.
+        sql = """
+            INSERT INTO local_current_membership (room_id, user_id, event_id, membership)
+                SELECT c.room_id, state_key AS user_id, event_id, r.membership
+                FROM current_state_events AS c
+                INNER JOIN room_memberships AS r USING (event_id)
+                WHERE type = 'm.room.member' AND state_key LIKE ?
+        """
+    sql = database_engine.convert_param_style(sql)
+    cur.execute(sql, ("%:" + config.server_name,))
+
+    cur.execute(
+        "CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership(user_id, room_id)"
+    )
+    cur.execute(
+        "CREATE INDEX local_current_membership_room_idx ON local_current_membership(room_id)"
+    )
+
+
+def run_create(cur, database_engine, *args, **kwargs):
+    cur.execute(
+        """
+        CREATE TABLE local_current_membership (
+            room_id TEXT NOT NULL,
+            user_id TEXT NOT NULL,
+            event_id TEXT NOT NULL,
+            membership TEXT NOT NULL
+        )"""
+    )
+
+    cur.execute(
+        "CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership(user_id, room_id)"
+    )
+    cur.execute(
+        "CREATE INDEX local_current_membership_room_idx ON local_current_membership(room_id)"
+    )
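
The `"%:" + config.server_name` pattern in `run_upgrade` is what restricts the backfill to local users: an MXID ends with `:<server_name>`, so the SQL LIKE matches exactly the users this server owns. The same predicate in plain Python (names invented):

    server_name = "example.com"
    suffix = ":" + server_name   # SQL pattern is "%:" + server_name

    for user_id in ("@alice:example.com", "@bob:remote.org"):
        print(user_id, "is local:", user_id.endswith(suffix))
    # @alice:example.com is local: True
    # @bob:remote.org is local: False
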
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index d07440e3ed..33bebd1c48 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -165,19 +165,20 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         )
 
     # FIXME: how should this be cached?
-    def get_filtered_current_state_ids(self, room_id, state_filter=StateFilter.all()):
+    def get_filtered_current_state_ids(
+        self, room_id: str, state_filter: StateFilter = StateFilter.all()
+    ):
         """Get the current state event of a given type for a room based on the
         current_state_events table.  This may not be as up-to-date as the result
         of doing a fresh state resolution as per state_handler.get_current_state
 
         Args:
-            room_id (str)
-            state_filter (StateFilter): The state filter used to fetch state
+            room_id
+            state_filter: The state filter used to fetch state
                 from the database.
 
         Returns:
-            Deferred[dict[tuple[str, str], str]]: Map from type/state_key to
-            event ID.
+            defer.Deferred[StateMap[str]]: Map from type/state_key to event ID.
         """
 
         where_clause, where_args = state_filter.make_sql_filter_clause()
diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py
index 140da8dad6..056b25b13a 100644
--- a/synapse/storage/data_stores/main/stream.py
+++ b/synapse/storage/data_stores/main/stream.py
@@ -525,8 +525,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         return rows, token
 
-    def get_room_event_after_stream_ordering(self, room_id, stream_ordering):
-        """Gets details of the first event in a room at or after a stream ordering
+    def get_room_event_before_stream_ordering(self, room_id, stream_ordering):
+        """Gets details of the first event in a room at or before a stream ordering
 
         Args:
             room_id (str):
@@ -541,15 +541,15 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             sql = (
                 "SELECT stream_ordering, topological_ordering, event_id"
                 " FROM events"
-                " WHERE room_id = ? AND stream_ordering >= ?"
+                " WHERE room_id = ? AND stream_ordering <= ?"
                 " AND NOT outlier"
-                " ORDER BY stream_ordering"
+                " ORDER BY stream_ordering DESC"
                 " LIMIT 1"
             )
             txn.execute(sql, (room_id, stream_ordering))
             return txn.fetchone()
 
-        return self.db.runInteraction("get_room_event_after_stream_ordering", _f)
+        return self.db.runInteraction("get_room_event_before_stream_ordering", _f)
 
     @defer.inlineCallbacks
     def get_room_events_max_id(self, room_id=None):
diff --git a/synapse/storage/data_stores/state/store.py b/synapse/storage/data_stores/state/store.py
index d53695f238..c4ee9b7ccb 100644
--- a/synapse/storage/data_stores/state/store.py
+++ b/synapse/storage/data_stores/state/store.py
@@ -15,6 +15,7 @@
 
 import logging
 from collections import namedtuple
+from typing import Dict, Iterable, List, Set, Tuple
 
 from six import iteritems
 from six.moves import range
@@ -26,6 +27,7 @@ from synapse.storage._base import SQLBaseStore
 from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
 from synapse.storage.database import Database
 from synapse.storage.state import StateFilter
+from synapse.types import StateMap
 from synapse.util.caches import get_cache_factor_for
 from synapse.util.caches.descriptors import cached
 from synapse.util.caches.dictionary_cache import DictionaryCache
@@ -133,17 +135,18 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         )
 
     @defer.inlineCallbacks
-    def _get_state_groups_from_groups(self, groups, state_filter):
-        """Returns the state groups for a given set of groups, filtering on
-        types of state events.
+    def _get_state_groups_from_groups(
+        self, groups: List[int], state_filter: StateFilter
+    ):
+        """Returns the state groups for a given set of groups from the
+        database, filtering on types of state events.
 
         Args:
-            groups(list[int]): list of state group IDs to query
-            state_filter (StateFilter): The state filter used to fetch state
+            groups: list of state group IDs to query
+            state_filter: The state filter used to fetch state
                 from the database.
         Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
+            Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
         """
         results = {}
 
@@ -199,18 +202,19 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         return state_filter.filter_state(state_dict_ids), not missing_types
 
     @defer.inlineCallbacks
-    def _get_state_for_groups(self, groups, state_filter=StateFilter.all()):
+    def _get_state_for_groups(
+        self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all()
+    ):
         """Gets the state at each of a list of state groups, optionally
         filtering by type/state_key
 
         Args:
-            groups (iterable[int]): list of state groups for which we want
+            groups: list of state groups for which we want
                 to get the state.
-            state_filter (StateFilter): The state filter used to fetch state
+            state_filter: The state filter used to fetch state
                 from the database.
         Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
+            Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
         """
 
         member_filter, non_member_filter = state_filter.get_member_split()
@@ -268,24 +272,24 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
 
         return state
 
-    def _get_state_for_groups_using_cache(self, groups, cache, state_filter):
+    def _get_state_for_groups_using_cache(
+        self, groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter
+    ) -> Tuple[Dict[int, StateMap[str]], Set[int]]:
         """Gets the state at each of a list of state groups, optionally
         filtering by type/state_key, querying from a specific cache.
 
         Args:
-            groups (iterable[int]): list of state groups for which we want
-                to get the state.
-            cache (DictionaryCache): the cache of group ids to state dicts which
-                we will pass through - either the normal state cache or the specific
-                members state cache.
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
+            groups: list of state groups for which we want to get the state.
+            cache: the cache of group ids to state dicts which
+                we will pass through - either the normal state cache or the
+                specific members state cache.
+            state_filter: The state filter used to fetch state from the
+                database.
 
         Returns:
-            tuple[dict[int, dict[tuple[str, str], str]], set[int]]: Tuple of
-            dict of state_group_id -> (dict of (type, state_key) -> event id)
-            of entries in the cache, and the state group ids either missing
-            from the cache or incomplete.
+            Tuple of dict of state_group_id to state map of entries in the
+            cache, and the state group ids either missing from the cache or
+            incomplete.
         """
         results = {}
         incomplete_groups = set()
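
For reference, `StateMap[str]` in the annotations above is Synapse's alias for a mapping from `(event type, state_key)` pairs to a value, here an event ID. A minimal equivalent definition plus an example (this mirrors `synapse.types` and may differ from it in detail):

    from typing import Dict, Tuple, TypeVar

    T = TypeVar("T")
    StateMap = Dict[Tuple[str, str], T]

    state: StateMap[str] = {
        ("m.room.member", "@alice:example.com"): "$membership_event",
        ("m.room.name", ""): "$name_event",
    }
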
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index b7c4eda338..c84cb452b0 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -32,20 +32,7 @@ class PostgresEngine(object):
         self.synchronous_commit = database_config.get("synchronous_commit", True)
         self._version = None  # unknown as yet
 
-    def check_database(self, txn):
-        txn.execute("SHOW SERVER_ENCODING")
-        rows = txn.fetchall()
-        if rows and rows[0][0] != "UTF8":
-            raise IncorrectDatabaseSetup(
-                "Database has incorrect encoding: '%s' instead of 'UTF8'\n"
-                "See docs/postgres.rst for more information." % (rows[0][0],)
-            )
-
-    def convert_param_style(self, sql):
-        return sql.replace("?", "%s")
-
-    def on_new_connection(self, db_conn):
-
+    def check_database(self, db_conn, allow_outdated_version: bool = False):
         # Get the version of PostgreSQL that we're using. As per the psycopg2
         # docs: The number is formed by converting the major, minor, and
         # revision numbers into two-decimal-digit numbers and appending them
@@ -53,9 +40,22 @@ class PostgresEngine(object):
         self._version = db_conn.server_version
 
         # Are we on a supported PostgreSQL version?
-        if self._version < 90500:
+        if not allow_outdated_version and self._version < 90500:
             raise RuntimeError("Synapse requires PostgreSQL 9.5+ or above.")
 
+        with db_conn.cursor() as txn:
+            txn.execute("SHOW SERVER_ENCODING")
+            rows = txn.fetchall()
+            if rows and rows[0][0] != "UTF8":
+                raise IncorrectDatabaseSetup(
+                    "Database has incorrect encoding: '%s' instead of 'UTF8'\n"
+                    "See docs/postgres.rst for more information." % (rows[0][0],)
+                )
+
+    def convert_param_style(self, sql):
+        return sql.replace("?", "%s")
+
+    def on_new_connection(self, db_conn):
         db_conn.set_isolation_level(
             self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
         )
@@ -119,8 +119,8 @@ class PostgresEngine(object):
         Returns:
             string
         """
-        # note that this is a bit of a hack because it relies on on_new_connection
-        # having been called at least once. Still, that should be a safe bet here.
+        # note that this is a bit of a hack because it relies on check_database
+        # having been called. Still, that should be a safe bet here.
         numver = self._version
         assert numver is not None
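
A hedged sketch of the reworked check_database() contract, assuming the engine
is built with the psycopg2 module and a config dict as elsewhere in this file
(the connection details are made up):

    import psycopg2

    engine = PostgresEngine(psycopg2, {"synchronous_commit": True})
    db_conn = psycopg2.connect(dbname="synapse")

    # Raises RuntimeError on PostgreSQL < 9.5 unless the caller opts out, and
    # IncorrectDatabaseSetup if the server encoding is not UTF8.
    engine.check_database(db_conn, allow_outdated_version=False)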
 
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index df039a072d..cbf52f5191 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -53,8 +53,11 @@ class Sqlite3Engine(object):
         """
         return False
 
-    def check_database(self, txn):
-        pass
+    def check_database(self, db_conn, allow_outdated_version: bool = False):
+        if not allow_outdated_version:
+            version = self.module.sqlite_version_info
+            if version < (3, 11, 0):
+                raise RuntimeError("Synapse requires sqlite 3.11 or above.")
 
     def convert_param_style(self, sql):
         return sql
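
For context, the two engines normalise SQL placeholders differently; a small
sketch, where pg_engine and sqlite_engine stand for already-constructed engine
instances:

    sql = "UPDATE t SET x = ? WHERE id = ?"
    pg_engine.convert_param_style(sql)      # "UPDATE t SET x = %s WHERE id = %s"
    sqlite_engine.convert_param_style(sql)  # unchanged; sqlite natively uses '?'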
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index 1ed44925fc..368c457321 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -17,19 +17,24 @@
 
 import logging
 from collections import deque, namedtuple
+from typing import Iterable, List, Optional, Tuple
 
 from six import iteritems
 from six.moves import range
 
+import attr
 from prometheus_client import Counter, Histogram
 
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
+from synapse.events import FrozenEvent
+from synapse.events.snapshot import EventContext
 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.state import StateResolutionStore
 from synapse.storage.data_stores import DataStores
+from synapse.types import StateMap
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.metrics import Measure
 
@@ -67,6 +72,19 @@ stale_forward_extremities_counter = Histogram(
 )
 
 
+@attr.s(slots=True, frozen=True)
+class DeltaState:
+    """Deltas to use to update the `current_state_events` table.
+
+    Attributes:
+        to_delete: List of type/state_keys to delete from current state
+        to_insert: Map of state to upsert into current state
+    """
+
+    to_delete = attr.ib(type=List[Tuple[str, str]])
+    to_insert = attr.ib(type=StateMap[str])
+
+
 class _EventPeristenceQueue(object):
     """Queues up events so that they can be persisted in bulk with only one
     concurrent transaction per room.
@@ -138,13 +156,12 @@ class _EventPeristenceQueue(object):
 
         self._currently_persisting_rooms.add(room_id)
 
-        @defer.inlineCallbacks
-        def handle_queue_loop():
+        async def handle_queue_loop():
             try:
                 queue = self._get_drainining_queue(room_id)
                 for item in queue:
                     try:
-                        ret = yield per_item_callback(item)
+                        ret = await per_item_callback(item)
                     except Exception:
                         with PreserveLoggingContext():
                             item.deferred.errback()
@@ -191,12 +208,16 @@ class EventsPersistenceStorage(object):
         self._state_resolution_handler = hs.get_state_resolution_handler()
 
     @defer.inlineCallbacks
-    def persist_events(self, events_and_contexts, backfilled=False):
+    def persist_events(
+        self,
+        events_and_contexts: List[Tuple[FrozenEvent, EventContext]],
+        backfilled: bool = False,
+    ):
         """
         Write events to the database
         Args:
             events_and_contexts: list of tuples of (event, context)
-            backfilled (bool): Whether the results are retrieved from federation
+            backfilled: Whether the results are retrieved from federation
                 via backfill or not. Used to determine if they're "new" events
                 which might update the current state etc.
 
@@ -226,16 +247,12 @@ class EventsPersistenceStorage(object):
         return max_persisted_id
 
     @defer.inlineCallbacks
-    def persist_event(self, event, context, backfilled=False):
+    def persist_event(
+        self, event: FrozenEvent, context: EventContext, backfilled: bool = False
+    ):
         """
-
-        Args:
-            event (EventBase):
-            context (EventContext):
-            backfilled (bool):
-
         Returns:
-            Deferred: resolves to (int, int): the stream ordering of ``event``,
+            Deferred[Tuple[int, int]]: the stream ordering of ``event``,
             and the stream ordering of the latest persisted event
         """
         deferred = self._event_persist_queue.add_to_queue(
@@ -249,28 +266,22 @@ class EventsPersistenceStorage(object):
         max_persisted_id = yield self.main_store.get_current_events_token()
         return (event.internal_metadata.stream_ordering, max_persisted_id)
 
-    def _maybe_start_persisting(self, room_id):
-        @defer.inlineCallbacks
-        def persisting_queue(item):
+    def _maybe_start_persisting(self, room_id: str):
+        async def persisting_queue(item):
             with Measure(self._clock, "persist_events"):
-                yield self._persist_events(
+                await self._persist_events(
                     item.events_and_contexts, backfilled=item.backfilled
                 )
 
         self._event_persist_queue.handle_queue(room_id, persisting_queue)
 
-    @defer.inlineCallbacks
-    def _persist_events(self, events_and_contexts, backfilled=False):
+    async def _persist_events(
+        self,
+        events_and_contexts: List[Tuple[FrozenEvent, EventContext]],
+        backfilled: bool = False,
+    ):
         """Calculates the change to current state and forward extremities, and
         persists the given events along with those updates.
-
-        Args:
-            events_and_contexts (list[(EventBase, EventContext)]):
-            backfilled (bool):
-            delete_existing (bool):
-
-        Returns:
-            Deferred: resolves when the events have been persisted
         """
         if not events_and_contexts:
             return
@@ -315,10 +326,10 @@ class EventsPersistenceStorage(object):
                         )
 
                     for room_id, ev_ctx_rm in iteritems(events_by_room):
-                        latest_event_ids = yield self.main_store.get_latest_event_ids_in_room(
+                        latest_event_ids = await self.main_store.get_latest_event_ids_in_room(
                             room_id
                         )
-                        new_latest_event_ids = yield self._calculate_new_extremities(
+                        new_latest_event_ids = await self._calculate_new_extremities(
                             room_id, ev_ctx_rm, latest_event_ids
                         )
 
@@ -374,7 +385,7 @@ class EventsPersistenceStorage(object):
                         with Measure(
                             self._clock, "persist_events.get_new_state_after_events"
                         ):
-                            res = yield self._get_new_state_after_events(
+                            res = await self._get_new_state_after_events(
                                 room_id,
                                 ev_ctx_rm,
                                 latest_event_ids,
@@ -389,12 +400,12 @@ class EventsPersistenceStorage(object):
                             # If there is a delta we know that we've
                             # only added or replaced state, never
                             # removed keys entirely.
-                            state_delta_for_room[room_id] = ([], delta_ids)
+                            state_delta_for_room[room_id] = DeltaState([], delta_ids)
                         elif current_state is not None:
                             with Measure(
                                 self._clock, "persist_events.calculate_state_delta"
                             ):
-                                delta = yield self._calculate_state_delta(
+                                delta = await self._calculate_state_delta(
                                     room_id, current_state
                                 )
                             state_delta_for_room[room_id] = delta
@@ -404,7 +415,7 @@ class EventsPersistenceStorage(object):
                         if current_state is not None:
                             current_state_for_room[room_id] = current_state
 
-            yield self.main_store._persist_events_and_state_updates(
+            await self.main_store._persist_events_and_state_updates(
                 chunk,
                 current_state_for_room=current_state_for_room,
                 state_delta_for_room=state_delta_for_room,
@@ -412,8 +423,12 @@ class EventsPersistenceStorage(object):
                 backfilled=backfilled,
             )
 
-    @defer.inlineCallbacks
-    def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids):
+    async def _calculate_new_extremities(
+        self,
+        room_id: str,
+        event_contexts: List[Tuple[FrozenEvent, EventContext]],
+        latest_event_ids: List[str],
+    ):
         """Calculates the new forward extremities for a room given events to
         persist.
 
@@ -444,13 +459,13 @@ class EventsPersistenceStorage(object):
         )
 
         # Remove any events which are prev_events of any existing events.
-        existing_prevs = yield self.main_store._get_events_which_are_prevs(result)
+        existing_prevs = await self.main_store._get_events_which_are_prevs(result)
         result.difference_update(existing_prevs)
 
         # Finally handle the case where the new events have soft-failed prev
         # events. If they do we need to remove them and their prev events,
         # otherwise we end up with dangling extremities.
-        existing_prevs = yield self.main_store._get_prevs_before_rejected(
+        existing_prevs = await self.main_store._get_prevs_before_rejected(
             e_id for event in new_events for e_id in event.prev_event_ids()
         )
         result.difference_update(existing_prevs)
@@ -464,10 +479,13 @@ class EventsPersistenceStorage(object):
 
         return result
 
-    @defer.inlineCallbacks
-    def _get_new_state_after_events(
-        self, room_id, events_context, old_latest_event_ids, new_latest_event_ids
-    ):
+    async def _get_new_state_after_events(
+        self,
+        room_id: str,
+        events_context: List[Tuple[FrozenEvent, EventContext]],
+        old_latest_event_ids: Iterable[str],
+        new_latest_event_ids: Iterable[str],
+    ) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]]]:
         """Calculate the current state dict after adding some new events to
         a room
 
@@ -485,7 +503,6 @@ class EventsPersistenceStorage(object):
                 the new forward extremities for the room.
 
         Returns:
-            Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]:
             Returns a tuple of two state maps, the first being the full new current
             state and the second being the delta to the existing current state.
             If both are None then there has been no change.
@@ -547,7 +564,7 @@ class EventsPersistenceStorage(object):
 
         if missing_event_ids:
             # Now pull out the state groups for any missing events from DB
-            event_to_groups = yield self.main_store._get_state_group_for_events(
+            event_to_groups = await self.main_store._get_state_group_for_events(
                 missing_event_ids
             )
             event_id_to_state_group.update(event_to_groups)
@@ -588,7 +605,7 @@ class EventsPersistenceStorage(object):
         # their state IDs so we can resolve to a single state set.
         missing_state = new_state_groups - set(state_groups_map)
         if missing_state:
-            group_to_state = yield self.state_store._get_state_for_groups(missing_state)
+            group_to_state = await self.state_store._get_state_for_groups(missing_state)
             state_groups_map.update(group_to_state)
 
         if len(new_state_groups) == 1:
@@ -612,10 +629,10 @@ class EventsPersistenceStorage(object):
                 break
 
         if not room_version:
-            room_version = yield self.main_store.get_room_version(room_id)
+            room_version = await self.main_store.get_room_version(room_id)
 
         logger.debug("calling resolve_state_groups from preserve_events")
-        res = yield self._state_resolution_handler.resolve_state_groups(
+        res = await self._state_resolution_handler.resolve_state_groups(
             room_id,
             room_version,
             state_groups,
@@ -625,18 +642,14 @@ class EventsPersistenceStorage(object):
 
         return res.state, None
 
-    @defer.inlineCallbacks
-    def _calculate_state_delta(self, room_id, current_state):
+    async def _calculate_state_delta(
+        self, room_id: str, current_state: StateMap[str]
+    ) -> DeltaState:
         """Calculate the new state deltas for a room.
 
         Assumes that we are only persisting events for one room at a time.
-
-        Returns:
-            tuple[list, dict] (to_delete, to_insert): where to_delete are the
-            type/state_keys to remove from current_state_events and `to_insert`
-            are the updates to current_state_events.
         """
-        existing_state = yield self.main_store.get_current_state_ids(room_id)
+        existing_state = await self.main_store.get_current_state_ids(room_id)
 
         to_delete = [key for key in existing_state if key not in current_state]
 
@@ -646,4 +659,4 @@ class EventsPersistenceStorage(object):
             if ev_id != existing_state.get(key)
         }
 
-        return to_delete, to_insert
+        return DeltaState(to_delete=to_delete, to_insert=to_insert)
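
A toy illustration of the DeltaState container that _calculate_state_delta()
now returns; the event IDs below are made up:

    delta = DeltaState(
        to_delete=[("m.room.member", "@alice:test")],
        to_insert={("m.room.topic", ""): "$new_topic_event_id"},
    )
    assert ("m.room.topic", "") in delta.to_insert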
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index e70026b80a..e86984cd50 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -29,7 +29,7 @@ logger = logging.getLogger(__name__)
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 56
+SCHEMA_VERSION = 57
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
diff --git a/synapse/storage/purge_events.py b/synapse/storage/purge_events.py
index d6a7bd7834..fdc0abf5cf 100644
--- a/synapse/storage/purge_events.py
+++ b/synapse/storage/purge_events.py
@@ -34,7 +34,7 @@ class PurgeEventsStorage(object):
         """
 
         state_groups_to_delete = yield self.stores.main.purge_room(room_id)
-        yield self.stores.main.purge_room_state(room_id, state_groups_to_delete)
+        yield self.stores.state.purge_room_state(room_id, state_groups_to_delete)
 
     @defer.inlineCallbacks
     def purge_history(self, room_id, token, delete_local_events):
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index cbeb586014..c522c80922 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import logging
+from typing import Iterable, List, TypeVar
 
 from six import iteritems, itervalues
 
@@ -22,9 +23,13 @@ import attr
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
+from synapse.types import StateMap
 
 logger = logging.getLogger(__name__)
 
+# Used for generic functions below
+T = TypeVar("T")
+
 
 @attr.s(slots=True)
 class StateFilter(object):
@@ -233,14 +238,14 @@ class StateFilter(object):
 
         return len(self.concrete_types())
 
-    def filter_state(self, state_dict):
+    def filter_state(self, state_dict: StateMap[T]) -> StateMap[T]:
         """Returns the state filtered with by this StateFilter
 
         Args:
-            state (dict[tuple[str, str], Any]): The state map to filter
+            state_dict: The state map to filter
 
         Returns:
-            dict[tuple[str, str], Any]: The filtered state map
+            The filtered state map
         """
         if self.is_full():
             return dict(state_dict)
@@ -333,12 +338,12 @@ class StateGroupStorage(object):
     def __init__(self, hs, stores):
         self.stores = stores
 
-    def get_state_group_delta(self, state_group):
+    def get_state_group_delta(self, state_group: int):
         """Given a state group try to return a previous group and a delta between
         the old and the new.
 
         Returns:
-            Deferred[Tuple[Optional[int], Optional[list[dict[tuple[str, str], str]]]]]):
+            Deferred[Tuple[Optional[int], Optional[StateMap[str]]]]:
                 (prev_group, delta_ids)
         """
 
@@ -353,7 +358,7 @@ class StateGroupStorage(object):
             event_ids (iterable[str]): ids of the events
 
         Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
+            Deferred[dict[int, StateMap[str]]]:
                 dict of state_group_id -> (dict of (type, state_key) -> event id)
         """
         if not event_ids:
@@ -410,17 +415,18 @@ class StateGroupStorage(object):
             for group, event_id_map in iteritems(group_to_ids)
         }
 
-    def _get_state_groups_from_groups(self, groups, state_filter):
+    def _get_state_groups_from_groups(
+        self, groups: List[int], state_filter: StateFilter
+    ):
         """Returns the state groups for a given set of groups, filtering on
         types of state events.
 
         Args:
-            groups(list[int]): list of state group IDs to query
-            state_filter (StateFilter): The state filter used to fetch state
+            groups: list of state group IDs to query
+            state_filter: The state filter used to fetch state
                 from the database.
         Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
+            Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
         """
 
         return self.stores.state._get_state_groups_from_groups(groups, state_filter)
@@ -519,7 +525,9 @@ class StateGroupStorage(object):
         state_map = yield self.get_state_ids_for_events([event_id], state_filter)
         return state_map[event_id]
 
-    def _get_state_for_groups(self, groups, state_filter=StateFilter.all()):
+    def _get_state_for_groups(
+        self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all()
+    ):
         """Gets the state at each of a list of state groups, optionally
         filtering by type/state_key
 
@@ -529,8 +537,7 @@ class StateGroupStorage(object):
             state_filter (StateFilter): The state filter used to fetch state
                 from the database.
         Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
+            Deferred[dict[int, StateMap[str]]]: Dict of state group to state map.
         """
         return self.stores.state._get_state_for_groups(groups, state_filter)
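
A short sketch of filter_state() in action, assuming the StateFilter.from_types
constructor defined in this module (the event IDs are made up):

    state = {
        ("m.room.member", "@alice:test"): "$member_event",
        ("m.room.name", ""): "$name_event",
    }
    name_only = StateFilter.from_types([("m.room.name", None)])
    assert name_only.filter_state(state) == {("m.room.name", ""): "$name_event"}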
 
diff --git a/synapse/types.py b/synapse/types.py
index cd996c0b5a..65e4d8c181 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -17,6 +17,7 @@ import re
 import string
 import sys
 from collections import namedtuple
+from typing import Dict, Tuple, TypeVar
 
 import attr
 from signedjson.key import decode_verify_key_bytes
@@ -28,7 +29,7 @@ from synapse.api.errors import SynapseError
 if sys.version_info[:3] >= (3, 6, 0):
     from typing import Collection
 else:
-    from typing import Sized, Iterable, Container, TypeVar
+    from typing import Sized, Iterable, Container
 
     T_co = TypeVar("T_co", covariant=True)
 
@@ -36,6 +37,12 @@ else:
         __slots__ = ()
 
 
+# Define a state map type from type/state_key to T (usually an event ID or
+# event)
+T = TypeVar("T")
+StateMap = Dict[Tuple[str, str], T]
+
+
 class Requester(
     namedtuple(
         "Requester", ["user", "access_token_id", "is_guest", "device_id", "app_service"]
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 7856353002..60f0de70f7 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -15,7 +15,6 @@
 
 import logging
 import re
-from itertools import islice
 
 import attr
 
@@ -107,22 +106,6 @@ class Clock(object):
                 raise
 
 
-def batch_iter(iterable, size):
-    """batch an iterable up into tuples with a maximum size
-
-    Args:
-        iterable (iterable): the iterable to slice
-        size (int): the maximum batch size
-
-    Returns:
-        an iterator over the chunks
-    """
-    # make sure we can deal with iterables like lists too
-    sourceiter = iter(iterable)
-    # call islice until it returns an empty tuple
-    return iter(lambda: tuple(islice(sourceiter, size)), ())
-
-
 def log_failure(failure, msg, consumeErrors=True):
     """Creates a function suitable for passing to `Deferred.addErrback` that
     logs any failures that occur.
diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py
new file mode 100644
index 0000000000..06faeebe7f
--- /dev/null
+++ b/synapse/util/iterutils.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from itertools import islice
+from typing import Iterable, Iterator, Sequence, Tuple, TypeVar
+
+T = TypeVar("T")
+
+
+def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
+    """batch an iterable up into tuples with a maximum size
+
+    Args:
+        iterable: the iterable to slice
+        size: the maximum batch size
+
+    Returns:
+        an iterator over the chunks
+    """
+    # make sure we can deal with iterables like lists too
+    sourceiter = iter(iterable)
+    # call islice until it returns an empty tuple
+    return iter(lambda: tuple(islice(sourceiter, size)), ())
+
+
+ISeq = TypeVar("ISeq", bound=Sequence, covariant=True)
+
+
+def chunk_seq(iseq: ISeq, maxlen: int) -> Iterable[ISeq]:
+    """Split the given sequence into chunks of the given size
+
+    The last chunk may be shorter than the given size.
+
+    If the input is empty, no chunks are returned.
+    """
+    return (iseq[i : i + maxlen] for i in range(0, len(iseq), maxlen))
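
Expected behaviour of the two helpers above, for reference:

    list(batch_iter(range(5), 2))  # [(0, 1), (2, 3), (4,)]
    list(chunk_seq("abcdefg", 3))  # ['abc', 'def', 'g']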
diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py
index 2705cbe5f8..bb62db4637 100644
--- a/synapse/util/module_loader.py
+++ b/synapse/util/module_loader.py
@@ -34,7 +34,7 @@ def load_module(provider):
     provider_class = getattr(module, clz)
 
     try:
-        provider_config = provider_class.parse_config(provider["config"])
+        provider_config = provider_class.parse_config(provider.get("config"))
     except Exception as e:
         raise ConfigError("Failed to parse config for %r: %r" % (provider["module"], e))
 
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 1e9ba3a201..e2915eb7b1 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -269,8 +269,6 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
               one will be randomly generated.
         Returns:
             A tuple of (user_id, access_token).
-        Raises:
-            RegistrationError if there was a problem registering.
         """
         if localpart is None:
             raise SynapseError(400, "Request must include user id")
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index 758ee071a5..4cbe9784ed 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -32,8 +32,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
 
     def test_wait_for_sync_for_user_auth_blocking(self):
 
-        user_id1 = "@user1:server"
-        user_id2 = "@user2:server"
+        user_id1 = "@user1:test"
+        user_id2 = "@user2:test"
         sync_config = self._generate_sync_config(user_id1)
 
         self.reactor.advance(100)  # so that the clock is not at time zero
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index b68e9fe082..b1b037006d 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -115,13 +115,13 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
 
     def test_invites(self):
         self.persist(type="m.room.create", key="", creator=USER_ID)
-        self.check("get_invited_rooms_for_user", [USER_ID_2], [])
+        self.check("get_invited_rooms_for_local_user", [USER_ID_2], [])
         event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite")
 
         self.replicate()
 
         self.check(
-            "get_invited_rooms_for_user",
+            "get_invited_rooms_for_local_user",
             [USER_ID_2],
             [
                 RoomsForUser(
diff --git a/tests/replication/tcp/streams/_base.py b/tests/replication/tcp/streams/_base.py
index 1d14e77255..e96ad4ca4e 100644
--- a/tests/replication/tcp/streams/_base.py
+++ b/tests/replication/tcp/streams/_base.py
@@ -73,6 +73,6 @@ class TestReplicationClientHandler(object):
     def finished_connecting(self):
         pass
 
-    def on_rdata(self, stream_name, token, rows):
+    async def on_rdata(self, stream_name, token, rows):
         for r in rows:
             self.received_rdata_rows.append((stream_name, token, r))
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 6ceb483aa8..0342aed416 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -14,13 +14,20 @@
 # limitations under the License.
 
 import json
+import os
+import urllib.parse
+from binascii import unhexlify
+from typing import List, Optional
 
 from mock import Mock
 
+from twisted.internet.defer import Deferred
+
 import synapse.rest.admin
 from synapse.http.server import JsonResource
+from synapse.logging.context import make_deferred_yieldable
 from synapse.rest.admin import VersionServlet
-from synapse.rest.client.v1 import events, login, room
+from synapse.rest.client.v1 import directory, events, login, room
 from synapse.rest.client.v2_alpha import groups
 
 from tests import unittest
@@ -331,7 +338,7 @@ class PurgeRoomTestCase(unittest.HomeserverTestCase):
             "local_invites",
             "room_account_data",
             "room_tags",
-            "state_groups",
+            # "state_groups",  # Current impl leaves orphaned state groups around.
             "state_groups_state",
         ):
             count = self.get_success(
@@ -345,4 +352,728 @@ class PurgeRoomTestCase(unittest.HomeserverTestCase):
 
             self.assertEqual(count, 0, msg="Rows not purged in {}".format(table))
 
-    test_purge_room.skip = "Disabled because it's currently broken"
+
+class QuarantineMediaTestCase(unittest.HomeserverTestCase):
+    """Test /quarantine_media admin API.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        synapse.rest.admin.register_servlets_for_media_repo,
+        login.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+        self.hs = hs
+
+        # Allow for uploading and downloading to/from the media repo
+        self.media_repo = hs.get_media_repository_resource()
+        self.download_resource = self.media_repo.children[b"download"]
+        self.upload_resource = self.media_repo.children[b"upload"]
+        self.image_data = unhexlify(
+            b"89504e470d0a1a0a0000000d4948445200000001000000010806"
+            b"0000001f15c4890000000a49444154789c63000100000500010d"
+            b"0a2db40000000049454e44ae426082"
+        )
+
+    def make_homeserver(self, reactor, clock):
+
+        self.fetches = []
+
+        def get_file(destination, path, output_stream, args=None, max_size=None):
+            """
+            Returns tuple[int,dict,str,int] of file length, response headers,
+            absolute URI, and response code.
+            """
+
+            def write_to(r):
+                data, response = r
+                output_stream.write(data)
+                return response
+
+            d = Deferred()
+            d.addCallback(write_to)
+            self.fetches.append((d, destination, path, args))
+            return make_deferred_yieldable(d)
+
+        client = Mock()
+        client.get_file = get_file
+
+        self.storage_path = self.mktemp()
+        self.media_store_path = self.mktemp()
+        os.mkdir(self.storage_path)
+        os.mkdir(self.media_store_path)
+
+        config = self.default_config()
+        config["media_store_path"] = self.media_store_path
+        config["thumbnail_requirements"] = {}
+        config["max_image_pixels"] = 2000000
+
+        provider_config = {
+            "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend",
+            "store_local": True,
+            "store_synchronous": False,
+            "store_remote": True,
+            "config": {"directory": self.storage_path},
+        }
+        config["media_storage_providers"] = [provider_config]
+
+        hs = self.setup_test_homeserver(config=config, http_client=client)
+
+        return hs
+
+    def test_quarantine_media_requires_admin(self):
+        self.register_user("nonadmin", "pass", admin=False)
+        non_admin_user_tok = self.login("nonadmin", "pass")
+
+        # Attempt quarantine media APIs as non-admin
+        url = "/_synapse/admin/v1/media/quarantine/example.org/abcde12345"
+        request, channel = self.make_request(
+            "POST", url.encode("ascii"), access_token=non_admin_user_tok,
+        )
+        self.render(request)
+
+        # Expect a forbidden error
+        self.assertEqual(
+            403,
+            int(channel.result["code"]),
+            msg="Expected forbidden on quarantining media as a non-admin",
+        )
+
+        # And the roomID/userID endpoint
+        url = "/_synapse/admin/v1/room/!room%3Aexample.com/media/quarantine"
+        request, channel = self.make_request(
+            "POST", url.encode("ascii"), access_token=non_admin_user_tok,
+        )
+        self.render(request)
+
+        # Expect a forbidden error
+        self.assertEqual(
+            403,
+            int(channel.result["code"]),
+            msg="Expected forbidden on quarantining media as a non-admin",
+        )
+
+    def test_quarantine_media_by_id(self):
+        self.register_user("id_admin", "pass", admin=True)
+        admin_user_tok = self.login("id_admin", "pass")
+
+        self.register_user("id_nonadmin", "pass", admin=False)
+        non_admin_user_tok = self.login("id_nonadmin", "pass")
+
+        # Upload some media into the room
+        response = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=admin_user_tok
+        )
+
+        # Extract media ID from the response
+        server_name_and_media_id = response["content_uri"][6:]  # Cut off 'mxc://'
+        server_name, media_id = server_name_and_media_id.split("/")
+
+        # Attempt to access the media
+        request, channel = self.make_request(
+            "GET",
+            server_name_and_media_id,
+            shorthand=False,
+            access_token=non_admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be successful
+        self.assertEqual(200, int(channel.code), msg=channel.result["body"])
+
+        # Quarantine the media
+        url = "/_synapse/admin/v1/media/quarantine/%s/%s" % (
+            urllib.parse.quote(server_name),
+            urllib.parse.quote(media_id),
+        )
+        request, channel = self.make_request("POST", url, access_token=admin_user_tok,)
+        self.render(request)
+        self.pump(1.0)
+        self.assertEqual(200, int(channel.code), msg=channel.result["body"])
+
+        # Attempt to access the media
+        request, channel = self.make_request(
+            "GET",
+            server_name_and_media_id,
+            shorthand=False,
+            access_token=admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be quarantined
+        self.assertEqual(
+            404,
+            int(channel.code),
+            msg=(
+                "Expected to receive a 404 on accessing quarantined media: %s"
+                % server_name_and_media_id
+            ),
+        )
+
+    def test_quarantine_all_media_in_room(self, override_url_template=None):
+        self.register_user("room_admin", "pass", admin=True)
+        admin_user_tok = self.login("room_admin", "pass")
+
+        non_admin_user = self.register_user("room_nonadmin", "pass", admin=False)
+        non_admin_user_tok = self.login("room_nonadmin", "pass")
+
+        room_id = self.helper.create_room_as(non_admin_user, tok=admin_user_tok)
+        self.helper.join(room_id, non_admin_user, tok=non_admin_user_tok)
+
+        # Upload some media
+        response_1 = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=non_admin_user_tok
+        )
+        response_2 = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=non_admin_user_tok
+        )
+
+        # Extract mxcs
+        mxc_1 = response_1["content_uri"]
+        mxc_2 = response_2["content_uri"]
+
+        # Send it into the room
+        self.helper.send_event(
+            room_id,
+            "m.room.message",
+            content={"body": "image-1", "msgtype": "m.image", "url": mxc_1},
+            txn_id="111",
+            tok=non_admin_user_tok,
+        )
+        self.helper.send_event(
+            room_id,
+            "m.room.message",
+            content={"body": "image-2", "msgtype": "m.image", "url": mxc_2},
+            txn_id="222",
+            tok=non_admin_user_tok,
+        )
+
+        # Quarantine all media in the room
+        if override_url_template:
+            url = override_url_template % urllib.parse.quote(room_id)
+        else:
+            url = "/_synapse/admin/v1/room/%s/media/quarantine" % urllib.parse.quote(
+                room_id
+            )
+        request, channel = self.make_request("POST", url, access_token=admin_user_tok,)
+        self.render(request)
+        self.pump(1.0)
+        self.assertEqual(200, int(channel.code), msg=channel.result["body"])
+        self.assertEqual(
+            json.loads(channel.result["body"].decode("utf-8")),
+            {"num_quarantined": 2},
+            "Expected 2 quarantined items",
+        )
+
+        # Convert mxc URLs to server/media_id strings
+        server_and_media_id_1 = mxc_1[6:]
+        server_and_media_id_2 = mxc_2[6:]
+
+        # Test that we cannot download any of the media anymore
+        request, channel = self.make_request(
+            "GET",
+            server_and_media_id_1,
+            shorthand=False,
+            access_token=non_admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be quarantined
+        self.assertEqual(
+            404,
+            int(channel.code),
+            msg=(
+                "Expected to receive a 404 on accessing quarantined media: %s"
+                % server_and_media_id_1
+            ),
+        )
+
+        request, channel = self.make_request(
+            "GET",
+            server_and_media_id_2,
+            shorthand=False,
+            access_token=non_admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be quarantined
+        self.assertEqual(
+            404,
+            int(channel.code),
+            msg=(
+                "Expected to receive a 404 on accessing quarantined media: %s"
+                % server_and_media_id_2
+            ),
+        )
+
+    def test_quarantine_all_media_in_room_deprecated_api_path(self):
+        # Perform the above test with the deprecated API path
+        self.test_quarantine_all_media_in_room("/_synapse/admin/v1/quarantine_media/%s")
+
+    def test_quarantine_all_media_by_user(self):
+        self.register_user("user_admin", "pass", admin=True)
+        admin_user_tok = self.login("user_admin", "pass")
+
+        non_admin_user = self.register_user("user_nonadmin", "pass", admin=False)
+        non_admin_user_tok = self.login("user_nonadmin", "pass")
+
+        # Upload some media
+        response_1 = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=non_admin_user_tok
+        )
+        response_2 = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=non_admin_user_tok
+        )
+
+        # Extract media IDs
+        server_and_media_id_1 = response_1["content_uri"][6:]
+        server_and_media_id_2 = response_2["content_uri"][6:]
+
+        # Quarantine all media by this user
+        url = "/_synapse/admin/v1/user/%s/media/quarantine" % urllib.parse.quote(
+            non_admin_user
+        )
+        request, channel = self.make_request(
+            "POST", url.encode("ascii"), access_token=admin_user_tok,
+        )
+        self.render(request)
+        self.pump(1.0)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(
+            json.loads(channel.result["body"].decode("utf-8")),
+            {"num_quarantined": 2},
+            "Expected 2 quarantined items",
+        )
+
+        # Attempt to access each piece of media
+        request, channel = self.make_request(
+            "GET",
+            server_and_media_id_1,
+            shorthand=False,
+            access_token=non_admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be quarantined
+        self.assertEqual(
+            404,
+            int(channel.code),
+            msg=(
+                "Expected to receive a 404 on accessing quarantined media: %s"
+                % server_and_media_id_1
+            ),
+        )
+
+        # Attempt to access each piece of media
+        request, channel = self.make_request(
+            "GET",
+            server_and_media_id_2,
+            shorthand=False,
+            access_token=non_admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be quarantined
+        self.assertEqual(
+            404,
+            int(channel.code),
+            msg=(
+                "Expected to receive a 404 on accessing quarantined media: %s"
+                % server_and_media_id_2
+            ),
+        )
+
+
+class RoomTestCase(unittest.HomeserverTestCase):
+    """Test /room admin API.
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        room.register_servlets,
+        directory.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+
+        # Create user
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+    def test_list_rooms(self):
+        """Test that we can list rooms"""
+        # Create 3 test rooms
+        total_rooms = 3
+        room_ids = []
+        for x in range(total_rooms):
+            room_id = self.helper.create_room_as(
+                self.admin_user, tok=self.admin_user_tok
+            )
+            room_ids.append(room_id)
+
+        # Request the list of rooms
+        url = "/_synapse/admin/v1/rooms"
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        # Check request completed successfully
+        self.assertEqual(200, int(channel.code), msg=channel.json_body)
+
+        # Check that response json body contains a "rooms" key
+        self.assertTrue(
+            "rooms" in channel.json_body,
+            msg="Response body does not " "contain a 'rooms' key",
+        )
+
+        # Check that 3 rooms were returned
+        self.assertEqual(3, len(channel.json_body["rooms"]), msg=channel.json_body)
+
+        # Check their room_ids match
+        returned_room_ids = [room["room_id"] for room in channel.json_body["rooms"]]
+        self.assertEqual(room_ids, returned_room_ids)
+
+        # Check that all fields are available
+        for r in channel.json_body["rooms"]:
+            self.assertIn("name", r)
+            self.assertIn("canonical_alias", r)
+            self.assertIn("joined_members", r)
+
+        # Check that the correct number of total rooms was returned
+        self.assertEqual(channel.json_body["total_rooms"], total_rooms)
+
+        # Check that the offset is correct
+        # Should be 0 as we aren't paginating
+        self.assertEqual(channel.json_body["offset"], 0)
+
+        # Check that the prev_batch parameter is not present
+        self.assertNotIn("prev_batch", channel.json_body)
+
+        # We shouldn't receive a next token here as there's no further rooms to show
+        self.assertNotIn("next_batch", channel.json_body)
+
+    def test_list_rooms_pagination(self):
+        """Test that we can get a full list of rooms through pagination"""
+        # Create 5 test rooms
+        total_rooms = 5
+        room_ids = []
+        for x in range(total_rooms):
+            room_id = self.helper.create_room_as(
+                self.admin_user, tok=self.admin_user_tok
+            )
+            room_ids.append(room_id)
+
+        # Set the name of the rooms so we get a consistent returned ordering
+        for idx, room_id in enumerate(room_ids):
+            self.helper.send_state(
+                room_id, "m.room.name", {"name": str(idx)}, tok=self.admin_user_tok,
+            )
+
+        # Request the list of rooms
+        returned_room_ids = []
+        start = 0
+        limit = 2
+
+        run_count = 0
+        should_repeat = True
+        while should_repeat:
+            run_count += 1
+
+            url = "/_synapse/admin/v1/rooms?from=%d&limit=%d&order_by=%s" % (
+                start,
+                limit,
+                "alphabetical",
+            )
+            request, channel = self.make_request(
+                "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+            )
+            self.render(request)
+            self.assertEqual(
+                200, int(channel.result["code"]), msg=channel.result["body"]
+            )
+
+            self.assertTrue("rooms" in channel.json_body)
+            for r in channel.json_body["rooms"]:
+                returned_room_ids.append(r["room_id"])
+
+            # Check that the correct number of total rooms was returned
+            self.assertEqual(channel.json_body["total_rooms"], total_rooms)
+
+            # Check that the offset is correct
+            # We get 2 rooms per page, so the offset should be 2 * (run_count - 1)
+            self.assertEqual(channel.json_body["offset"], 2 * (run_count - 1))
+
+            if run_count > 1:
+                # Check the value of prev_batch is correct
+                self.assertEqual(channel.json_body["prev_batch"], 2 * (run_count - 2))
+
+            if "next_batch" not in channel.json_body:
+                # We have reached the end of the list
+                should_repeat = False
+            else:
+                # Make another query with an updated start value
+                start = channel.json_body["next_batch"]
+
+        # We should've queried the endpoint 3 times
+        self.assertEqual(
+            run_count,
+            3,
+            msg="Should've queried 3 times for 5 rooms with limit 2 per query",
+        )
+
+        # Check that we received all of the room ids
+        self.assertEqual(room_ids, returned_room_ids)
+
+        url = "/_synapse/admin/v1/rooms?from=%d&limit=%d" % (start, limit)
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+    def test_correct_room_attributes(self):
+        """Test the correct attributes for a room are returned"""
+        # Create a test room
+        room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        test_alias = "#test:test"
+        test_room_name = "something"
+
+        # Have another user join the room
+        user_2 = self.register_user("user4", "pass")
+        user_tok_2 = self.login("user4", "pass")
+        self.helper.join(room_id, user_2, tok=user_tok_2)
+
+        # Create a new alias to this room
+        url = "/_matrix/client/r0/directory/room/%s" % (urllib.parse.quote(test_alias),)
+        request, channel = self.make_request(
+            "PUT",
+            url.encode("ascii"),
+            {"room_id": room_id},
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Set this new alias as the canonical alias for this room
+        self.helper.send_state(
+            room_id,
+            "m.room.aliases",
+            {"aliases": [test_alias]},
+            tok=self.admin_user_tok,
+            state_key="test",
+        )
+        self.helper.send_state(
+            room_id,
+            "m.room.canonical_alias",
+            {"alias": test_alias},
+            tok=self.admin_user_tok,
+        )
+
+        # Set a name for the room
+        self.helper.send_state(
+            room_id, "m.room.name", {"name": test_room_name}, tok=self.admin_user_tok,
+        )
+
+        # Request the list of rooms
+        url = "/_synapse/admin/v1/rooms"
+        request, channel = self.make_request(
+            "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Check that rooms were returned
+        self.assertTrue("rooms" in channel.json_body)
+        rooms = channel.json_body["rooms"]
+
+        # Check that only one room was returned
+        self.assertEqual(len(rooms), 1)
+
+        # And that the value of the total_rooms key was correct
+        self.assertEqual(channel.json_body["total_rooms"], 1)
+
+        # Check that the offset is correct
+        # We're not paginating, so should be 0
+        self.assertEqual(channel.json_body["offset"], 0)
+
+        # Check that there is no `prev_batch`
+        self.assertNotIn("prev_batch", channel.json_body)
+
+        # Check that there is no `next_batch`
+        self.assertNotIn("next_batch", channel.json_body)
+
+        # Check that all provided attributes are set
+        r = rooms[0]
+        self.assertEqual(room_id, r["room_id"])
+        self.assertEqual(test_room_name, r["name"])
+        self.assertEqual(test_alias, r["canonical_alias"])
+
+    def test_room_list_sort_order(self):
+        """Test room list sort ordering. alphabetical versus number of members,
+        reversing the order, etc.
+        """
+        # Create 3 test rooms
+        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+        room_id_3 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        # Set room names in alphabetical order. room 1 -> A, 2 -> B, 3 -> C
+        self.helper.send_state(
+            room_id_1, "m.room.name", {"name": "A"}, tok=self.admin_user_tok,
+        )
+        self.helper.send_state(
+            room_id_2, "m.room.name", {"name": "B"}, tok=self.admin_user_tok,
+        )
+        self.helper.send_state(
+            room_id_3, "m.room.name", {"name": "C"}, tok=self.admin_user_tok,
+        )
+
+        # Set room member size in the reverse order. room 1 -> 1 member, 2 -> 2, 3 -> 3
+        user_1 = self.register_user("bob1", "pass")
+        user_1_tok = self.login("bob1", "pass")
+        self.helper.join(room_id_2, user_1, tok=user_1_tok)
+
+        user_2 = self.register_user("bob2", "pass")
+        user_2_tok = self.login("bob2", "pass")
+        self.helper.join(room_id_3, user_2, tok=user_2_tok)
+
+        user_3 = self.register_user("bob3", "pass")
+        user_3_tok = self.login("bob3", "pass")
+        self.helper.join(room_id_3, user_3, tok=user_3_tok)
+
+        def _order_test(
+            order_type: str, expected_room_list: List[str], reverse: bool = False,
+        ):
+            """Request the list of rooms in a certain order. Assert that order is what
+            we expect
+
+            Args:
+                order_type: The type of ordering to give the server
+                expected_room_list: The list of room_ids in the order we expect to get
+                    back from the server
+                reverse: Whether to request the list in reverse (descending) order
+            """
+            # Request the list of rooms in the given order
+            url = "/_synapse/admin/v1/rooms?order_by=%s" % (order_type,)
+            if reverse:
+                url += "&dir=b"
+            request, channel = self.make_request(
+                "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+            )
+            self.render(request)
+            self.assertEqual(200, channel.code, msg=channel.json_body)
+
+            # Check that rooms were returned
+            self.assertTrue("rooms" in channel.json_body)
+            rooms = channel.json_body["rooms"]
+
+            # Check for the correct total_rooms value
+            self.assertEqual(channel.json_body["total_rooms"], 3)
+
+            # Check that the offset is correct
+            # We're not paginating, so should be 0
+            self.assertEqual(channel.json_body["offset"], 0)
+
+            # Check that there is no `prev_batch`
+            self.assertNotIn("prev_batch", channel.json_body)
+
+            # Check that there is no `next_batch`
+            self.assertNotIn("next_batch", channel.json_body)
+
+            # Check that rooms were returned in the expected order
+            returned_order = [r["room_id"] for r in rooms]
+            self.assertListEqual(expected_room_list, returned_order)
+
+        # Test different sort orders, with forward and reverse directions
+        _order_test("alphabetical", [room_id_1, room_id_2, room_id_3])
+        _order_test("alphabetical", [room_id_3, room_id_2, room_id_1], reverse=True)
+
+        _order_test("size", [room_id_3, room_id_2, room_id_1])
+        _order_test("size", [room_id_1, room_id_2, room_id_3], reverse=True)
+
+    def test_search_term(self):
+        """Test that searching for a room works correctly"""
+        # Create two test rooms
+        room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+        room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+
+        room_name_1 = "something"
+        room_name_2 = "else"
+
+        # Set the name for each room
+        self.helper.send_state(
+            room_id_1, "m.room.name", {"name": room_name_1}, tok=self.admin_user_tok,
+        )
+        self.helper.send_state(
+            room_id_2, "m.room.name", {"name": room_name_2}, tok=self.admin_user_tok,
+        )
+
+        def _search_test(
+            expected_room_id: Optional[str],
+            search_term: str,
+            expected_http_code: int = 200,
+        ):
+            """Search for a room and check that the returned room's id is a match
+
+            Args:
+                expected_room_id: The room_id expected to be returned by the API. Set
+                    to None to expect zero results for the search
+                search_term: The term to search for room names with
+                expected_http_code: The expected http code for the request
+            """
+            url = "/_synapse/admin/v1/rooms?search_term=%s" % (search_term,)
+            request, channel = self.make_request(
+                "GET", url.encode("ascii"), access_token=self.admin_user_tok,
+            )
+            self.render(request)
+            self.assertEqual(expected_http_code, channel.code, msg=channel.json_body)
+
+            if expected_http_code != 200:
+                return
+
+            # Check that rooms were returned
+            self.assertTrue("rooms" in channel.json_body)
+            rooms = channel.json_body["rooms"]
+
+            # Check that the expected number of rooms were returned
+            expected_room_count = 1 if expected_room_id else 0
+            self.assertEqual(len(rooms), expected_room_count)
+            self.assertEqual(channel.json_body["total_rooms"], expected_room_count)
+
+            # Check that the offset is correct
+            # We're not paginating, so should be 0
+            self.assertEqual(channel.json_body["offset"], 0)
+
+            # Check that there is no `prev_batch`
+            self.assertNotIn("prev_batch", channel.json_body)
+
+            # Check that there is no `next_batch`
+            self.assertNotIn("next_batch", channel.json_body)
+
+            if expected_room_id:
+                # Check that the first returned room id is correct
+                r = rooms[0]
+                self.assertEqual(expected_room_id, r["room_id"])
+
+        # Perform search tests
+        _search_test(room_id_1, "something")
+        _search_test(room_id_1, "thing")
+
+        _search_test(room_id_2, "else")
+        _search_test(room_id_2, "se")
+
+        _search_test(None, "foo")
+        _search_test(None, "bar")
+        _search_test(None, "", expected_http_code=400)
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 7352d609e6..8f09f51c61 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -435,6 +435,19 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(0, channel.json_body["is_guest"])
         self.assertEqual(0, channel.json_body["deactivated"])
 
+        # Change password
+        body = json.dumps({"password": "hahaha"})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
         # Modify user
         body = json.dumps({"displayname": "foobar", "deactivated": True})
 
diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py
index f340b7e851..ffb2de1505 100644
--- a/tests/rest/client/v1/test_events.py
+++ b/tests/rest/client/v1/test_events.py
@@ -134,3 +134,30 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase):
 
         # someone else set topic, expect 6 (join,send,topic,join,send,topic)
         pass
+
+
+class GetEventsTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        events.register_servlets,
+        room.register_servlets,
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        # Register an account and log in.
+        self.user_id = self.register_user("sid1", "pass")
+        self.token = self.login(self.user_id, "pass")
+
+        self.room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+    def test_get_event_via_events(self):
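+        # Send a message, then check that it can be fetched back via
+        # the /events/{event_id} endpoint.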
+        resp = self.helper.send(self.room_id, tok=self.token)
+        event_id = resp["event_id"]
+
+        request, channel = self.make_request(
+            "GET", "/events/" + event_id, access_token=self.token,
+        )
+        self.render(request)
+        self.assertEqual(channel.code, 200, msg=channel.result)
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index e7417b3d14..873d5ef99c 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -21,6 +21,8 @@ import time
 
 import attr
 
+from twisted.web.resource import Resource
+
 from synapse.api.constants import Membership
 
 from tests.server import make_request, render
@@ -160,3 +162,38 @@ class RestHelper(object):
         )
 
         return channel.json_body
+
+    def upload_media(
+        self,
+        resource: Resource,
+        image_data: bytes,
+        tok: str,
+        filename: str = "test.png",
+        expect_code: int = 200,
+    ) -> dict:
+        """Upload a piece of test media to the media repo
+        Args:
+            resource: The resource that will handle the upload request
+            image_data: The image data to upload
+            tok: The user token to use during the upload
+            filename: The filename of the media to be uploaded
+            expect_code: The return code to expect from attempting to upload the media
+        """
+        image_length = len(image_data)
+        path = "/_matrix/media/r0/upload?filename=%s" % (filename,)
+        request, channel = make_request(
+            self.hs.get_reactor(), "POST", path, content=image_data, access_token=tok
+        )
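+        # Set the Content-Length header explicitly, as the upload resource
+        # expects it when reading the request body.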
+        request.requestHeaders.addRawHeader(
+            b"Content-Length", str(image_length).encode("UTF-8")
+        )
+        request.render(resource)
+        self.hs.get_reactor().pump([100])
+
+        assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % (
+            expect_code,
+            channel.code,
+            channel.result["body"],
+        )
+
+        return channel.json_body
diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py
index 0f51895b81..c3facc00eb 100644
--- a/tests/rest/client/v2_alpha/test_account.py
+++ b/tests/rest/client/v2_alpha/test_account.py
@@ -285,7 +285,9 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
         )
 
         # Make sure the invite is here.
-        pending_invites = self.get_success(store.get_invited_rooms_for_user(invitee_id))
+        pending_invites = self.get_success(
+            store.get_invited_rooms_for_local_user(invitee_id)
+        )
         self.assertEqual(len(pending_invites), 1, pending_invites)
         self.assertEqual(pending_invites[0].room_id, room_id, pending_invites)
 
@@ -293,12 +295,16 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
         self.deactivate(invitee_id, invitee_tok)
 
         # Check that the invite isn't there anymore.
-        pending_invites = self.get_success(store.get_invited_rooms_for_user(invitee_id))
+        pending_invites = self.get_success(
+            store.get_invited_rooms_for_local_user(invitee_id)
+        )
         self.assertEqual(len(pending_invites), 0, pending_invites)
 
         # Check that the membership of @invitee:test in the room is now "leave".
         memberships = self.get_success(
-            store.get_rooms_for_user_where_membership_is(invitee_id, [Membership.LEAVE])
+            store.get_rooms_for_local_user_where_membership_is(
+                invitee_id, [Membership.LEAVE]
+            )
         )
         self.assertEqual(len(memberships), 1, memberships)
         self.assertEqual(memberships[0].room_id, room_id, memberships)
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
index 661c1f88b9..9c13a13786 100644
--- a/tests/rest/client/v2_alpha/test_sync.py
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -15,8 +15,6 @@
 # limitations under the License.
 import json
 
-from mock import Mock
-
 import synapse.rest.admin
 from synapse.api.constants import EventContentFields, EventTypes
 from synapse.rest.client.v1 import login, room
@@ -36,13 +34,6 @@ class FilterTestCase(unittest.HomeserverTestCase):
         sync.register_servlets,
     ]
 
-    def make_homeserver(self, reactor, clock):
-
-        hs = self.setup_test_homeserver(
-            "red", http_client=None, federation_client=Mock()
-        )
-        return hs
-
     def test_sync_argless(self):
         request, channel = self.make_request("GET", "/sync")
         self.render(request)
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index bc662b61db..1809ceb839 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/rest/media/v1/test_media_storage.py
@@ -149,6 +149,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
 
         self.media_repo = hs.get_media_repository_resource()
         self.download_resource = self.media_repo.children[b"download"]
+        self.thumbnail_resource = self.media_repo.children[b"thumbnail"]
 
         # smol png
         self.end_content = unhexlify(
@@ -157,11 +158,11 @@ class MediaRepoTests(unittest.HomeserverTestCase):
             b"0a2db40000000049454e44ae426082"
         )
 
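+        # A media ID for a file notionally hosted on the remote server "example.com"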
+        self.media_id = "example.com/12345"
+
     def _req(self, content_disposition):
 
-        request, channel = self.make_request(
-            "GET", "example.com/12345", shorthand=False
-        )
+        request, channel = self.make_request("GET", self.media_id, shorthand=False)
         request.render(self.download_resource)
         self.pump()
 
@@ -170,7 +171,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         self.assertEqual(len(self.fetches), 1)
         self.assertEqual(self.fetches[0][1], "example.com")
         self.assertEqual(
-            self.fetches[0][2], "/_matrix/media/v1/download/example.com/12345"
+            self.fetches[0][2], "/_matrix/media/v1/download/" + self.media_id
         )
         self.assertEqual(self.fetches[0][3], {"allow_remote": "false"})
 
@@ -229,3 +230,42 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         headers = channel.headers
         self.assertEqual(headers.getRawHeaders(b"Content-Type"), [b"image/png"])
         self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None)
+
+    def test_thumbnail_crop(self):
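+        # A pre-rendered 32x32 PNG: the expected output of the "crop" method.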
+        expected_body = unhexlify(
+            b"89504e470d0a1a0a0000000d4948445200000020000000200806"
+            b"000000737a7af40000001a49444154789cedc101010000008220"
+            b"ffaf6e484001000000ef0610200001194334ee0000000049454e"
+            b"44ae426082"
+        )
+
+        self._test_thumbnail("crop", expected_body)
+
+    def test_thumbnail_scale(self):
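+        # A pre-rendered 1x1 PNG: the expected output of the "scale" method
+        # (the 1x1 test image is not upscaled).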
+        expected_body = unhexlify(
+            b"89504e470d0a1a0a0000000d4948445200000001000000010806"
+            b"0000001f15c4890000000d49444154789c636060606000000005"
+            b"0001a5f645400000000049454e44ae426082"
+        )
+
+        self._test_thumbnail("scale", expected_body)
+
+    def _test_thumbnail(self, method, expected_body):
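+        # Request a 32x32 thumbnail of the remote image using the given method.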
+        params = "?width=32&height=32&method=" + method
+        request, channel = self.make_request(
+            "GET", self.media_id + params, shorthand=False
+        )
+        request.render(self.thumbnail_resource)
+        self.pump()
+
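+        # Simulate the remote origin responding with the full-size image; the
+        # media repo should then generate and serve the requested thumbnail.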
+        headers = {
+            b"Content-Length": [b"%d" % (len(self.end_content))],
+            b"Content-Type": [b"image/png"],
+        }
+        self.fetches[0][0].callback(
+            (self.end_content, (len(self.end_content), headers))
+        )
+        self.pump()
+
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.result["body"], expected_body, channel.result["body"])
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 7840f63fe3..00df0ea68e 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -57,7 +57,7 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
         self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
 
         rooms_for_user = self.get_success(
-            self.store.get_rooms_for_user_where_membership_is(
+            self.store.get_rooms_for_local_user_where_membership_is(
                 self.u_alice, [Membership.JOIN]
             )
         )
diff --git a/tests/test_server.py b/tests/test_server.py
index 98fef21d55..0d57eed268 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -23,8 +23,12 @@ from twisted.test.proto_helpers import AccumulatingProtocol
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
-from synapse.api.errors import Codes, SynapseError
-from synapse.http.server import JsonResource
+from synapse.api.errors import Codes, RedirectException, SynapseError
+from synapse.http.server import (
+    DirectServeResource,
+    JsonResource,
+    wrap_html_request_handler,
+)
 from synapse.http.site import SynapseSite, logger
 from synapse.logging.context import make_deferred_yieldable
 from synapse.util import Clock
@@ -164,6 +168,77 @@ class JsonResourceTests(unittest.TestCase):
         self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
 
 
+class WrapHtmlRequestHandlerTests(unittest.TestCase):
+    class TestResource(DirectServeResource):
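+        # A minimal resource whose GET handler defers to a swappable callback,
+        # letting each test drive wrap_html_request_handler's error handling.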
+        callback = None
+
+        @wrap_html_request_handler
+        async def _async_render_GET(self, request):
+            return await self.callback(request)
+
+    def setUp(self):
+        self.reactor = ThreadedMemoryReactorClock()
+
+    def test_good_response(self):
+        def callback(request):
+            request.write(b"response")
+            request.finish()
+
+        res = WrapHtmlRequestHandlerTests.TestResource()
+        res.callback = callback
+
+        request, channel = make_request(self.reactor, b"GET", b"/path")
+        render(request, res, self.reactor)
+
+        self.assertEqual(channel.result["code"], b"200")
+        body = channel.result["body"]
+        self.assertEqual(body, b"response")
+
+    def test_redirect_exception(self):
+        """
+        If the callback raises a RedirectException, it is turned into a 30x
+        with the right location.
+        """
+
+        def callback(request):
+            raise RedirectException(b"/look/an/eagle", 301)
+
+        res = WrapHtmlRequestHandlerTests.TestResource()
+        res.callback = callback
+
+        request, channel = make_request(self.reactor, b"GET", b"/path")
+        render(request, res, self.reactor)
+
+        self.assertEqual(channel.result["code"], b"301")
+        headers = channel.result["headers"]
+        location_headers = [v for k, v in headers if k == b"Location"]
+        self.assertEqual(location_headers, [b"/look/an/eagle"])
+
+    def test_redirect_exception_with_cookie(self):
+        """
+        If the callback raises a RedirectException which sets a cookie, that
+        cookie is returned too.
+        """
+
+        def callback(request):
+            e = RedirectException(b"/no/over/there", 304)
+            e.cookies.append(b"session=yespls")
+            raise e
+
+        res = WrapHtmlRequestHandlerTests.TestResource()
+        res.callback = callback
+
+        request, channel = make_request(self.reactor, b"GET", b"/path")
+        render(request, res, self.reactor)
+
+        self.assertEqual(channel.result["code"], b"304")
+        headers = channel.result["headers"]
+        location_headers = [v for k, v in headers if k == b"Location"]
+        self.assertEqual(location_headers, [b"/no/over/there"])
+        cookies_headers = [v for k, v in headers if k == b"Set-Cookie"]
+        self.assertEqual(cookies_headers, [b"session=yespls"])
+
+
 class SiteTestCase(unittest.HomeserverTestCase):
     def test_lose_connection(self):
         """
diff --git a/tests/unittest.py b/tests/unittest.py
index ddcd4becfe..b56e249386 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -463,7 +463,7 @@ class HomeserverTestCase(TestCase):
         # Create the user
         request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register")
         self.render(request)
-        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.code, 200, msg=channel.result)
         nonce = channel.json_body["nonce"]
 
         want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
diff --git a/tests/util/test_itertools.py b/tests/util/test_itertools.py
new file mode 100644
index 0000000000..0ab0a91483
--- /dev/null
+++ b/tests/util/test_itertools.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.util.iterutils import chunk_seq
+
+from tests.unittest import TestCase
+
+
+class ChunkSeqTests(TestCase):
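+    # chunk_seq lazily splits a sequence into consecutive chunks of at most
+    # the requested size; any remainder ends up in a final, shorter chunk.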
+    def test_short_seq(self):
+        parts = chunk_seq("123", 8)
+
+        self.assertEqual(
+            list(parts), ["123"],
+        )
+
+    def test_long_seq(self):
+        parts = chunk_seq("abcdefghijklmnop", 8)
+
+        self.assertEqual(
+            list(parts), ["abcdefgh", "ijklmnop"],
+        )
+
+    def test_uneven_parts(self):
+        parts = chunk_seq("abcdefghijklmnop", 5)
+
+        self.assertEqual(
+            list(parts), ["abcde", "fghij", "klmno", "p"],
+        )
+
+    def test_empty_input(self):
+        parts = chunk_seq([], 5)
+
+        self.assertEqual(
+            list(parts), [],
+        )
diff --git a/tox.ini b/tox.ini
index 0ab6d5666b..1d946a02ba 100644
--- a/tox.ini
+++ b/tox.ini
@@ -177,12 +177,13 @@ env =
     MYPYPATH = stubs/
 extras = all
 commands = mypy \
+            synapse/api \
             synapse/config/ \
             synapse/handlers/ui_auth \
             synapse/logging/ \
             synapse/module_api \
-            synapse/rest/consent \
-            synapse/rest/saml2 \
+            synapse/replication \
+            synapse/rest \
             synapse/spam_checker_api \
             synapse/storage/engines \
             synapse/streams