-rw-r--r--  CHANGES.md | 99
-rw-r--r--  INSTALL.md | 2
-rw-r--r--  changelog.d/2090.bugfix | 1
-rw-r--r--  changelog.d/4537.feature | 1
-rw-r--r--  changelog.d/4662.misc | 1
-rw-r--r--  changelog.d/4699.bugfix | 1
-rw-r--r--  changelog.d/4735.feature | 1
-rw-r--r--  changelog.d/4740.bugfix | 1
-rw-r--r--  changelog.d/4749.bugfix | 1
-rw-r--r--  changelog.d/4752.misc | 1
-rw-r--r--  changelog.d/4757.feature | 1
-rw-r--r--  changelog.d/4757.misc | 1
-rw-r--r--  changelog.d/4759.feature | 1
-rw-r--r--  changelog.d/4763.bugfix | 1
-rw-r--r--  changelog.d/4765.misc | 1
-rw-r--r--  changelog.d/4770.misc | 1
-rw-r--r--  changelog.d/4771.misc | 1
-rw-r--r--  changelog.d/4772.feature | 1
-rw-r--r--  changelog.d/4776.bugfix | 1
-rw-r--r--  changelog.d/4779.misc | 1
-rw-r--r--  changelog.d/4790.bugfix | 1
-rw-r--r--  changelog.d/4791.feature | 1
-rw-r--r--  changelog.d/4792.bugfix | 1
-rw-r--r--  changelog.d/4794.misc | 1
-rw-r--r--  changelog.d/4795.misc | 1
-rw-r--r--  changelog.d/4796.feature | 1
-rw-r--r--  changelog.d/4797.misc | 1
-rw-r--r--  changelog.d/4798.misc | 1
-rw-r--r--  changelog.d/4799.misc | 1
-rw-r--r--  changelog.d/4801.feature | 1
-rw-r--r--  changelog.d/4804.feature | 1
-rw-r--r--  changelog.d/4814.feature | 1
-rw-r--r--  changelog.d/4815.misc | 1
-rw-r--r--  changelog.d/4816.misc | 1
-rw-r--r--  changelog.d/4817.misc | 1
-rw-r--r--  changelog.d/4818.bugfix | 1
-rw-r--r--  changelog.d/4820.misc | 1
-rw-r--r--  changelog.d/4821.feature | 1
-rw-r--r--  changelog.d/4824.misc | 1
-rw-r--r--  changelog.d/4825.misc | 1
-rw-r--r--  changelog.d/4828.misc | 1
-rw-r--r--  changelog.d/4829.bugfix | 1
-rw-r--r--  changelog.d/4832.misc | 1
-rw-r--r--  changelog.d/4837.bugfix | 1
-rw-r--r--  changelog.d/4838.bugfix | 1
-rw-r--r--  changelog.d/4839.misc | 1
-rw-r--r--  changelog.d/4843.misc | 1
-rw-r--r--  changelog.d/4844.misc | 1
-rw-r--r--  changelog.d/4846.feature | 1
-rw-r--r--  changelog.d/4847.misc | 1
-rw-r--r--  changelog.d/4849.misc | 1
-rw-r--r--  changelog.d/4852.misc | 1
-rw-r--r--  changelog.d/4853.feature | 1
-rw-r--r--  changelog.d/4855.misc | 1
-rw-r--r--  changelog.d/4863.misc | 1
-rw-r--r--  changelog.d/4864.feature | 1
-rw-r--r--  changelog.d/4865.feature | 1
-rw-r--r--  changelog.d/4879.misc | 1
-rw-r--r--  changelog.d/4881.misc | 1
-rw-r--r--  changelog.d/4886.bugfix | 1
-rw-r--r--  changelog.d/4886.misc | 1
-rw-r--r--  changelog.d/4887.feature | 1
-rw-r--r--  changelog.d/4888.bugfix | 2
-rw-r--r--  changelog.d/4889.misc | 1
-rw-r--r--  changelog.d/4890.feature | 1
-rw-r--r--  changelog.d/4895.feature | 1
-rw-r--r--  changelog.d/4895.misc | 1
-rw-r--r--  changelog.d/4896.feature | 1
-rw-r--r--  changelog.d/4900.feature | 1
-rw-r--r--  changelog.d/4902.misc | 1
-rw-r--r--  changelog.d/4904.bugfix | 1
-rw-r--r--  changelog.d/4905.misc | 1
-rw-r--r--  debian/changelog | 8
-rw-r--r--  docs/ACME.md | 2
-rw-r--r--  docs/password_auth_providers.rst | 14
-rw-r--r--  docs/reverse_proxy.rst | 14
-rwxr-xr-x  scripts-dev/check-newsfragment | 4
-rw-r--r--  scripts-dev/convert_server_keys.py | 2
-rw-r--r--  synapse/__init__.py | 2
-rw-r--r--  synapse/api/auth.py | 22
-rw-r--r--  synapse/config/_base.py | 6
-rw-r--r--  synapse/config/appservice.py | 2
-rw-r--r--  synapse/config/logger.py | 2
-rw-r--r--  synapse/federation/transport/client.py | 23
-rw-r--r--  synapse/federation/transport/server.py | 14
-rw-r--r--  synapse/handlers/auth.py | 39
-rw-r--r--  synapse/handlers/events.py | 7
-rw-r--r--  synapse/handlers/initial_sync.py | 6
-rw-r--r--  synapse/handlers/profile.py | 10
-rw-r--r--  synapse/handlers/register.py | 10
-rw-r--r--  synapse/handlers/state_deltas.py | 70
-rw-r--r--  synapse/handlers/user_directory.py | 51
-rw-r--r--  synapse/http/matrixfederationclient.py | 86
-rw-r--r--  synapse/module_api/__init__.py | 18
-rw-r--r--  synapse/replication/tcp/protocol.py | 35
-rw-r--r--  synapse/replication/tcp/resource.py | 1
-rw-r--r--  synapse/replication/tcp/streams.py | 11
-rw-r--r--  synapse/rest/client/v1/login.py | 49
-rw-r--r--  synapse/storage/receipts.py | 4
-rw-r--r--  synapse/storage/state_deltas.py | 74
-rw-r--r--  synapse/storage/user_directory.py | 94
-rwxr-xr-x  synctl | 4
-rw-r--r--  tests/config/test_load.py | 2
-rw-r--r--  tests/config/test_room_directory.py | 4
-rw-r--r--  tests/handlers/test_register.py | 124
-rw-r--r--  tests/handlers/test_typing.py | 8
-rw-r--r--  tests/http/test_fedclient.py | 99
-rw-r--r--  tests/replication/tcp/__init__.py | 14
-rw-r--r--  tests/replication/tcp/streams/__init__.py | 14
-rw-r--r--  tests/replication/tcp/streams/_base.py | 74
-rw-r--r--  tests/replication/tcp/streams/test_receipts.py | 46
-rw-r--r--  tests/rest/client/v1/test_admin.py | 66
-rw-r--r--  tests/rest/client/v1/utils.py | 125
-rw-r--r--  tests/server_notices/test_resource_limits_server_notices.py | 92
-rw-r--r--  tests/unittest.py | 12
-rw-r--r--  tests/utils.py | 34
116 files changed, 1033 insertions, 537 deletions
diff --git a/CHANGES.md b/CHANGES.md
index b25775d18e..490c2021e0 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,102 @@
+Synapse 0.99.3 (2019-04-01)
+===========================
+
+No significant changes.
+
+
+Synapse 0.99.3rc1 (2019-03-27)
+==============================
+
+Features
+--------
+
+- The user directory has been rewritten to make it faster, with less chance of falling behind on a large server. ([\#4537](https://github.com/matrix-org/synapse/issues/4537), [\#4846](https://github.com/matrix-org/synapse/issues/4846), [\#4864](https://github.com/matrix-org/synapse/issues/4864), [\#4887](https://github.com/matrix-org/synapse/issues/4887), [\#4900](https://github.com/matrix-org/synapse/issues/4900), [\#4944](https://github.com/matrix-org/synapse/issues/4944))
+- Add configurable rate limiting to the /register endpoint. ([\#4735](https://github.com/matrix-org/synapse/issues/4735), [\#4804](https://github.com/matrix-org/synapse/issues/4804))
+- Move server key queries to federation reader. ([\#4757](https://github.com/matrix-org/synapse/issues/4757))
+- Add support for /account/3pid REST endpoint to client_reader worker. ([\#4759](https://github.com/matrix-org/synapse/issues/4759))
+- Add an endpoint to the admin API for querying the server version. Contributed by Joseph Weston. ([\#4772](https://github.com/matrix-org/synapse/issues/4772))
+- Include a default configuration file in the 'docs' directory. ([\#4791](https://github.com/matrix-org/synapse/issues/4791), [\#4801](https://github.com/matrix-org/synapse/issues/4801))
+- Synapse is now permissive about trailing slashes on some of its federation endpoints, allowing zero or more to be present. ([\#4793](https://github.com/matrix-org/synapse/issues/4793))
+- Add support for /keys/query and /keys/changes REST endpoints to client_reader worker. ([\#4796](https://github.com/matrix-org/synapse/issues/4796))
+- Add checks to incoming events over federation for events evading auth (aka "soft fail"). ([\#4814](https://github.com/matrix-org/synapse/issues/4814))
+- Add configurable rate limiting to the /login endpoint. ([\#4821](https://github.com/matrix-org/synapse/issues/4821), [\#4865](https://github.com/matrix-org/synapse/issues/4865))
+- Remove trailing slashes from certain outbound federation requests. Retry if receiving a 404. Context: #3622. ([\#4840](https://github.com/matrix-org/synapse/issues/4840))
+- Allow passing --daemonize flags to workers in the same way as with master. ([\#4853](https://github.com/matrix-org/synapse/issues/4853))
+- Batch up outgoing read-receipts to reduce federation traffic. ([\#4890](https://github.com/matrix-org/synapse/issues/4890), [\#4927](https://github.com/matrix-org/synapse/issues/4927))
+- Add option to disable searching the user directory. ([\#4895](https://github.com/matrix-org/synapse/issues/4895))
+- Add option to disable searching of local and remote public room lists. ([\#4896](https://github.com/matrix-org/synapse/issues/4896))
+- Add ability for password providers to login/register a user via 3PID (email, phone). ([\#4931](https://github.com/matrix-org/synapse/issues/4931))
+
+
+Bugfixes
+--------
+
+- Fix a bug where media with spaces in the name would get a corrupted name. ([\#2090](https://github.com/matrix-org/synapse/issues/2090))
+- Fix attempting to paginate in rooms where server cannot see any events, to avoid unnecessarily pulling in lots of redacted events. ([\#4699](https://github.com/matrix-org/synapse/issues/4699))
+- 'event_id' is now a required parameter in federated state requests, as per the matrix spec. ([\#4740](https://github.com/matrix-org/synapse/issues/4740))
+- Fix tightloop over connecting to replication server. ([\#4749](https://github.com/matrix-org/synapse/issues/4749))
+- Fix parsing of Content-Disposition headers on remote media requests and URL previews. ([\#4763](https://github.com/matrix-org/synapse/issues/4763))
+- Fix incorrect log about not persisting duplicate state event. ([\#4776](https://github.com/matrix-org/synapse/issues/4776))
+- Fix v4v6 option in HAProxy example config. Contributed by Flakebi. ([\#4790](https://github.com/matrix-org/synapse/issues/4790))
+- Handle batch updates in worker replication protocol. ([\#4792](https://github.com/matrix-org/synapse/issues/4792))
+- Fix bug where we didn't correctly throttle sending of USER_IP commands over replication. ([\#4818](https://github.com/matrix-org/synapse/issues/4818))
+- Fix potential race in handling missing updates in device list updates. ([\#4829](https://github.com/matrix-org/synapse/issues/4829))
+- Fix bug where synapse expected an un-specced `prev_state` field on state events. ([\#4837](https://github.com/matrix-org/synapse/issues/4837))
+- Transfer a user's notification settings (push rules) on room upgrade. ([\#4838](https://github.com/matrix-org/synapse/issues/4838))
+- fix test_auto_create_auto_join_where_no_consent. ([\#4886](https://github.com/matrix-org/synapse/issues/4886))
+- Fix a bug where hs_disabled_message was sometimes not correctly enforced. ([\#4888](https://github.com/matrix-org/synapse/issues/4888))
+- Fix bug in shutdown room admin API where it would fail if a user in the room hadn't consented to the privacy policy. ([\#4904](https://github.com/matrix-org/synapse/issues/4904))
+- Fix bug where blocked world-readable rooms were still peekable. ([\#4908](https://github.com/matrix-org/synapse/issues/4908))
+
+
+Internal Changes
+----------------
+
+- Add a systemd setup that supports synapse workers. Contributed by Luca Corbatto. ([\#4662](https://github.com/matrix-org/synapse/issues/4662))
+- Change from TravisCI to Buildkite for CI. ([\#4752](https://github.com/matrix-org/synapse/issues/4752))
+- When presence is disabled don't send over replication. ([\#4757](https://github.com/matrix-org/synapse/issues/4757))
+- Minor docstring fixes for MatrixFederationAgent. ([\#4765](https://github.com/matrix-org/synapse/issues/4765))
+- Optimise EDU transmission for the federation_sender worker. ([\#4770](https://github.com/matrix-org/synapse/issues/4770))
+- Update test_typing to use HomeserverTestCase. ([\#4771](https://github.com/matrix-org/synapse/issues/4771))
+- Update URLs for riot.im icons and logos in the default notification templates. ([\#4779](https://github.com/matrix-org/synapse/issues/4779))
+- Removed unnecessary $ from some federation endpoint path regexes. ([\#4794](https://github.com/matrix-org/synapse/issues/4794))
+- Remove link to deleted title in README. ([\#4795](https://github.com/matrix-org/synapse/issues/4795))
+- Clean up read-receipt handling. ([\#4797](https://github.com/matrix-org/synapse/issues/4797))
+- Add some debug about processing read receipts. ([\#4798](https://github.com/matrix-org/synapse/issues/4798))
+- Clean up some replication code. ([\#4799](https://github.com/matrix-org/synapse/issues/4799))
+- Add some docstrings. ([\#4815](https://github.com/matrix-org/synapse/issues/4815))
+- Add debug logger to try and track down #4422. ([\#4816](https://github.com/matrix-org/synapse/issues/4816))
+- Make shutdown API send explanation message to room after users have been forced joined. ([\#4817](https://github.com/matrix-org/synapse/issues/4817))
+- Update example_log_config.yaml. ([\#4820](https://github.com/matrix-org/synapse/issues/4820))
+- Document the `generate` option for the docker image. ([\#4824](https://github.com/matrix-org/synapse/issues/4824))
+- Fix check-newsfragment for debian-only changes. ([\#4825](https://github.com/matrix-org/synapse/issues/4825))
+- Add some debug logging for device list updates to help with #4828. ([\#4828](https://github.com/matrix-org/synapse/issues/4828))
+- Improve federation documentation, specifically .well-known support. Many thanks to @vaab. ([\#4832](https://github.com/matrix-org/synapse/issues/4832))
+- Disable captcha registration by default in unit tests. ([\#4839](https://github.com/matrix-org/synapse/issues/4839))
+- Add stuff back to the .gitignore. ([\#4843](https://github.com/matrix-org/synapse/issues/4843))
+- Clarify what registration_shared_secret allows for. ([\#4844](https://github.com/matrix-org/synapse/issues/4844))
+- Correctly log expected errors when fetching server keys. ([\#4847](https://github.com/matrix-org/synapse/issues/4847))
+- Update install docs to explicitly state a full-chain (not just the top-level) TLS certificate must be provided to Synapse. This caused some people's Synapse ports to appear correct in a browser but still (rightfully so) upset the federation tester. ([\#4849](https://github.com/matrix-org/synapse/issues/4849))
+- Move client read-receipt processing to federation sender worker. ([\#4852](https://github.com/matrix-org/synapse/issues/4852))
+- Refactor federation TransactionQueue. ([\#4855](https://github.com/matrix-org/synapse/issues/4855))
+- Comment out most options in the generated config. ([\#4863](https://github.com/matrix-org/synapse/issues/4863))
+- Fix yaml library warnings by using safe_load. ([\#4869](https://github.com/matrix-org/synapse/issues/4869))
+- Update Apache setup to remove location syntax. Thanks to @cwmke! ([\#4870](https://github.com/matrix-org/synapse/issues/4870))
+- Reinstate test case that runs unit tests against oldest supported dependencies. ([\#4879](https://github.com/matrix-org/synapse/issues/4879))
+- Update link to federation docs. ([\#4881](https://github.com/matrix-org/synapse/issues/4881))
+- fix test_auto_create_auto_join_where_no_consent. ([\#4886](https://github.com/matrix-org/synapse/issues/4886))
+- Use a regular HomeServerConfig object for unit tests rather than a Mock. ([\#4889](https://github.com/matrix-org/synapse/issues/4889))
+- Add some notes about tuning postgres for larger deployments. ([\#4895](https://github.com/matrix-org/synapse/issues/4895))
+- Add a config option for torture-testing worker replication. ([\#4902](https://github.com/matrix-org/synapse/issues/4902))
+- Log requests which are simulated by the unit tests. ([\#4905](https://github.com/matrix-org/synapse/issues/4905))
+- Allow newsfragments to end with exclamation marks. Exciting! ([\#4912](https://github.com/matrix-org/synapse/issues/4912))
+- Refactor some more tests to use HomeserverTestCase. ([\#4913](https://github.com/matrix-org/synapse/issues/4913))
+- Refactor out the state deltas portion of the user directory store and handler. ([\#4917](https://github.com/matrix-org/synapse/issues/4917))
+- Fix nginx example in ACME doc. ([\#4923](https://github.com/matrix-org/synapse/issues/4923))
+- Use an explicit dbname for postgres connections in the tests. ([\#4928](https://github.com/matrix-org/synapse/issues/4928))
+- Fix `ClientReplicationStreamProtocol.__str__()`. ([\#4929](https://github.com/matrix-org/synapse/issues/4929))
+
+
 Synapse 0.99.2 (2019-03-01)
 ===========================
 
diff --git a/INSTALL.md b/INSTALL.md
index de6893530d..a5c3c6efaa 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -384,7 +384,7 @@ To configure Synapse to expose an HTTPS port, you will need to edit
   `cert.pem`).
 
 For those of you upgrading your TLS certificate in readiness for Synapse 1.0,
-please take a look at `our guide <docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100>`_.
+please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100).
 
 ## Registering a user
 
diff --git a/changelog.d/2090.bugfix b/changelog.d/2090.bugfix
deleted file mode 100644
index de2d22fcb8..0000000000
--- a/changelog.d/2090.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug where media with spaces in the name would get a corrupted name.
diff --git a/changelog.d/4537.feature b/changelog.d/4537.feature
deleted file mode 100644
index 8f792b8890..0000000000
--- a/changelog.d/4537.feature
+++ /dev/null
@@ -1 +0,0 @@
-The user directory has been rewritten to make it faster, with less chance of falling behind on a large server.
diff --git a/changelog.d/4662.misc b/changelog.d/4662.misc
deleted file mode 100644
index f4ec0d6a68..0000000000
--- a/changelog.d/4662.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a systemd setup that supports synapse workers. Contributed by Luca Corbatto.
diff --git a/changelog.d/4699.bugfix b/changelog.d/4699.bugfix
deleted file mode 100644
index 1d7f3174e7..0000000000
--- a/changelog.d/4699.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix attempting to paginate in rooms where server cannot see any events, to avoid unnecessarily pulling in lots of redacted events.
diff --git a/changelog.d/4735.feature b/changelog.d/4735.feature
deleted file mode 100644
index a4c0b196f6..0000000000
--- a/changelog.d/4735.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add configurable rate limiting to the /register endpoint.
diff --git a/changelog.d/4740.bugfix b/changelog.d/4740.bugfix
deleted file mode 100644
index f82bb4227a..0000000000
--- a/changelog.d/4740.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-'event_id' is now a required parameter in federated state requests, as per the matrix spec.
diff --git a/changelog.d/4749.bugfix b/changelog.d/4749.bugfix
deleted file mode 100644
index 174e6b4e5e..0000000000
--- a/changelog.d/4749.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix tightloop over connecting to replication server.
diff --git a/changelog.d/4752.misc b/changelog.d/4752.misc
deleted file mode 100644
index fb1e76edce..0000000000
--- a/changelog.d/4752.misc
+++ /dev/null
@@ -1 +0,0 @@
-Change from TravisCI to Buildkite for CI.
diff --git a/changelog.d/4757.feature b/changelog.d/4757.feature
deleted file mode 100644
index b89029f2b4..0000000000
--- a/changelog.d/4757.feature
+++ /dev/null
@@ -1 +0,0 @@
-Move server key queries to federation reader.
diff --git a/changelog.d/4757.misc b/changelog.d/4757.misc
deleted file mode 100644
index 42bb66f7aa..0000000000
--- a/changelog.d/4757.misc
+++ /dev/null
@@ -1 +0,0 @@
-When presence is disabled don't send over replication.
diff --git a/changelog.d/4759.feature b/changelog.d/4759.feature
deleted file mode 100644
index 643ee404dc..0000000000
--- a/changelog.d/4759.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for /account/3pid REST endpoint to client_reader worker.
diff --git a/changelog.d/4763.bugfix b/changelog.d/4763.bugfix
deleted file mode 100644
index 213ea44b70..0000000000
--- a/changelog.d/4763.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix parsing of Content-Disposition headers on remote media requests and URL previews.
diff --git a/changelog.d/4765.misc b/changelog.d/4765.misc
deleted file mode 100644
index c273fd0cc4..0000000000
--- a/changelog.d/4765.misc
+++ /dev/null
@@ -1 +0,0 @@
-Minor docstring fixes for MatrixFederationAgent.
\ No newline at end of file
diff --git a/changelog.d/4770.misc b/changelog.d/4770.misc
deleted file mode 100644
index 144d819958..0000000000
--- a/changelog.d/4770.misc
+++ /dev/null
@@ -1 +0,0 @@
-Optimise EDU transmission for the federation_sender worker.
diff --git a/changelog.d/4771.misc b/changelog.d/4771.misc
deleted file mode 100644
index 8fa3401ca4..0000000000
--- a/changelog.d/4771.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update test_typing to use HomeserverTestCase.
diff --git a/changelog.d/4772.feature b/changelog.d/4772.feature
deleted file mode 100644
index 19bb91f1e8..0000000000
--- a/changelog.d/4772.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add an endpoint to the admin API for querying the server version. Contributed by Joseph Weston.
diff --git a/changelog.d/4776.bugfix b/changelog.d/4776.bugfix
deleted file mode 100644
index ce3e6ce33c..0000000000
--- a/changelog.d/4776.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix incorrect log about not persisting duplicate state event.
diff --git a/changelog.d/4779.misc b/changelog.d/4779.misc
deleted file mode 100644
index 2442bf31bd..0000000000
--- a/changelog.d/4779.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update URLs for riot.im icons and logos in the default notification templates.
diff --git a/changelog.d/4790.bugfix b/changelog.d/4790.bugfix
deleted file mode 100644
index aa8eb93246..0000000000
--- a/changelog.d/4790.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix v4v6 option in HAProxy example config. Contributed by Flakebi.
diff --git a/changelog.d/4791.feature b/changelog.d/4791.feature
deleted file mode 100644
index 1e5fd32463..0000000000
--- a/changelog.d/4791.feature
+++ /dev/null
@@ -1 +0,0 @@
-Include a default configuration file in the 'docs' directory.
diff --git a/changelog.d/4792.bugfix b/changelog.d/4792.bugfix
deleted file mode 100644
index b127b6254f..0000000000
--- a/changelog.d/4792.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Handle batch updates in worker replication protocol.
\ No newline at end of file
diff --git a/changelog.d/4794.misc b/changelog.d/4794.misc
deleted file mode 100644
index 99b543ecba..0000000000
--- a/changelog.d/4794.misc
+++ /dev/null
@@ -1 +0,0 @@
-Removed unnecessary $ from some federation endpoint path regexes.
\ No newline at end of file
diff --git a/changelog.d/4795.misc b/changelog.d/4795.misc
deleted file mode 100644
index 03995f42fe..0000000000
--- a/changelog.d/4795.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove link to deleted title in README.
\ No newline at end of file
diff --git a/changelog.d/4796.feature b/changelog.d/4796.feature
deleted file mode 100644
index 9e05560a3f..0000000000
--- a/changelog.d/4796.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for /keys/query and /keys/changes REST endpoints to client_reader worker.
diff --git a/changelog.d/4797.misc b/changelog.d/4797.misc
deleted file mode 100644
index 822e98e6a7..0000000000
--- a/changelog.d/4797.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up read-receipt handling.
diff --git a/changelog.d/4798.misc b/changelog.d/4798.misc
deleted file mode 100644
index d60f208dc3..0000000000
--- a/changelog.d/4798.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some debug about processing read receipts.
diff --git a/changelog.d/4799.misc b/changelog.d/4799.misc
deleted file mode 100644
index 5ab11a5c0b..0000000000
--- a/changelog.d/4799.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up some replication code.
diff --git a/changelog.d/4801.feature b/changelog.d/4801.feature
deleted file mode 100644
index 1e5fd32463..0000000000
--- a/changelog.d/4801.feature
+++ /dev/null
@@ -1 +0,0 @@
-Include a default configuration file in the 'docs' directory.
diff --git a/changelog.d/4804.feature b/changelog.d/4804.feature
deleted file mode 100644
index a4c0b196f6..0000000000
--- a/changelog.d/4804.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add configurable rate limiting to the /register endpoint.
diff --git a/changelog.d/4814.feature b/changelog.d/4814.feature
deleted file mode 100644
index 9433acd959..0000000000
--- a/changelog.d/4814.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add checks to incoming events over federation for events evading auth (aka "soft fail").
diff --git a/changelog.d/4815.misc b/changelog.d/4815.misc
deleted file mode 100644
index b123b36f7f..0000000000
--- a/changelog.d/4815.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some docstrings.
diff --git a/changelog.d/4816.misc b/changelog.d/4816.misc
deleted file mode 100644
index 43d94251f7..0000000000
--- a/changelog.d/4816.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add debug logger to try and track down #4422.
diff --git a/changelog.d/4817.misc b/changelog.d/4817.misc
deleted file mode 100644
index 438a51dc63..0000000000
--- a/changelog.d/4817.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make shutdown API send explanation message to room after users have been forced joined.
diff --git a/changelog.d/4818.bugfix b/changelog.d/4818.bugfix
deleted file mode 100644
index ebbc27a433..0000000000
--- a/changelog.d/4818.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where we didn't correctly throttle sending of USER_IP commands over replication.
diff --git a/changelog.d/4820.misc b/changelog.d/4820.misc
deleted file mode 100644
index 1e35b5b63c..0000000000
--- a/changelog.d/4820.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update example_log_config.yaml.
diff --git a/changelog.d/4821.feature b/changelog.d/4821.feature
deleted file mode 100644
index 61d4eb8d60..0000000000
--- a/changelog.d/4821.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add configurable rate limiting to the /login endpoint.
diff --git a/changelog.d/4824.misc b/changelog.d/4824.misc
deleted file mode 100644
index a4c5a1df37..0000000000
--- a/changelog.d/4824.misc
+++ /dev/null
@@ -1 +0,0 @@
-Document the `generate` option for the docker image.
diff --git a/changelog.d/4825.misc b/changelog.d/4825.misc
deleted file mode 100644
index 166661ab6a..0000000000
--- a/changelog.d/4825.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix check-newsfragment for debian-only changes.
diff --git a/changelog.d/4828.misc b/changelog.d/4828.misc
deleted file mode 100644
index 2fe554884a..0000000000
--- a/changelog.d/4828.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some debug logging for device list updates to help with #4828.
diff --git a/changelog.d/4829.bugfix b/changelog.d/4829.bugfix
deleted file mode 100644
index b05235e215..0000000000
--- a/changelog.d/4829.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix potential race in handling missing updates in device list updates.
diff --git a/changelog.d/4832.misc b/changelog.d/4832.misc
deleted file mode 100644
index 92022266c6..0000000000
--- a/changelog.d/4832.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve federation documentation, specifically .well-known support. Many thanks to @vaab.
diff --git a/changelog.d/4837.bugfix b/changelog.d/4837.bugfix
deleted file mode 100644
index 989aeb82bb..0000000000
--- a/changelog.d/4837.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where synapse expected an un-specced `prev_state` field on state events.
diff --git a/changelog.d/4838.bugfix b/changelog.d/4838.bugfix
deleted file mode 100644
index 7f4fceabff..0000000000
--- a/changelog.d/4838.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Transfer a user's notification settings (push rules) on room upgrade.
\ No newline at end of file
diff --git a/changelog.d/4839.misc b/changelog.d/4839.misc
deleted file mode 100644
index 7c6868051b..0000000000
--- a/changelog.d/4839.misc
+++ /dev/null
@@ -1 +0,0 @@
-Disable captcha registration by default in unit tests.
\ No newline at end of file
diff --git a/changelog.d/4843.misc b/changelog.d/4843.misc
deleted file mode 100644
index 03d0a3e2e7..0000000000
--- a/changelog.d/4843.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add stuff back to the .gitignore.
diff --git a/changelog.d/4844.misc b/changelog.d/4844.misc
deleted file mode 100644
index eff6f1c43c..0000000000
--- a/changelog.d/4844.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clarify what registration_shared_secret allows for.
diff --git a/changelog.d/4846.feature b/changelog.d/4846.feature
deleted file mode 100644
index 8f792b8890..0000000000
--- a/changelog.d/4846.feature
+++ /dev/null
@@ -1 +0,0 @@
-The user directory has been rewritten to make it faster, with less chance of falling behind on a large server.
diff --git a/changelog.d/4847.misc b/changelog.d/4847.misc
deleted file mode 100644
index a001238e08..0000000000
--- a/changelog.d/4847.misc
+++ /dev/null
@@ -1 +0,0 @@
-Correctly log expected errors when fetching server keys.
diff --git a/changelog.d/4849.misc b/changelog.d/4849.misc
deleted file mode 100644
index f2cab20b44..0000000000
--- a/changelog.d/4849.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update install docs to explicitly state a full-chain (not just the top-level) TLS certificate must be provided to Synapse. This caused some people's Synapse ports to appear correct in a browser but still (rightfully so) upset the federation tester.
\ No newline at end of file
diff --git a/changelog.d/4852.misc b/changelog.d/4852.misc
deleted file mode 100644
index 76ab1e34e7..0000000000
--- a/changelog.d/4852.misc
+++ /dev/null
@@ -1 +0,0 @@
- Move client read-receipt processing to federation sender worker.
\ No newline at end of file
diff --git a/changelog.d/4853.feature b/changelog.d/4853.feature
deleted file mode 100644
index 360f92e1de..0000000000
--- a/changelog.d/4853.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow passing --daemonize flags to workers in the same way as with master.
diff --git a/changelog.d/4855.misc b/changelog.d/4855.misc
deleted file mode 100644
index c4906d2f56..0000000000
--- a/changelog.d/4855.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor federation TransactionQueue.
\ No newline at end of file
diff --git a/changelog.d/4863.misc b/changelog.d/4863.misc
deleted file mode 100644
index bfe03cbedc..0000000000
--- a/changelog.d/4863.misc
+++ /dev/null
@@ -1 +0,0 @@
-Comment out most options in the generated config.
diff --git a/changelog.d/4864.feature b/changelog.d/4864.feature
deleted file mode 100644
index 57927f2620..0000000000
--- a/changelog.d/4864.feature
+++ /dev/null
@@ -1 +0,0 @@
-The user directory has been rewritten to make it faster, with less chance of falling behind on a large server.
\ No newline at end of file
diff --git a/changelog.d/4865.feature b/changelog.d/4865.feature
deleted file mode 100644
index 61d4eb8d60..0000000000
--- a/changelog.d/4865.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add configurable rate limiting to the /login endpoint.
diff --git a/changelog.d/4879.misc b/changelog.d/4879.misc
deleted file mode 100644
index 574017230c..0000000000
--- a/changelog.d/4879.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reinstate test case that runs unit tests against oldest supported dependencies.
diff --git a/changelog.d/4881.misc b/changelog.d/4881.misc
deleted file mode 100644
index 308c21c839..0000000000
--- a/changelog.d/4881.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update link to federation docs.
diff --git a/changelog.d/4886.bugfix b/changelog.d/4886.bugfix
deleted file mode 100644
index b17aa92485..0000000000
--- a/changelog.d/4886.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-fix test_auto_create_auto_join_where_no_consent.
diff --git a/changelog.d/4886.misc b/changelog.d/4886.misc
deleted file mode 100644
index b17aa92485..0000000000
--- a/changelog.d/4886.misc
+++ /dev/null
@@ -1 +0,0 @@
-fix test_auto_create_auto_join_where_no_consent.
diff --git a/changelog.d/4887.feature b/changelog.d/4887.feature
deleted file mode 100644
index e7ff0b9297..0000000000
--- a/changelog.d/4887.feature
+++ /dev/null
@@ -1 +0,0 @@
-The user directory has been rewritten to make it faster, with less chance of falling behind on a large server. 
diff --git a/changelog.d/4888.bugfix b/changelog.d/4888.bugfix
deleted file mode 100644
index 0e193709e5..0000000000
--- a/changelog.d/4888.bugfix
+++ /dev/null
@@ -1,2 +0,0 @@
-Fix a bug where hs_disabled_message was sometimes not correctly enforced.
-
diff --git a/changelog.d/4889.misc b/changelog.d/4889.misc
deleted file mode 100644
index f1948db65e..0000000000
--- a/changelog.d/4889.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use a regular HomeServerConfig object for unit tests rater than a Mock.
diff --git a/changelog.d/4890.feature b/changelog.d/4890.feature
deleted file mode 100644
index 8d74262250..0000000000
--- a/changelog.d/4890.feature
+++ /dev/null
@@ -1 +0,0 @@
-Batch up outgoing read-receipts to reduce federation traffic.
diff --git a/changelog.d/4895.feature b/changelog.d/4895.feature
deleted file mode 100644
index 5dd7c68194..0000000000
--- a/changelog.d/4895.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add option to disable searching the user directory.
diff --git a/changelog.d/4895.misc b/changelog.d/4895.misc
deleted file mode 100644
index 81a3261538..0000000000
--- a/changelog.d/4895.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some notes about tuning postgres for larger deployments.
diff --git a/changelog.d/4896.feature b/changelog.d/4896.feature
deleted file mode 100644
index 46ac49a4b4..0000000000
--- a/changelog.d/4896.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add option to disable searching of local and remote public room lists.
diff --git a/changelog.d/4900.feature b/changelog.d/4900.feature
deleted file mode 100644
index 8f792b8890..0000000000
--- a/changelog.d/4900.feature
+++ /dev/null
@@ -1 +0,0 @@
-The user directory has been rewritten to make it faster, with less chance of falling behind on a large server.
diff --git a/changelog.d/4902.misc b/changelog.d/4902.misc
deleted file mode 100644
index fecc06a6e8..0000000000
--- a/changelog.d/4902.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a config option for torture-testing worker replication.
diff --git a/changelog.d/4904.bugfix b/changelog.d/4904.bugfix
deleted file mode 100644
index 5c2d7f3cf1..0000000000
--- a/changelog.d/4904.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug in shutdown room admin API where it would fail if a user in the room hadn't consented to the privacy policy.
diff --git a/changelog.d/4905.misc b/changelog.d/4905.misc
deleted file mode 100644
index 0f00d5a3d5..0000000000
--- a/changelog.d/4905.misc
+++ /dev/null
@@ -1 +0,0 @@
-Log requests which are simulated by the unit tests.
diff --git a/debian/changelog b/debian/changelog
index d84931ec03..03df2e1c00 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,8 +1,12 @@
-matrix-synapse-py3 (0.99.3) UNRELEASED; urgency=medium
+matrix-synapse-py3 (0.99.3) stable; urgency=medium
 
+  [ Richard van der Hoff ]
   * Fix warning during preconfiguration. (Fixes: #4819)
 
- -- Richard van der Hoff <richard@matrix.org>  Thu, 07 Mar 2019 07:17:00 +0000
+  [ Synapse Packaging team ]
+  * New synapse release 0.99.3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 01 Apr 2019 12:48:21 +0000
 
 matrix-synapse-py3 (0.99.2) stable; urgency=medium
 
diff --git a/docs/ACME.md b/docs/ACME.md
index 46136a9f2c..9eb18a9cf5 100644
--- a/docs/ACME.md
+++ b/docs/ACME.md
@@ -67,7 +67,7 @@ For nginx users, add the following line to your existing `server` block:
 
 ```
 location /.well-known/acme-challenge {
-    proxy_pass http://localhost:8009/;
+    proxy_pass http://localhost:8009;
 }
 ```
 
diff --git a/docs/password_auth_providers.rst b/docs/password_auth_providers.rst
index d8a7b61cdc..6149ba7458 100644
--- a/docs/password_auth_providers.rst
+++ b/docs/password_auth_providers.rst
@@ -75,6 +75,20 @@ Password auth provider classes may optionally provide the following methods.
     result from the ``/login`` call (including ``access_token``, ``device_id``,
     etc.)
 
+``someprovider.check_3pid_auth``\(*medium*, *address*, *password*)
+
+    This method, if implemented, is called when a user attempts to register or
+    log in with a third party identifier, such as email. It is passed the
+    medium (ex. "email"), an address (ex. "jdoe@example.com") and the user's
+    password.
+
+    The method should return a Twisted ``Deferred`` object, which resolves to
+    a ``str`` containing the user's (canonical) User ID if authentication was
+    successful, and ``None`` if not.
+
+    As with ``check_auth``, the ``Deferred`` may alternatively resolve to a
+    ``(user_id, callback)`` tuple.
+
 ``someprovider.check_password``\(*user_id*, *password*)
 
     This method provides a simpler interface than ``get_supported_login_types``
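
As a hedged illustration of the ``check_3pid_auth`` hook documented in the hunk above (not part of this diff): a minimal provider sketch. The class name, the hard-coded credential table and the ``example.com`` server name are hypothetical; only the hook signature and the return contract come from the documentation.

```python
# Illustrative sketch only -- not part of this diff. A hypothetical password
# provider implementing the check_3pid_auth hook described above.
from twisted.internet import defer


class ExampleThreepidAuthProvider(object):
    def __init__(self, config, account_handler):
        self.account_handler = account_handler
        # Hypothetical static mapping of email address -> (localpart, password).
        self._users = {"jdoe@example.com": ("jdoe", "hunter2")}

    def check_3pid_auth(self, medium, address, password):
        # Must return a Deferred resolving to the canonical user ID (or a
        # (user_id, callback) tuple), or None if authentication fails.
        if medium != "email":
            return defer.succeed(None)

        entry = self._users.get(address.lower())
        if entry is None or entry[1] != password:
            return defer.succeed(None)

        user_id = "@%s:example.com" % (entry[0],)

        def on_logged_in(login_result):
            # Runs after Synapse has completed login/registration.
            pass

        return defer.succeed((user_id, on_logged_in))

    @staticmethod
    def parse_config(config):
        return config
```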
diff --git a/docs/reverse_proxy.rst b/docs/reverse_proxy.rst
index 8e26c50f1b..cc81ceb84b 100644
--- a/docs/reverse_proxy.rst
+++ b/docs/reverse_proxy.rst
@@ -69,20 +69,16 @@ Let's assume that we expect clients to connect to our server at
           SSLEngine on
           ServerName matrix.example.com;
 
-          <Location /_matrix>
-              ProxyPass http://127.0.0.1:8008/_matrix nocanon
-              ProxyPassReverse http://127.0.0.1:8008/_matrix
-          </Location>
+          ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
+          ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
       </VirtualHost>
 
       <VirtualHost *:8448>
           SSLEngine on
           ServerName example.com;
-
-          <Location /_matrix>
-              ProxyPass http://127.0.0.1:8008/_matrix nocanon
-              ProxyPassReverse http://127.0.0.1:8008/_matrix
-          </Location>
+          
+          ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
+          ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
       </VirtualHost>
 
 * HAProxy::
diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment
index e0ac84198e..0ec5075e79 100755
--- a/scripts-dev/check-newsfragment
+++ b/scripts-dev/check-newsfragment
@@ -31,8 +31,8 @@ echo
 # check that any new newsfiles on this branch end with a full stop.
 for f in `git diff --name-only FETCH_HEAD... -- changelog.d`; do
     lastchar=`tr -d '\n' < $f | tail -c 1`
-    if [ $lastchar != '.' ]; then
-        echo -e "\e[31mERROR: newsfragment $f does not end with a '.'\e[39m" >&2
+    if [ $lastchar != '.' -a $lastchar != '!' ]; then
+        echo -e "\e[31mERROR: newsfragment $f does not end with a '.' or '!'\e[39m" >&2
         exit 1
     fi
 done
diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py
index dde8596697..ac152b5c42 100644
--- a/scripts-dev/convert_server_keys.py
+++ b/scripts-dev/convert_server_keys.py
@@ -76,7 +76,7 @@ def rows_v2(server, json):
 
 
 def main():
-    config = yaml.load(open(sys.argv[1]))
+    config = yaml.safe_load(open(sys.argv[1]))
     valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24
 
     server_name = config["server_name"]
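
This hunk, like the `synapse/config/` hunks further down, swaps `yaml.load` for `yaml.safe_load` (see the "Fix yaml library warnings by using safe_load" changelog entry). A minimal, Synapse-independent sketch of the difference:

```python
# Illustrative sketch only -- not part of this diff.
import yaml

# safe_load builds only plain Python types (dicts, lists, strings, numbers),
# so it parses ordinary config files just as load() would...
config = yaml.safe_load("server_name: example.com\nport: 8008\n")
assert config == {"server_name": "example.com", "port": 8008}

# ...but it rejects documents that try to instantiate arbitrary Python
# objects, and it never triggers the "load() without Loader" warning.
try:
    yaml.safe_load("!!python/object/apply:os.system ['echo pwned']")
except yaml.YAMLError:
    print("python object tags are rejected")
```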
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 25c10244d3..6bb5a8b24d 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -27,4 +27,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "0.99.2"
+__version__ = "0.99.3"
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index ab4d2b8f11..f4171da6e3 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -614,13 +614,13 @@ class Auth(object):
 
         Returns:
             True if the the sender is allowed to redact the target event if the
-            target event was created by them.
+                target event was created by them.
             False if the sender is allowed to redact the target event with no
-            further checks.
+                further checks.
 
         Raises:
             AuthError if the event sender is definitely not allowed to redact
-            the target event.
+                the target event.
         """
         return event_auth.check_redaction(room_version, event, auth_events)
 
@@ -736,9 +736,9 @@ class Auth(object):
 
         Returns:
             Deferred[tuple[str, str|None]]: Resolves to the current membership of
-            the user in the room and the membership event ID of the user. If
-            the user is not in the room and never has been, then
-            `(Membership.JOIN, None)` is returned.
+                the user in the room and the membership event ID of the user. If
+                the user is not in the room and never has been, then
+                `(Membership.JOIN, None)` is returned.
         """
 
         try:
@@ -770,13 +770,13 @@ class Auth(object):
 
         Args:
             user_id(str|None): If present, checks for presence against existing
-            MAU cohort
+                MAU cohort
 
             threepid(dict|None): If present, checks for presence against configured
-            reserved threepid. Used in cases where the user is trying register
-            with a MAU blocked server, normally they would be rejected but their
-            threepid is on the reserved list. user_id and
-            threepid should never be set at the same time.
+                reserved threepid. Used in cases where the user is trying register
+                with a MAU blocked server, normally they would be rejected but their
+                threepid is on the reserved list. user_id and
+                threepid should never be set at the same time.
         """
 
         # Never fail an auth check for the server notices users or support user
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index a219a83550..f7d7f153bb 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -137,7 +137,7 @@ class Config(object):
     @staticmethod
     def read_config_file(file_path):
         with open(file_path) as file_stream:
-            return yaml.load(file_stream)
+            return yaml.safe_load(file_stream)
 
     def invoke_all(self, name, *args, **kargs):
         results = []
@@ -318,7 +318,7 @@ class Config(object):
                     )
                     config_file.write(config_str)
 
-                config = yaml.load(config_str)
+                config = yaml.safe_load(config_str)
                 obj.invoke_all("generate_files", config)
 
                 print(
@@ -390,7 +390,7 @@ class Config(object):
             server_name=server_name,
             generate_secrets=False,
         )
-        config = yaml.load(config_string)
+        config = yaml.safe_load(config_string)
         config.pop("log_config")
         config.update(specified_config)
 
diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index 9e64c76544..7e89d345d8 100644
--- a/synapse/config/appservice.py
+++ b/synapse/config/appservice.py
@@ -68,7 +68,7 @@ def load_appservices(hostname, config_files):
         try:
             with open(config_file, 'r') as f:
                 appservice = _load_appservice(
-                    hostname, yaml.load(f), config_file
+                    hostname, yaml.safe_load(f), config_file
                 )
                 if appservice.id in seen_ids:
                     raise ConfigError(
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 464c28c2d9..c1febbe9d3 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -195,7 +195,7 @@ def setup_logging(config, use_worker_options=False):
     else:
         def load_log_config():
             with open(log_config, 'r') as f:
-                logging.config.dictConfig(yaml.load(f))
+                logging.config.dictConfig(yaml.safe_load(f))
 
         def sighup(*args):
             # it might be better to use a file watcher or something for this.
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 8e2be218e2..e424c40fdf 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -51,9 +51,10 @@ class TransportLayerClient(object):
         logger.debug("get_room_state dest=%s, room=%s",
                      destination, room_id)
 
-        path = _create_v1_path("/state/%s/", room_id)
+        path = _create_v1_path("/state/%s", room_id)
         return self.client.get_json(
             destination, path=path, args={"event_id": event_id},
+            try_trailing_slash_on_400=True,
         )
 
     @log_function
@@ -73,9 +74,10 @@ class TransportLayerClient(object):
         logger.debug("get_room_state_ids dest=%s, room=%s",
                      destination, room_id)
 
-        path = _create_v1_path("/state_ids/%s/", room_id)
+        path = _create_v1_path("/state_ids/%s", room_id)
         return self.client.get_json(
             destination, path=path, args={"event_id": event_id},
+            try_trailing_slash_on_400=True,
         )
 
     @log_function
@@ -95,8 +97,11 @@ class TransportLayerClient(object):
         logger.debug("get_pdu dest=%s, event_id=%s",
                      destination, event_id)
 
-        path = _create_v1_path("/event/%s/", event_id)
-        return self.client.get_json(destination, path=path, timeout=timeout)
+        path = _create_v1_path("/event/%s", event_id)
+        return self.client.get_json(
+            destination, path=path, timeout=timeout,
+            try_trailing_slash_on_400=True,
+        )
 
     @log_function
     def backfill(self, destination, room_id, event_tuples, limit):
@@ -121,7 +126,7 @@ class TransportLayerClient(object):
             # TODO: raise?
             return
 
-        path = _create_v1_path("/backfill/%s/", room_id)
+        path = _create_v1_path("/backfill/%s", room_id)
 
         args = {
             "v": event_tuples,
@@ -132,6 +137,7 @@ class TransportLayerClient(object):
             destination,
             path=path,
             args=args,
+            try_trailing_slash_on_400=True,
         )
 
     @defer.inlineCallbacks
@@ -167,7 +173,7 @@ class TransportLayerClient(object):
         # generated by the json_data_callback.
         json_data = transaction.get_dict()
 
-        path = _create_v1_path("/send/%s/", transaction.transaction_id)
+        path = _create_v1_path("/send/%s", transaction.transaction_id)
 
         response = yield self.client.put_json(
             transaction.destination,
@@ -176,6 +182,7 @@ class TransportLayerClient(object):
             json_data_callback=json_data_callback,
             long_retries=True,
             backoff_on_404=True,  # If we get a 404 the other side has gone
+            try_trailing_slash_on_400=True,
         )
 
         defer.returnValue(response)
@@ -959,7 +966,7 @@ def _create_v1_path(path, *args):
 
     Example:
 
-        _create_v1_path("/event/%s/", event_id)
+        _create_v1_path("/event/%s", event_id)
 
     Args:
         path (str): String template for the path
@@ -980,7 +987,7 @@ def _create_v2_path(path, *args):
 
     Example:
 
-        _create_v2_path("/event/%s/", event_id)
+        _create_v2_path("/event/%s", event_id)
 
     Args:
         path (str): String template for the path
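
The hunks above strip the trailing slash from several v1 federation paths and pass a new `try_trailing_slash_on_400` flag down to the HTTP client (the matching change in `synapse/http/matrixfederationclient.py` is not shown in full here). The sketch below is only a rough, hypothetical illustration of the retry idea, not the actual client code; `do_request` is an assumed callable.

```python
# Illustrative sketch only -- not the implementation in
# synapse/http/matrixfederationclient.py. Rough idea: send the request
# without the trailing slash first; if an older remote server only knows the
# slash-suffixed route and rejects the request, retry once with the slash.
from twisted.internet import defer


@defer.inlineCallbacks
def get_json_with_slash_fallback(do_request, path):
    # do_request is a hypothetical callable that issues the federation request.
    try:
        result = yield do_request(path)
    except Exception as e:
        # Assumption: the error exposes an HTTP status code; the real check
        # also inspects the Matrix errcode before deciding to retry.
        if getattr(e, "code", None) not in (400, 404) or path.endswith("/"):
            raise
        result = yield do_request(path + "/")
    defer.returnValue(result)
```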
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 96d680a5ad..efb6bdca48 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -312,7 +312,7 @@ class BaseFederationServlet(object):
 
 
 class FederationSendServlet(BaseFederationServlet):
-    PATH = "/send/(?P<transaction_id>[^/]*)/"
+    PATH = "/send/(?P<transaction_id>[^/]*)/?"
 
     def __init__(self, handler, server_name, **kwargs):
         super(FederationSendServlet, self).__init__(
@@ -378,7 +378,7 @@ class FederationSendServlet(BaseFederationServlet):
 
 
 class FederationEventServlet(BaseFederationServlet):
-    PATH = "/event/(?P<event_id>[^/]*)/"
+    PATH = "/event/(?P<event_id>[^/]*)/?"
 
     # This is when someone asks for a data item for a given server data_id pair.
     def on_GET(self, origin, content, query, event_id):
@@ -386,7 +386,7 @@ class FederationEventServlet(BaseFederationServlet):
 
 
 class FederationStateServlet(BaseFederationServlet):
-    PATH = "/state/(?P<context>[^/]*)/"
+    PATH = "/state/(?P<context>[^/]*)/?"
 
     # This is when someone asks for all data for a given context.
     def on_GET(self, origin, content, query, context):
@@ -398,7 +398,7 @@ class FederationStateServlet(BaseFederationServlet):
 
 
 class FederationStateIdsServlet(BaseFederationServlet):
-    PATH = "/state_ids/(?P<room_id>[^/]*)/"
+    PATH = "/state_ids/(?P<room_id>[^/]*)/?"
 
     def on_GET(self, origin, content, query, room_id):
         return self.handler.on_state_ids_request(
@@ -409,7 +409,7 @@ class FederationStateIdsServlet(BaseFederationServlet):
 
 
 class FederationBackfillServlet(BaseFederationServlet):
-    PATH = "/backfill/(?P<context>[^/]*)/"
+    PATH = "/backfill/(?P<context>[^/]*)/?"
 
     def on_GET(self, origin, content, query, context):
         versions = [x.decode('ascii') for x in query[b"v"]]
@@ -1080,7 +1080,7 @@ class FederationGroupsCategoriesServlet(BaseFederationServlet):
     """Get all categories for a group
     """
     PATH = (
-        "/groups/(?P<group_id>[^/]*)/categories/"
+        "/groups/(?P<group_id>[^/]*)/categories/?"
     )
 
     @defer.inlineCallbacks
@@ -1150,7 +1150,7 @@ class FederationGroupsRolesServlet(BaseFederationServlet):
     """Get roles in a group
     """
     PATH = (
-        "/groups/(?P<group_id>[^/]*)/roles/"
+        "/groups/(?P<group_id>[^/]*)/roles/?"
     )
 
     @defer.inlineCallbacks
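
The server-side half of the trailing-slash change appends `/?` to the servlet path regexes so that zero or one trailing slash is accepted. A quick standalone check of what that pattern matches:

```python
# Illustrative sketch only -- not part of this diff.
import re

# Same shape as the servlet PATHs above: a named group plus an optional
# trailing slash.
pattern = re.compile("^/send/(?P<transaction_id>[^/]*)/?$")

assert pattern.match("/send/txn123").group("transaction_id") == "txn123"
assert pattern.match("/send/txn123/").group("transaction_id") == "txn123"
assert pattern.match("/send/txn123//") is None
```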
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index caad9ae2dd..4544de821d 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -746,6 +746,42 @@ class AuthHandler(BaseHandler):
         )
 
     @defer.inlineCallbacks
+    def check_password_provider_3pid(self, medium, address, password):
+        """Check if a password provider is able to validate a thirdparty login
+
+        Args:
+            medium (str): The medium of the 3pid (ex. email).
+            address (str): The address of the 3pid (ex. jdoe@example.com).
+            password (str): The password of the user.
+
+        Returns:
+            Deferred[(str|None, func|None)]: A tuple of `(user_id,
+            callback)`. If authentication is successful, `user_id` is a `str`
+            containing the authenticated, canonical user ID. `callback` is
+            then either a function to be later run after the server has
+            completed login/registration, or `None`. If authentication was
+            unsuccessful, `user_id` and `callback` are both `None`.
+        """
+        for provider in self.password_providers:
+            if hasattr(provider, "check_3pid_auth"):
+                # This function is able to return a deferred that either
+                # resolves None, meaning authentication failure, or upon
+                # success, to a str (which is the user_id) or a tuple of
+                # (user_id, callback_func), where callback_func should be run
+                # after we've finished everything else
+                result = yield provider.check_3pid_auth(
+                    medium, address, password,
+                )
+                if result:
+                    # Check if the return value is a str or a tuple
+                    if isinstance(result, str):
+                        # If it's a str, set callback function to None
+                        result = (result, None)
+                    defer.returnValue(result)
+
+        defer.returnValue((None, None))
+
+    @defer.inlineCallbacks
     def _check_local_password(self, user_id, password):
         """Authenticate a user against the local password database.
 
@@ -756,7 +792,8 @@ class AuthHandler(BaseHandler):
             user_id (unicode): complete @user:id
             password (unicode): the provided password
         Returns:
-            (unicode) the canonical_user_id, or None if unknown user / bad password
+            Deferred[unicode] the canonical_user_id, or Deferred[None] if
+                unknown user/bad password
 
         Raises:
             LimitExceededError if the ratelimiter's login requests count for this
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index f772e62c28..d883e98381 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -19,7 +19,7 @@ import random
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import AuthError
+from synapse.api.errors import AuthError, SynapseError
 from synapse.events import EventBase
 from synapse.events.utils import serialize_event
 from synapse.types import UserID
@@ -61,6 +61,11 @@ class EventStreamHandler(BaseHandler):
         If `only_keys` is not None, events from keys will be sent down.
         """
 
+        if room_id:
+            blocked = yield self.store.is_room_blocked(room_id)
+            if blocked:
+                raise SynapseError(403, "This room has been blocked on this server")
+
         # send any outstanding server notices to the user.
         yield self._server_notices_sender.on_user_syncing(auth_user_id)
 
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 563bb3cea3..7dfae78db0 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -18,7 +18,7 @@ import logging
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import AuthError, Codes
+from synapse.api.errors import AuthError, Codes, SynapseError
 from synapse.events.utils import serialize_event
 from synapse.events.validator import EventValidator
 from synapse.handlers.presence import format_user_presence_state
@@ -262,6 +262,10 @@ class InitialSyncHandler(BaseHandler):
             A JSON serialisable dict with the snapshot of the room.
         """
 
+        blocked = yield self.store.is_room_blocked(room_id)
+        if blocked:
+            raise SynapseError(403, "This room has been blocked on this server")
+
         user_id = requester.user.to_string()
 
         membership, member_event_id = yield self._check_in_room_or_world_readable(
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 7b8c77ba4d..2df2eaf609 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -233,8 +233,14 @@ class BaseProfileHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def set_displayname(self, target_user, requester, new_displayname, by_admin=False):
-        """target_user is the UserID whose displayname is to be changed;
-        requester is the authenticated user attempting to make this change."""
+        """Set the displayname of a user
+
+        Args:
+            target_user (UserID): the user whose displayname is to be changed.
+            requester (Requester): The user attempting to make this change.
+            new_displayname (str): The displayname to give this user.
+            by_admin (bool): Whether this change was made by an administrator.
+        """
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "User is not hosted on this Home Server")
 
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index b0468cd3d5..f4745614f1 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -172,7 +172,7 @@ class RegistrationHandler(BaseHandler):
               api.constants.UserTypes, or None for a normal user.
             default_display_name (unicode|None): if set, the new user's displayname
               will be set to this. Defaults to 'localpart'.
-            address (str|None): the IP address used to perform the regitration.
+            address (str|None): the IP address used to perform the registration.
         Returns:
             A tuple of (user_id, access_token).
         Raises:
@@ -719,7 +719,7 @@ class RegistrationHandler(BaseHandler):
             admin (boolean): is an admin user?
             user_type (str|None): type of user. One of the values from
                 api.constants.UserTypes, or None for a normal user.
-            address (str|None): the IP address used to perform the regitration.
+            address (str|None): the IP address used to perform the registration.
 
         Returns:
             Deferred
@@ -817,9 +817,9 @@ class RegistrationHandler(BaseHandler):
             access_token (str|None): The access token of the newly logged in
                 device, or None if `inhibit_login` enabled.
             bind_email (bool): Whether to bind the email with the identity
-                server
+                server.
             bind_msisdn (bool): Whether to bind the msisdn with the identity
-                server
+                server.
         """
         if self.hs.config.worker_app:
             yield self._post_registration_client(
@@ -861,7 +861,7 @@ class RegistrationHandler(BaseHandler):
         """A user consented to the terms on registration
 
         Args:
-            user_id (str): The user ID that consented
+            user_id (str): The user ID that consented.
             consent_version (str): version of the policy the user has
                 consented to.
         """
diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py
new file mode 100644
index 0000000000..b268bbcb2c
--- /dev/null
+++ b/synapse/handlers/state_deltas.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+logger = logging.getLogger(__name__)
+
+
+class StateDeltasHandler(object):
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def _get_key_change(self, prev_event_id, event_id, key_name, public_value):
+        """Given two events check if the `key_name` field in content changed
+        from not matching `public_value` to doing so.
+
+        For example, check if `history_visibility` (`key_name`) changed from
+        `shared` to `world_readable` (`public_value`).
+
+        Returns:
+            None if the field in the events either both match `public_value`
+            or if neither do, i.e. there has been no change.
+            True if it didn't match `public_value` but now does.
+            False if it did match `public_value` but now doesn't.
+        """
+        prev_event = None
+        event = None
+        if prev_event_id:
+            prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+
+        if event_id:
+            event = yield self.store.get_event(event_id, allow_none=True)
+
+        if not event and not prev_event:
+            logger.debug("Neither event exists: %r %r", prev_event_id, event_id)
+            defer.returnValue(None)
+
+        prev_value = None
+        value = None
+
+        if prev_event:
+            prev_value = prev_event.content.get(key_name)
+
+        if event:
+            value = event.content.get(key_name)
+
+        logger.debug("prev_value: %r -> value: %r", prev_value, value)
+
+        if value == public_value and prev_value != public_value:
+            defer.returnValue(True)
+        elif value != public_value and prev_value == public_value:
+            defer.returnValue(False)
+        else:
+            defer.returnValue(None)
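
To illustrate the tri-state result of `_get_key_change` above, here is a minimal stand-alone sketch (plain dicts stand in for event content, and the Twisted deferreds are omitted):

def key_change(prev_content, content, key_name, public_value):
    # Returns True if key_name newly matches public_value, False if it
    # stopped matching, and None if nothing changed either way.
    prev_value = (prev_content or {}).get(key_name)
    value = (content or {}).get(key_name)

    if value == public_value and prev_value != public_value:
        return True
    if value != public_value and prev_value == public_value:
        return False
    return None


# e.g. history_visibility changing from "shared" to "world_readable"
assert key_change(
    {"history_visibility": "shared"},
    {"history_visibility": "world_readable"},
    "history_visibility",
    "world_readable",
) is True
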
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 7dc0e236e7..b689979b4b 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -21,6 +21,7 @@ from twisted.internet import defer
 
 import synapse.metrics
 from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.handlers.state_deltas import StateDeltasHandler
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.roommember import ProfileInfo
 from synapse.types import get_localpart_from_id
@@ -29,7 +30,7 @@ from synapse.util.metrics import Measure
 logger = logging.getLogger(__name__)
 
 
-class UserDirectoryHandler(object):
+class UserDirectoryHandler(StateDeltasHandler):
     """Handles querying of and keeping updated the user_directory.
 
     N.B.: ASSUMES IT IS THE ONLY THING THAT MODIFIES THE USER DIRECTORY
@@ -41,6 +42,8 @@ class UserDirectoryHandler(object):
     """
 
     def __init__(self, hs):
+        super(UserDirectoryHandler, self).__init__(hs)
+
         self.store = hs.get_datastore()
         self.state = hs.get_state_handler()
         self.server_name = hs.hostname
@@ -360,7 +363,7 @@ class UserDirectoryHandler(object):
 
     @defer.inlineCallbacks
     def _handle_remove_user(self, room_id, user_id):
-        """Called when we might need to remove user to directory
+        """Called when we might need to remove user from directory
 
         Args:
             room_id (str): room_id that user left or stopped being public that
@@ -402,47 +405,3 @@ class UserDirectoryHandler(object):
 
         if prev_name != new_name or prev_avatar != new_avatar:
             yield self.store.update_profile_in_user_dir(user_id, new_name, new_avatar)
-
-    @defer.inlineCallbacks
-    def _get_key_change(self, prev_event_id, event_id, key_name, public_value):
-        """Given two events check if the `key_name` field in content changed
-        from not matching `public_value` to doing so.
-
-        For example, check if `history_visibility` (`key_name`) changed from
-        `shared` to `world_readable` (`public_value`).
-
-        Returns:
-            None if the field in the events either both match `public_value`
-            or if neither do, i.e. there has been no change.
-            True if it didnt match `public_value` but now does
-            False if it did match `public_value` but now doesn't
-        """
-        prev_event = None
-        event = None
-        if prev_event_id:
-            prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
-
-        if event_id:
-            event = yield self.store.get_event(event_id, allow_none=True)
-
-        if not event and not prev_event:
-            logger.debug("Neither event exists: %r %r", prev_event_id, event_id)
-            defer.returnValue(None)
-
-        prev_value = None
-        value = None
-
-        if prev_event:
-            prev_value = prev_event.content.get(key_name)
-
-        if event:
-            value = event.content.get(key_name)
-
-        logger.debug("prev_value: %r -> value: %r", prev_value, value)
-
-        if value == public_value and prev_value != public_value:
-            defer.returnValue(True)
-        elif value != public_value and prev_value == public_value:
-            defer.returnValue(False)
-        else:
-            defer.returnValue(None)
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 1682c9af13..ff63d0b2a8 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -189,6 +189,58 @@ class MatrixFederationHttpClient(object):
         self._cooperator = Cooperator(scheduler=schedule)
 
     @defer.inlineCallbacks
+    def _send_request_with_optional_trailing_slash(
+        self,
+        request,
+        try_trailing_slash_on_400=False,
+        **send_request_args
+    ):
+        """Wrapper for _send_request which can optionally retry the request
+        upon receiving a combination of a 400 HTTP response code and a
+        'M_UNRECOGNIZED' errcode. This is a workaround for Synapse <= v0.99.3
+        due to #3622.
+
+        Args:
+            request (MatrixFederationRequest): details of request to be sent
+            try_trailing_slash_on_400 (bool): Whether to retry the request
+                with a trailing slash appended to the path when the server
+                responds with a 400 'M_UNRECOGNIZED' error.
+            send_request_args (Dict): A dictionary of arguments to pass to
+                `_send_request()`.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+
+        Returns:
+            Deferred[Dict]: Parsed JSON response body.
+        """
+        try:
+            response = yield self._send_request(
+                request, **send_request_args
+            )
+        except HttpResponseException as e:
+            # Received an HTTP error >= 300. Check if it meets the
+            # requirements to retry with a trailing slash.
+            if not try_trailing_slash_on_400:
+                raise
+
+            if e.code != 400 or e.to_synapse_error().errcode != "M_UNRECOGNIZED":
+                raise
+
+            # Retry with a trailing slash if we received a 400 with
+            # 'M_UNRECOGNIZED' which some endpoints can return when omitting a
+            # trailing slash on Synapse <= v0.99.3.
+            logger.info("Retrying request with trailing slash")
+            request.path += "/"
+
+            response = yield self._send_request(
+                request, **send_request_args
+            )
+
+        defer.returnValue(response)
+
+    @defer.inlineCallbacks
     def _send_request(
         self,
         request,
@@ -196,7 +248,7 @@ class MatrixFederationHttpClient(object):
         timeout=None,
         long_retries=False,
         ignore_backoff=False,
-        backoff_on_404=False
+        backoff_on_404=False,
     ):
         """
         Sends a request to the given server.
@@ -473,7 +525,8 @@ class MatrixFederationHttpClient(object):
                  json_data_callback=None,
                  long_retries=False, timeout=None,
                  ignore_backoff=False,
-                 backoff_on_404=False):
+                 backoff_on_404=False,
+                 try_trailing_slash_on_400=False):
         """ Sends the specifed json data using PUT
 
         Args:
@@ -493,7 +546,12 @@ class MatrixFederationHttpClient(object):
                 and try the request anyway.
             backoff_on_404 (bool): True if we should count a 404 response as
                 a failure of the server (and should therefore back off future
-                requests)
+                requests).
+            try_trailing_slash_on_400 (bool): True if, on receiving a 400
+                'M_UNRECOGNIZED' response, we should retry the request with a
+                trailing slash appended to the path. Workaround for #3622 in
+                Synapse <= v0.99.3. The retry is attempted before backing off
+                if backing off has been enabled.
 
         Returns:
             Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
@@ -509,7 +567,6 @@ class MatrixFederationHttpClient(object):
             RequestSendFailed: If there were problems connecting to the
                 remote, due to e.g. DNS failures, connection timeouts etc.
         """
-
         request = MatrixFederationRequest(
             method="PUT",
             destination=destination,
@@ -519,17 +576,19 @@ class MatrixFederationHttpClient(object):
             json=data,
         )
 
-        response = yield self._send_request(
+        response = yield self._send_request_with_optional_trailing_slash(
             request,
+            try_trailing_slash_on_400,
+            backoff_on_404=backoff_on_404,
+            ignore_backoff=ignore_backoff,
             long_retries=long_retries,
             timeout=timeout,
-            ignore_backoff=ignore_backoff,
-            backoff_on_404=backoff_on_404,
         )
 
         body = yield _handle_json_response(
             self.hs.get_reactor(), self.default_timeout, request, response,
         )
+
         defer.returnValue(body)
 
     @defer.inlineCallbacks
@@ -592,7 +651,8 @@ class MatrixFederationHttpClient(object):
 
     @defer.inlineCallbacks
     def get_json(self, destination, path, args=None, retry_on_dns_fail=True,
-                 timeout=None, ignore_backoff=False):
+                 timeout=None, ignore_backoff=False,
+                 try_trailing_slash_on_400=False):
         """ GETs some json from the given host homeserver and path
 
         Args:
@@ -606,6 +666,9 @@ class MatrixFederationHttpClient(object):
                 be retried.
             ignore_backoff (bool): true to ignore the historical backoff data
                 and try the request anyway.
+            try_trailing_slash_on_400 (bool): True if, on a 400 'M_UNRECOGNIZED'
+                response, we should retry the request with a trailing slash
+                appended to the path. Workaround for #3622 in Synapse <= v0.99.3.
         Returns:
             Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
             result will be the decoded JSON body.
@@ -631,16 +694,19 @@ class MatrixFederationHttpClient(object):
             query=args,
         )
 
-        response = yield self._send_request(
+        response = yield self._send_request_with_optional_trailing_slash(
             request,
+            try_trailing_slash_on_400,
+            backoff_on_404=False,
+            ignore_backoff=ignore_backoff,
             retry_on_dns_fail=retry_on_dns_fail,
             timeout=timeout,
-            ignore_backoff=ignore_backoff,
         )
 
         body = yield _handle_json_response(
             self.hs.get_reactor(), self.default_timeout, request, response,
         )
+
         defer.returnValue(body)
 
     @defer.inlineCallbacks
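
A minimal stand-alone sketch of the retry condition used by `_send_request_with_optional_trailing_slash` above (the helper name below is hypothetical; the real code re-raises the original `HttpResponseException` when the condition is not met):

def should_retry_with_slash(status_code, errcode, try_trailing_slash_on_400):
    # Only a 400 with errcode M_UNRECOGNIZED triggers the trailing-slash
    # workaround, and only when the caller has opted in.
    return (
        try_trailing_slash_on_400
        and status_code == 400
        and errcode == "M_UNRECOGNIZED"
    )


assert should_retry_with_slash(400, "M_UNRECOGNIZED", True)
assert not should_retry_with_slash(400, "M_UNRECOGNIZED", False)
assert not should_retry_with_slash(404, "M_NOT_FOUND", True)
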
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index fc9a20ff59..235ce8334e 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -73,14 +73,26 @@ class ModuleApi(object):
         """
         return self._auth_handler.check_user_exists(user_id)
 
-    def register(self, localpart):
-        """Registers a new user with given localpart
+    @defer.inlineCallbacks
+    def register(self, localpart, displayname=None):
+        """Registers a new user with given localpart and optional
+           displayname.
+
+        Args:
+            localpart (str): The localpart of the new user.
+            displayname (str|None): The displayname of the new user. If None,
+                the user's displayname will default to `localpart`.
 
         Returns:
             Deferred: a 2-tuple of (user_id, access_token)
         """
+        # Register the user
         reg = self.hs.get_registration_handler()
-        return reg.register(localpart=localpart)
+        user_id, access_token = yield reg.register(
+            localpart=localpart, default_display_name=displayname,
+        )
+
+        defer.returnValue((user_id, access_token))
 
     @defer.inlineCallbacks
     def invalidate_access_token(self, access_token):
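
A sketch of how a plugin module might use the extended `register()` API above; `provision_user` and the displayname value are illustrative only, and `api` is assumed to be the `ModuleApi` instance handed to the module by Synapse:

from twisted.internet import defer


@defer.inlineCallbacks
def provision_user(api, localpart):
    # Register a user and give them a human-friendly displayname in one go.
    user_id, access_token = yield api.register(
        localpart, displayname="Provisioned User",
    )
    defer.returnValue((user_id, access_token))
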
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 55630ba9a7..02e5bf6cc8 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -223,14 +223,25 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
             return
 
         # Now lets try and call on_<CMD_NAME> function
-        try:
-            run_as_background_process(
-                "replication-" + cmd.get_logcontext_id(),
-                getattr(self, "on_%s" % (cmd_name,)),
-                cmd,
-            )
-        except Exception:
-            logger.exception("[%s] Failed to handle line: %r", self.id(), line)
+        run_as_background_process(
+            "replication-" + cmd.get_logcontext_id(),
+            self.handle_command,
+            cmd,
+        )
+
+    def handle_command(self, cmd):
+        """Handle a command we have received over the replication stream.
+
+        By default, this delegates to `on_<COMMAND>`.
+
+        Args:
+            cmd (synapse.replication.tcp.commands.Command): received command
+
+        Returns:
+            Deferred
+        """
+        handler = getattr(self, "on_%s" % (cmd.NAME,))
+        return handler(cmd)
 
     def close(self):
         logger.warn("[%s] Closing connection", self.id())
@@ -364,8 +375,11 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
             self.transport.unregisterProducer()
 
     def __str__(self):
+        addr = None
+        if self.transport:
+            addr = str(self.transport.getPeer())
         return "ReplicationConnection<name=%s,conn_id=%s,addr=%s>" % (
-            self.name, self.conn_id, self.addr,
+            self.name, self.conn_id, addr,
         )
 
     def id(self):
@@ -381,12 +395,11 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
     VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS
     VALID_OUTBOUND_COMMANDS = VALID_SERVER_COMMANDS
 
-    def __init__(self, server_name, clock, streamer, addr):
+    def __init__(self, server_name, clock, streamer):
         BaseReplicationStreamProtocol.__init__(self, clock)  # Old style class
 
         self.server_name = server_name
         self.streamer = streamer
-        self.addr = addr
 
         # The streams the client has subscribed to and is up to date with
         self.replication_streams = set()
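
The refactored `handle_command` above dispatches on the command's `NAME` via `getattr`. A stand-alone sketch of that dispatch pattern (the classes below are stand-ins, not Synapse's):

class PingCommand(object):
    NAME = "PING"


class Dispatcher(object):
    def handle_command(self, cmd):
        # Look up on_<COMMAND NAME> and call it, mirroring the protocol code.
        handler = getattr(self, "on_%s" % (cmd.NAME,))
        return handler(cmd)

    def on_PING(self, cmd):
        return "pong"


assert Dispatcher().handle_command(PingCommand()) == "pong"
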
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index 47cdf30bd3..7fc346c7b6 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -57,7 +57,6 @@ class ReplicationStreamProtocolFactory(Factory):
             self.server_name,
             self.clock,
             self.streamer,
-            addr
         )
 
 
diff --git a/synapse/replication/tcp/streams.py b/synapse/replication/tcp/streams.py
index c1e626be3f..e23084baae 100644
--- a/synapse/replication/tcp/streams.py
+++ b/synapse/replication/tcp/streams.py
@@ -23,7 +23,7 @@ Each stream is defined by the following information:
     current_token:      The function that returns the current token for the stream
     update_function:    The function that returns a list of updates between two tokens
 """
-
+import itertools
 import logging
 from collections import namedtuple
 
@@ -195,8 +195,8 @@ class Stream(object):
                 limit=MAX_EVENTS_BEHIND + 1,
             )
 
-            if len(rows) >= MAX_EVENTS_BEHIND:
-                raise Exception("stream %s has fallen behind" % (self.NAME))
+            # never turn more than MAX_EVENTS_BEHIND + 1 into updates.
+            rows = itertools.islice(rows, MAX_EVENTS_BEHIND + 1)
         else:
             rows = yield self.update_function(
                 from_token, current_token,
@@ -204,6 +204,11 @@ class Stream(object):
 
         updates = [(row[0], self.ROW_TYPE(*row[1:])) for row in rows]
 
+        # check we didn't get more rows than the limit.
+        # doing it like this allows the update_function to be a generator.
+        if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND:
+            raise Exception("stream %s has fallen behind" % (self.NAME))
+
         defer.returnValue((updates, current_token))
 
     def current_token(self):
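
A small sketch of why the limit check moved after the row/update conversion: `itertools.islice` caps how much of a (possibly generator-based) `update_function` is realised, and the "fallen behind" condition is then evaluated on the materialised list. Values are illustrative only:

import itertools

MAX_EVENTS_BEHIND = 3  # tiny value for illustration


def update_function():
    # May now be a generator; only MAX_EVENTS_BEHIND + 1 rows are ever pulled.
    for i in itertools.count():
        yield (i, "update-%d" % i)


rows = itertools.islice(update_function(), MAX_EVENTS_BEHIND + 1)
updates = list(rows)

# Mirrors the new check: hitting the cap means the stream has fallen behind.
assert len(updates) >= MAX_EVENTS_BEHIND
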
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 8d56effbb8..5180e9eaf1 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -201,6 +201,24 @@ class LoginRestServlet(ClientV1RestServlet):
                 # We store all email addreses as lowercase in the DB.
                 # (See add_threepid in synapse/handlers/auth.py)
                 address = address.lower()
+
+            # Check for login providers that support 3pid login types
+            canonical_user_id, callback_3pid = (
+                yield self.auth_handler.check_password_provider_3pid(
+                    medium,
+                    address,
+                    login_submission["password"],
+                )
+            )
+            if canonical_user_id:
+                # Authentication through password provider and 3pid succeeded
+                result = yield self._register_device_with_callback(
+                    canonical_user_id, login_submission, callback_3pid,
+                )
+                defer.returnValue(result)
+
+            # No password providers were able to handle this 3pid
+            # Check local store
             user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
                 medium, address,
             )
@@ -223,20 +241,43 @@ class LoginRestServlet(ClientV1RestServlet):
         if "user" not in identifier:
             raise SynapseError(400, "User identifier is missing 'user' key")
 
-        auth_handler = self.auth_handler
-        canonical_user_id, callback = yield auth_handler.validate_login(
+        canonical_user_id, callback = yield self.auth_handler.validate_login(
             identifier["user"],
             login_submission,
         )
 
+        result = yield self._register_device_with_callback(
+            canonical_user_id, login_submission, callback,
+        )
+        defer.returnValue(result)
+
+    @defer.inlineCallbacks
+    def _register_device_with_callback(
+        self,
+        user_id,
+        login_submission,
+        callback=None,
+    ):
+        """ Registers a device with a given user_id. Optionally run a callback
+        function after registration has completed.
+
+        Args:
+            user_id (str): ID of the user to register.
+            login_submission (dict): Dictionary of login information.
+            callback (func|None): Callback function to run after registration.
+
+        Returns:
+            result (Dict[str,str]): Dictionary of account information after
+                successful registration.
+        """
         device_id = login_submission.get("device_id")
         initial_display_name = login_submission.get("initial_device_display_name")
         device_id, access_token = yield self.registration_handler.register_device(
-            canonical_user_id, device_id, initial_display_name,
+            user_id, device_id, initial_display_name,
         )
 
         result = {
-            "user_id": canonical_user_id,
+            "user_id": user_id,
             "access_token": access_token,
             "home_server": self.hs.hostname,
             "device_id": device_id,
diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py
index 0fd1ccc40a..89a1f7e3d7 100644
--- a/synapse/storage/receipts.py
+++ b/synapse/storage/receipts.py
@@ -301,7 +301,9 @@ class ReceiptsWorkerStore(SQLBaseStore):
                 args.append(limit)
             txn.execute(sql, args)
 
-            return txn.fetchall()
+            return (
+                r[0:5] + (json.loads(r[5]), ) for r in txn
+            )
         return self.runInteraction(
             "get_all_updated_receipts", get_all_updated_receipts_txn
         )
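
A stand-alone sketch of the change above: the transaction now yields tuples whose last column (the receipt data) has been decoded from JSON, instead of returning the raw rows:

import json

raw_rows = [
    (1, "!room:hs", "m.read", "@user:hs", "$event:hs", '{"ts": 1234}'),
]

# r[0:5] keeps the first five columns as-is; the sixth is parsed from JSON.
parsed = (r[0:5] + (json.loads(r[5]),) for r in raw_rows)

assert next(parsed)[5] == {"ts": 1234}
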
diff --git a/synapse/storage/state_deltas.py b/synapse/storage/state_deltas.py
new file mode 100644
index 0000000000..57bc45cdb9
--- /dev/null
+++ b/synapse/storage/state_deltas.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 Vector Creations Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.storage._base import SQLBaseStore
+
+logger = logging.getLogger(__name__)
+
+
+class StateDeltasStore(SQLBaseStore):
+
+    def get_current_state_deltas(self, prev_stream_id):
+        prev_stream_id = int(prev_stream_id)
+        if not self._curr_state_delta_stream_cache.has_any_entity_changed(prev_stream_id):
+            return []
+
+        def get_current_state_deltas_txn(txn):
+            # First we calculate the max stream id that will give us at most
+            # around 100 results.
+            # We arbitrarily limit to 100 stream_id entries to ensure we don't
+            # select too many.
+            sql = """
+                SELECT stream_id, count(*)
+                FROM current_state_delta_stream
+                WHERE stream_id > ?
+                GROUP BY stream_id
+                ORDER BY stream_id ASC
+                LIMIT 100
+            """
+            txn.execute(sql, (prev_stream_id,))
+
+            total = 0
+            max_stream_id = prev_stream_id
+            for max_stream_id, count in txn:
+                total += count
+                if total > 100:
+                    # We arbitrarily limit to 100 entries to ensure we don't
+                    # select too many.
+                    break
+
+            # Now actually get the deltas
+            sql = """
+                SELECT stream_id, room_id, type, state_key, event_id, prev_event_id
+                FROM current_state_delta_stream
+                WHERE ? < stream_id AND stream_id <= ?
+                ORDER BY stream_id ASC
+            """
+            txn.execute(sql, (prev_stream_id, max_stream_id,))
+            return self.cursor_to_dict(txn)
+
+        return self.runInteraction(
+            "get_current_state_deltas", get_current_state_deltas_txn
+        )
+
+    def get_max_stream_id_in_current_state_deltas(self):
+        return self._simple_select_one_onecol(
+            table="current_state_delta_stream",
+            keyvalues={},
+            retcol="COALESCE(MAX(stream_id), -1)",
+            desc="get_max_stream_id_in_current_state_deltas",
+        )
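
The two-step query in `get_current_state_deltas` first works out how far it can advance the stream position while returning a bounded number of rows. A stand-alone sketch of that capping step, with a hypothetical helper name and toy numbers:

def cap_stream_id(counts_by_stream_id, prev_stream_id, limit=100):
    # counts_by_stream_id: (stream_id, row count) pairs in ascending order,
    # as produced by the first SELECT ... GROUP BY stream_id query.
    total = 0
    max_stream_id = prev_stream_id
    for max_stream_id, count in counts_by_stream_id:
        total += count
        if total > limit:
            break
    return max_stream_id


assert cap_stream_id([(5, 60), (6, 60), (7, 10)], prev_stream_id=4) == 6
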
diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py
index d360e857d1..4d60a5726f 100644
--- a/synapse/storage/user_directory.py
+++ b/synapse/storage/user_directory.py
@@ -22,6 +22,7 @@ from synapse.api.constants import EventTypes, JoinRules
 from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 from synapse.storage.state import StateFilter
+from synapse.storage.state_deltas import StateDeltasStore
 from synapse.types import get_domain_from_id, get_localpart_from_id
 from synapse.util.caches.descriptors import cached
 
@@ -31,7 +32,7 @@ logger = logging.getLogger(__name__)
 TEMP_TABLE = "_temp_populate_user_directory"
 
 
-class UserDirectoryStore(BackgroundUpdateStore):
+class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
 
     # How many records do we calculate before sending it to
     # add_users_who_share_private_rooms?
@@ -134,7 +135,12 @@ class UserDirectoryStore(BackgroundUpdateStore):
 
     @defer.inlineCallbacks
     def _populate_user_directory_process_rooms(self, progress, batch_size):
-
+        """
+        Args:
+            progress (dict): Progress of the background update so far.
+            batch_size (int): Maximum number of state events to process
+                per cycle.
+        """
         state = self.hs.get_state_handler()
 
         # If we don't have progress filed, delete everything.
@@ -142,13 +148,14 @@ class UserDirectoryStore(BackgroundUpdateStore):
             yield self.delete_all_from_user_dir()
 
         def _get_next_batch(txn):
+            # Only fetch 250 rooms, so we don't fetch too many at once, even
+            # if those 250 rooms have fewer than batch_size state events.
             sql = """
-                SELECT room_id FROM %s
+                SELECT room_id, events FROM %s
                 ORDER BY events DESC
-                LIMIT %s
+                LIMIT 250
             """ % (
                 TEMP_TABLE + "_rooms",
-                str(batch_size),
             )
             txn.execute(sql)
             rooms_to_work_on = txn.fetchall()
@@ -156,8 +163,6 @@ class UserDirectoryStore(BackgroundUpdateStore):
             if not rooms_to_work_on:
                 return None
 
-            rooms_to_work_on = [x[0] for x in rooms_to_work_on]
-
             # Get how many are left to process, so we can give status on how
             # far we are in processing
             txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms")
@@ -179,7 +184,9 @@ class UserDirectoryStore(BackgroundUpdateStore):
             % (len(rooms_to_work_on), progress["remaining"])
         )
 
-        for room_id in rooms_to_work_on:
+        processed_event_count = 0
+
+        for room_id, event_count in rooms_to_work_on:
             is_in_room = yield self.is_host_joined(room_id, self.server_name)
 
             if is_in_room:
@@ -246,7 +253,13 @@ class UserDirectoryStore(BackgroundUpdateStore):
                 progress,
             )
 
-        defer.returnValue(len(rooms_to_work_on))
+            processed_event_count += event_count
+
+            if processed_event_count > batch_size:
+                # Don't process any more rooms, we've hit our batch size.
+                defer.returnValue(processed_event_count)
+
+        defer.returnValue(processed_event_count)
 
     @defer.inlineCallbacks
     def _populate_user_directory_process_users(self, progress, batch_size):
@@ -488,16 +501,6 @@ class UserDirectoryStore(BackgroundUpdateStore):
 
         defer.returnValue(user_ids)
 
-    @defer.inlineCallbacks
-    def get_all_local_users(self):
-        """Get all local users
-        """
-        sql = """
-            SELECT name FROM users
-        """
-        rows = yield self._execute("get_all_local_users", None, sql)
-        defer.returnValue([name for name, in rows])
-
     def add_users_who_share_private_room(self, room_id, user_id_tuples):
         """Insert entries into the users_who_share_private_rooms table. The first
         user should be a local user.
@@ -675,59 +678,6 @@ class UserDirectoryStore(BackgroundUpdateStore):
             desc="update_user_directory_stream_pos",
         )
 
-    def get_current_state_deltas(self, prev_stream_id):
-        prev_stream_id = int(prev_stream_id)
-        if not self._curr_state_delta_stream_cache.has_any_entity_changed(
-            prev_stream_id
-        ):
-            return []
-
-        def get_current_state_deltas_txn(txn):
-            # First we calculate the max stream id that will give us less than
-            # N results.
-            # We arbitarily limit to 100 stream_id entries to ensure we don't
-            # select toooo many.
-            sql = """
-                SELECT stream_id, count(*)
-                FROM current_state_delta_stream
-                WHERE stream_id > ?
-                GROUP BY stream_id
-                ORDER BY stream_id ASC
-                LIMIT 100
-            """
-            txn.execute(sql, (prev_stream_id,))
-
-            total = 0
-            max_stream_id = prev_stream_id
-            for max_stream_id, count in txn:
-                total += count
-                if total > 100:
-                    # We arbitarily limit to 100 entries to ensure we don't
-                    # select toooo many.
-                    break
-
-            # Now actually get the deltas
-            sql = """
-                SELECT stream_id, room_id, type, state_key, event_id, prev_event_id
-                FROM current_state_delta_stream
-                WHERE ? < stream_id AND stream_id <= ?
-                ORDER BY stream_id ASC
-            """
-            txn.execute(sql, (prev_stream_id, max_stream_id))
-            return self.cursor_to_dict(txn)
-
-        return self.runInteraction(
-            "get_current_state_deltas", get_current_state_deltas_txn
-        )
-
-    def get_max_stream_id_in_current_state_deltas(self):
-        return self._simple_select_one_onecol(
-            table="current_state_delta_stream",
-            keyvalues={},
-            retcol="COALESCE(MAX(stream_id), -1)",
-            desc="get_max_stream_id_in_current_state_deltas",
-        )
-
     @defer.inlineCallbacks
     def search_user_dir(self, user_id, search_term, limit):
         """Searches for users in directory
diff --git a/synctl b/synctl
index 816c898b36..07a68e6d85 100755
--- a/synctl
+++ b/synctl
@@ -164,7 +164,7 @@ def main():
         sys.exit(1)
 
     with open(configfile) as stream:
-        config = yaml.load(stream)
+        config = yaml.safe_load(stream)
 
     pidfile = config["pid_file"]
     cache_factor = config.get("synctl_cache_factor")
@@ -206,7 +206,7 @@ def main():
     workers = []
     for worker_configfile in worker_configfiles:
         with open(worker_configfile) as stream:
-            worker_config = yaml.load(stream)
+            worker_config = yaml.safe_load(stream)
         worker_app = worker_config["worker_app"]
         if worker_app == "synapse.app.homeserver":
             # We need to special case all of this to pick up options that may
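
For reference, the difference the synctl change relies on: `yaml.safe_load` only builds plain Python types, whereas a bare `yaml.load` (with no explicit Loader) can construct arbitrary Python objects from a hostile document. A minimal example:

import yaml

config = yaml.safe_load("pid_file: /var/run/homeserver.pid\n")
assert config["pid_file"] == "/var/run/homeserver.pid"
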
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index d5f1777093..6bfc1970ad 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -43,7 +43,7 @@ class ConfigLoadingTestCase(unittest.TestCase):
         self.generate_config()
 
         with open(self.file, "r") as f:
-            raw = yaml.load(f)
+            raw = yaml.safe_load(f)
         self.assertIn("macaroon_secret_key", raw)
 
         config = HomeServerConfig.load_config("", ["-c", self.file])
diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py
index 3dc2631523..47fffcfeb2 100644
--- a/tests/config/test_room_directory.py
+++ b/tests/config/test_room_directory.py
@@ -22,7 +22,7 @@ from tests import unittest
 
 class RoomDirectoryConfigTestCase(unittest.TestCase):
     def test_alias_creation_acl(self):
-        config = yaml.load("""
+        config = yaml.safe_load("""
         alias_creation_rules:
             - user_id: "*bob*"
               alias: "*"
@@ -74,7 +74,7 @@ class RoomDirectoryConfigTestCase(unittest.TestCase):
         ))
 
     def test_room_publish_acl(self):
-        config = yaml.load("""
+        config = yaml.safe_load("""
         alias_creation_rules: []
 
         room_list_publication_rules:
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 2217eb2a10..017ea0385e 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -22,8 +22,6 @@ from synapse.api.errors import ResourceLimitError, SynapseError
 from synapse.handlers.register import RegistrationHandler
 from synapse.types import RoomAlias, UserID, create_requester
 
-from tests.utils import default_config, setup_test_homeserver
-
 from .. import unittest
 
 
@@ -32,26 +30,23 @@ class RegistrationHandlers(object):
         self.registration_handler = RegistrationHandler(hs)
 
 
-class RegistrationTestCase(unittest.TestCase):
+class RegistrationTestCase(unittest.HomeserverTestCase):
     """ Tests the RegistrationHandler. """
 
-    @defer.inlineCallbacks
-    def setUp(self):
-        self.mock_distributor = Mock()
-        self.mock_distributor.declare("registered_user")
-        self.mock_captcha_client = Mock()
-
-        hs_config = default_config("test")
+    def make_homeserver(self, reactor, clock):
+        hs_config = self.default_config("test")
 
         # some of the tests rely on us having a user consent version
         hs_config.user_consent_version = "test_consent_version"
         hs_config.max_mau_value = 50
 
-        self.hs = yield setup_test_homeserver(
-            self.addCleanup,
-            config=hs_config,
-            expire_access_token=True,
-        )
+        hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True)
+        return hs
+
+    def prepare(self, reactor, clock, hs):
+        self.mock_distributor = Mock()
+        self.mock_distributor.declare("registered_user")
+        self.mock_captcha_client = Mock()
         self.macaroon_generator = Mock(
             generate_access_token=Mock(return_value='secret')
         )
@@ -63,136 +58,133 @@ class RegistrationTestCase(unittest.TestCase):
 
         self.requester = create_requester("@requester:test")
 
-    @defer.inlineCallbacks
     def test_user_is_created_and_logged_in_if_doesnt_exist(self):
         frank = UserID.from_string("@frank:test")
         user_id = frank.to_string()
         requester = create_requester(user_id)
-        result_user_id, result_token = yield self.handler.get_or_create_user(
-            requester, frank.localpart, "Frankie"
+        result_user_id, result_token = self.get_success(
+            self.handler.get_or_create_user(requester, frank.localpart, "Frankie")
         )
         self.assertEquals(result_user_id, user_id)
         self.assertTrue(result_token is not None)
         self.assertEquals(result_token, 'secret')
 
-    @defer.inlineCallbacks
     def test_if_user_exists(self):
         store = self.hs.get_datastore()
         frank = UserID.from_string("@frank:test")
-        yield store.register(
-            user_id=frank.to_string(),
-            token="jkv;g498752-43gj['eamb!-5",
-            password_hash=None,
+        self.get_success(
+            store.register(
+                user_id=frank.to_string(),
+                token="jkv;g498752-43gj['eamb!-5",
+                password_hash=None,
+            )
         )
         local_part = frank.localpart
         user_id = frank.to_string()
         requester = create_requester(user_id)
-        result_user_id, result_token = yield self.handler.get_or_create_user(
-            requester, local_part, None
+        result_user_id, result_token = self.get_success(
+            self.handler.get_or_create_user(requester, local_part, None)
         )
         self.assertEquals(result_user_id, user_id)
         self.assertTrue(result_token is not None)
 
-    @defer.inlineCallbacks
     def test_mau_limits_when_disabled(self):
         self.hs.config.limit_usage_by_mau = False
         # Ensure does not throw exception
-        yield self.handler.get_or_create_user(self.requester, 'a', "display_name")
+        self.get_success(
+            self.handler.get_or_create_user(self.requester, 'a', "display_name")
+        )
 
-    @defer.inlineCallbacks
     def test_get_or_create_user_mau_not_blocked(self):
         self.hs.config.limit_usage_by_mau = True
         self.store.count_monthly_users = Mock(
             return_value=defer.succeed(self.hs.config.max_mau_value - 1)
         )
         # Ensure does not throw exception
-        yield self.handler.get_or_create_user(self.requester, 'c', "User")
+        self.get_success(self.handler.get_or_create_user(self.requester, 'c', "User"))
 
-    @defer.inlineCallbacks
     def test_get_or_create_user_mau_blocked(self):
         self.hs.config.limit_usage_by_mau = True
         self.store.get_monthly_active_count = Mock(
             return_value=defer.succeed(self.lots_of_users)
         )
-        with self.assertRaises(ResourceLimitError):
-            yield self.handler.get_or_create_user(self.requester, 'b', "display_name")
+        self.get_failure(
+            self.handler.get_or_create_user(self.requester, 'b', "display_name"),
+            ResourceLimitError,
+        )
 
         self.store.get_monthly_active_count = Mock(
             return_value=defer.succeed(self.hs.config.max_mau_value)
         )
-        with self.assertRaises(ResourceLimitError):
-            yield self.handler.get_or_create_user(self.requester, 'b', "display_name")
+        self.get_failure(
+            self.handler.get_or_create_user(self.requester, 'b', "display_name"),
+            ResourceLimitError,
+        )
 
-    @defer.inlineCallbacks
     def test_register_mau_blocked(self):
         self.hs.config.limit_usage_by_mau = True
         self.store.get_monthly_active_count = Mock(
             return_value=defer.succeed(self.lots_of_users)
         )
-        with self.assertRaises(ResourceLimitError):
-            yield self.handler.register(localpart="local_part")
+        self.get_failure(
+            self.handler.register(localpart="local_part"), ResourceLimitError
+        )
 
         self.store.get_monthly_active_count = Mock(
             return_value=defer.succeed(self.hs.config.max_mau_value)
         )
-        with self.assertRaises(ResourceLimitError):
-            yield self.handler.register(localpart="local_part")
+        self.get_failure(
+            self.handler.register(localpart="local_part"), ResourceLimitError
+        )
 
-    @defer.inlineCallbacks
     def test_auto_create_auto_join_rooms(self):
         room_alias_str = "#room:test"
         self.hs.config.auto_join_rooms = [room_alias_str]
-        res = yield self.handler.register(localpart='jeff')
-        rooms = yield self.store.get_rooms_for_user(res[0])
+        res = self.get_success(self.handler.register(localpart='jeff'))
+        rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
         directory_handler = self.hs.get_handlers().directory_handler
         room_alias = RoomAlias.from_string(room_alias_str)
-        room_id = yield directory_handler.get_association(room_alias)
+        room_id = self.get_success(directory_handler.get_association(room_alias))
 
         self.assertTrue(room_id['room_id'] in rooms)
         self.assertEqual(len(rooms), 1)
 
-    @defer.inlineCallbacks
     def test_auto_create_auto_join_rooms_with_no_rooms(self):
         self.hs.config.auto_join_rooms = []
         frank = UserID.from_string("@frank:test")
-        res = yield self.handler.register(frank.localpart)
+        res = self.get_success(self.handler.register(frank.localpart))
         self.assertEqual(res[0], frank.to_string())
-        rooms = yield self.store.get_rooms_for_user(res[0])
+        rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
         self.assertEqual(len(rooms), 0)
 
-    @defer.inlineCallbacks
     def test_auto_create_auto_join_where_room_is_another_domain(self):
         self.hs.config.auto_join_rooms = ["#room:another"]
         frank = UserID.from_string("@frank:test")
-        res = yield self.handler.register(frank.localpart)
+        res = self.get_success(self.handler.register(frank.localpart))
         self.assertEqual(res[0], frank.to_string())
-        rooms = yield self.store.get_rooms_for_user(res[0])
+        rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
         self.assertEqual(len(rooms), 0)
 
-    @defer.inlineCallbacks
     def test_auto_create_auto_join_where_auto_create_is_false(self):
         self.hs.config.autocreate_auto_join_rooms = False
         room_alias_str = "#room:test"
         self.hs.config.auto_join_rooms = [room_alias_str]
-        res = yield self.handler.register(localpart='jeff')
-        rooms = yield self.store.get_rooms_for_user(res[0])
+        res = self.get_success(self.handler.register(localpart='jeff'))
+        rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
         self.assertEqual(len(rooms), 0)
 
-    @defer.inlineCallbacks
     def test_auto_create_auto_join_rooms_when_support_user_exists(self):
         room_alias_str = "#room:test"
         self.hs.config.auto_join_rooms = [room_alias_str]
 
         self.store.is_support_user = Mock(return_value=True)
-        res = yield self.handler.register(localpart='support')
-        rooms = yield self.store.get_rooms_for_user(res[0])
+        res = self.get_success(self.handler.register(localpart='support'))
+        rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
         self.assertEqual(len(rooms), 0)
         directory_handler = self.hs.get_handlers().directory_handler
         room_alias = RoomAlias.from_string(room_alias_str)
-        with self.assertRaises(SynapseError):
-            yield directory_handler.get_association(room_alias)
+        self.get_failure(directory_handler.get_association(room_alias), SynapseError)
 
-    @defer.inlineCallbacks
     def test_auto_create_auto_join_where_no_consent(self):
         """Test to ensure that the first user is not auto-joined to a room if
         they have not given general consent.
@@ -208,27 +200,27 @@ class RegistrationTestCase(unittest.TestCase):
         # (Messing with the internals of event_creation_handler is fragile
         # but can't see a better way to do this. One option could be to subclass
         # the test with custom config.)
-        event_creation_handler._block_events_without_consent_error = ("Error")
+        event_creation_handler._block_events_without_consent_error = "Error"
         event_creation_handler._consent_uri_builder = Mock()
         room_alias_str = "#room:test"
         self.hs.config.auto_join_rooms = [room_alias_str]
 
         # When:-
         #   * the user is registered and post consent actions are called
-        res = yield self.handler.register(localpart='jeff')
-        yield self.handler.post_consent_actions(res[0])
+        res = self.get_success(self.handler.register(localpart='jeff'))
+        self.get_success(self.handler.post_consent_actions(res[0]))
 
         # Then:-
         #   * Ensure that they have not been joined to the room
-        rooms = yield self.store.get_rooms_for_user(res[0])
+        rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
         self.assertEqual(len(rooms), 0)
 
-    @defer.inlineCallbacks
     def test_register_support_user(self):
-        res = yield self.handler.register(localpart='user', user_type=UserTypes.SUPPORT)
+        res = self.get_success(
+            self.handler.register(localpart='user', user_type=UserTypes.SUPPORT)
+        )
         self.assertTrue(self.store.is_support_user(res[0]))
 
-    @defer.inlineCallbacks
     def test_register_not_support_user(self):
-        res = yield self.handler.register(localpart='user')
+        res = self.get_success(self.handler.register(localpart='user'))
         self.assertFalse(self.store.is_support_user(res[0]))
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 13486930fb..6460cbc708 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -180,7 +180,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         put_json = self.hs.get_http_client().put_json
         put_json.assert_called_once_with(
             "farm",
-            path="/_matrix/federation/v1/send/1000000/",
+            path="/_matrix/federation/v1/send/1000000",
             data=_expect_edu_transaction(
                 "m.typing",
                 content={
@@ -192,6 +192,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             json_data_callback=ANY,
             long_retries=True,
             backoff_on_404=True,
+            try_trailing_slash_on_400=True,
         )
 
     def test_started_typing_remote_recv(self):
@@ -201,7 +202,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
 
         (request, channel) = self.make_request(
             "PUT",
-            "/_matrix/federation/v1/send/1000000/",
+            "/_matrix/federation/v1/send/1000000",
             _make_edu_transaction_json(
                 "m.typing",
                 content={
@@ -257,7 +258,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         put_json = self.hs.get_http_client().put_json
         put_json.assert_called_once_with(
             "farm",
-            path="/_matrix/federation/v1/send/1000000/",
+            path="/_matrix/federation/v1/send/1000000",
             data=_expect_edu_transaction(
                 "m.typing",
                 content={
@@ -269,6 +270,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             json_data_callback=ANY,
             long_retries=True,
             backoff_on_404=True,
+            try_trailing_slash_on_400=True,
         )
 
         self.assertEquals(self.event_source.get_current_key(), 1)
diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py
index b03b37affe..cd8e086f86 100644
--- a/tests/http/test_fedclient.py
+++ b/tests/http/test_fedclient.py
@@ -268,6 +268,105 @@ class FederationClientTests(HomeserverTestCase):
 
         self.assertIsInstance(f.value, TimeoutError)
 
+    def test_client_requires_trailing_slashes(self):
+        """
+        If the remote server rejects a request with a 400 'M_UNRECOGNIZED'
+        because it requires a trailing slash, we retry the request with a
+        trailing slash. Workaround for Synapse <= v0.99.3, explained in #3622.
+        """
+        d = self.cl.get_json(
+            "testserv:8008", "foo/bar", try_trailing_slash_on_400=True,
+        )
+
+        # Send the request
+        self.pump()
+
+        # there should have been a call to connectTCP
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (_host, _port, factory, _timeout, _bindAddress) = clients[0]
+
+        # complete the connection and wire it up to a fake transport
+        client = factory.buildProtocol(None)
+        conn = StringTransport()
+        client.makeConnection(conn)
+
+        # that should have made it send the request to the connection
+        self.assertRegex(conn.value(), b"^GET /foo/bar")
+
+        # Clear the original request data before sending a response
+        conn.clear()
+
+        # Send the HTTP response
+        client.dataReceived(
+            b"HTTP/1.1 400 Bad Request\r\n"
+            b"Content-Type: application/json\r\n"
+            b"Content-Length: 59\r\n"
+            b"\r\n"
+            b'{"errcode":"M_UNRECOGNIZED","error":"Unrecognized request"}'
+        )
+
+        # We should get another request with a trailing slash
+        self.assertRegex(conn.value(), b"^GET /foo/bar/")
+
+        # Send a happy response this time
+        client.dataReceived(
+            b"HTTP/1.1 200 OK\r\n"
+            b"Content-Type: application/json\r\n"
+            b"Content-Length: 2\r\n"
+            b"\r\n"
+            b'{}'
+        )
+
+        # We should get a successful response
+        r = self.successResultOf(d)
+        self.assertEqual(r, {})
+
+    def test_client_does_not_retry_on_400_plus(self):
+        """
+        Another test for trailing slashes, but this time checking that we do
+        not retry with a trailing slash on a non-400/M_UNRECOGNIZED response.
+
+        See test_client_requires_trailing_slashes() for context.
+        """
+        d = self.cl.get_json(
+            "testserv:8008", "foo/bar", try_trailing_slash_on_400=True,
+        )
+
+        # Send the request
+        self.pump()
+
+        # there should have been a call to connectTCP
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (_host, _port, factory, _timeout, _bindAddress) = clients[0]
+
+        # complete the connection and wire it up to a fake transport
+        client = factory.buildProtocol(None)
+        conn = StringTransport()
+        client.makeConnection(conn)
+
+        # that should have made it send the request to the connection
+        self.assertRegex(conn.value(), b"^GET /foo/bar")
+
+        # Clear the original request data before sending a response
+        conn.clear()
+
+        # Send the HTTP response
+        client.dataReceived(
+            b"HTTP/1.1 404 Not Found\r\n"
+            b"Content-Type: application/json\r\n"
+            b"Content-Length: 2\r\n"
+            b"\r\n"
+            b"{}"
+        )
+
+        # We should not get another request
+        self.assertEqual(conn.value(), b"")
+
+        # We should get a 404 failure response
+        self.failureResultOf(d)
+
     def test_client_sends_body(self):
         self.cl.post_json(
             "testserv:8008", "foo/bar", timeout=10000,
diff --git a/tests/replication/tcp/__init__.py b/tests/replication/tcp/__init__.py
new file mode 100644
index 0000000000..1453d04571
--- /dev/null
+++ b/tests/replication/tcp/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/replication/tcp/streams/__init__.py b/tests/replication/tcp/streams/__init__.py
new file mode 100644
index 0000000000..1453d04571
--- /dev/null
+++ b/tests/replication/tcp/streams/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/replication/tcp/streams/_base.py b/tests/replication/tcp/streams/_base.py
new file mode 100644
index 0000000000..38b368a972
--- /dev/null
+++ b/tests/replication/tcp/streams/_base.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.replication.tcp.commands import ReplicateCommand
+from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol
+from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
+
+from tests import unittest
+from tests.server import FakeTransport
+
+
+class BaseStreamTestCase(unittest.HomeserverTestCase):
+    """Base class for tests of the replication streams"""
+    def prepare(self, reactor, clock, hs):
+        # build a replication server
+        server_factory = ReplicationStreamProtocolFactory(self.hs)
+        self.streamer = server_factory.streamer
+        server = server_factory.buildProtocol(None)
+
+        # build a replication client, with a dummy handler
+        self.test_handler = TestReplicationClientHandler()
+        self.client = ClientReplicationStreamProtocol(
+            "client", "test", clock, self.test_handler
+        )
+
+        # wire them together
+        self.client.makeConnection(FakeTransport(server, reactor))
+        server.makeConnection(FakeTransport(self.client, reactor))
+
+    def replicate(self):
+        """Tell the master side of replication that something has happened, and then
+        wait for the replication to occur.
+        """
+        self.streamer.on_notifier_poke()
+        self.pump(0.1)
+
+    def replicate_stream(self, stream, token="NOW"):
+        """Make the client end a REPLICATE command to set up a subscription to a stream"""
+        self.client.send_command(ReplicateCommand(stream, token))
+
+
+class TestReplicationClientHandler(object):
+    """Drop-in for ReplicationClientHandler which just collects RDATA rows"""
+    def __init__(self):
+        self.received_rdata_rows = []
+
+    def get_streams_to_replicate(self):
+        return {}
+
+    def get_currently_syncing_users(self):
+        return []
+
+    def update_connection(self, connection):
+        pass
+
+    def finished_connecting(self):
+        pass
+
+    def on_rdata(self, stream_name, token, rows):
+        for r in rows:
+            self.received_rdata_rows.append(
+                (stream_name, token, r)
+            )
diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py
new file mode 100644
index 0000000000..9aa9dfe82e
--- /dev/null
+++ b/tests/replication/tcp/streams/test_receipts.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.replication.tcp.streams import ReceiptsStreamRow
+
+from tests.replication.tcp.streams._base import BaseStreamTestCase
+
+USER_ID = "@feeling:blue"
+ROOM_ID = "!room:blue"
+EVENT_ID = "$event:blue"
+
+
+class ReceiptsStreamTestCase(BaseStreamTestCase):
+    def test_receipt(self):
+        # make the client subscribe to the receipts stream
+        self.replicate_stream("receipts", "NOW")
+
+        # tell the master to send a new receipt
+        self.get_success(
+            self.hs.get_datastore().insert_receipt(
+                ROOM_ID, "m.read", USER_ID, [EVENT_ID], {"a": 1}
+            )
+        )
+        self.replicate()
+
+        # there should be one RDATA command
+        rdata_rows = self.test_handler.received_rdata_rows
+        self.assertEqual(1, len(rdata_rows))
+        self.assertEqual(rdata_rows[0][0], "receipts")
+        row = rdata_rows[0][2]  # type: ReceiptsStreamRow
+        self.assertEqual(ROOM_ID, row.room_id)
+        self.assertEqual("m.read", row.receipt_type)
+        self.assertEqual(USER_ID, row.user_id)
+        self.assertEqual(EVENT_ID, row.event_id)
+        self.assertEqual({"a": 1}, row.data)
diff --git a/tests/rest/client/v1/test_admin.py b/tests/rest/client/v1/test_admin.py
index 0caa4aa802..ef38473bd6 100644
--- a/tests/rest/client/v1/test_admin.py
+++ b/tests/rest/client/v1/test_admin.py
@@ -20,7 +20,7 @@ import json
 from mock import Mock
 
 from synapse.api.constants import UserTypes
-from synapse.rest.client.v1 import admin, login, room
+from synapse.rest.client.v1 import admin, events, login, room
 
 from tests import unittest
 
@@ -359,7 +359,9 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase):
     servlets = [
         admin.register_servlets,
         login.register_servlets,
+        events.register_servlets,
         room.register_servlets,
+        room.register_deprecated_servlets,
     ]
 
     def prepare(self, reactor, clock, hs):
@@ -426,3 +428,64 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase):
             self.store.get_users_in_room(room_id),
         )
         self.assertEqual([], users_in_room)
+
+    def test_shutdown_room_block_peek(self):
+        """Test that a world_readable room can no longer be peeked into after
+        it has been shut down.
+        """
+
+        self.event_creation_handler._block_events_without_consent_error = None
+
+        room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_token)
+
+        # Enable world readable
+        url = "rooms/%s/state/m.room.history_visibility" % (room_id,)
+        request, channel = self.make_request(
+            "PUT",
+            url.encode('ascii'),
+            json.dumps({"history_visibility": "world_readable"}),
+            access_token=self.other_user_token,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Test that the admin can still send shutdown
+        url = "admin/shutdown_room/" + room_id
+        request, channel = self.make_request(
+            "POST",
+            url.encode('ascii'),
+            json.dumps({"new_room_user_id": self.admin_user}),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+        # Assert we can no longer peek into the room
+        self._assert_peek(room_id, expect_code=403)
+
+    def _assert_peek(self, room_id, expect_code):
+        """Assert that the admin user can (or cannot) peek into the room.
+        """
+
+        url = "rooms/%s/initialSync" % (room_id,)
+        request, channel = self.make_request(
+            "GET",
+            url.encode('ascii'),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(
+            expect_code, int(channel.result["code"]), msg=channel.result["body"],
+        )
+
+        url = "events?timeout=0&room_id=" + room_id
+        request, channel = self.make_request(
+            "GET",
+            url.encode('ascii'),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(
+            expect_code, int(channel.result["code"]), msg=channel.result["body"],
+        )
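The peek check above drives both endpoints through the test harness's make_request(). For reference, a rough standalone sketch of the same check against a live homeserver; the base URL, token and the requests dependency are illustrative assumptions and are not part of the patch:

import requests

BASE = "https://hs.example.com/_matrix/client/r0"  # placeholder homeserver URL
TOKEN = "placeholder_admin_access_token"           # placeholder access token


def can_peek(room_id):
    """Return True if both peek endpoints answer 200 for the given room."""
    r1 = requests.get(
        "%s/rooms/%s/initialSync" % (BASE, room_id),
        params={"access_token": TOKEN},
    )
    r2 = requests.get(
        "%s/events" % (BASE,),
        params={"access_token": TOKEN, "timeout": 0, "room_id": room_id},
    )
    return r1.status_code == 200 and r2.status_code == 200

After the admin shutdown_room call, both requests should come back 403, which is what _assert_peek(room_id, expect_code=403) asserts inside the test.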
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index 9c401bf300..05b0143c42 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -18,136 +18,11 @@ import time
 
 import attr
 
-from twisted.internet import defer
-
 from synapse.api.constants import Membership
 
-from tests import unittest
 from tests.server import make_request, render
 
 
-class RestTestCase(unittest.TestCase):
-    """Contains extra helper functions to quickly and clearly perform a given
-    REST action, which isn't the focus of the test.
-
-    This subclass assumes there are mock_resource and auth_user_id attributes.
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(RestTestCase, self).__init__(*args, **kwargs)
-        self.mock_resource = None
-        self.auth_user_id = None
-
-    @defer.inlineCallbacks
-    def create_room_as(self, room_creator, is_public=True, tok=None):
-        temp_id = self.auth_user_id
-        self.auth_user_id = room_creator
-        path = "/createRoom"
-        content = "{}"
-        if not is_public:
-            content = '{"visibility":"private"}'
-        if tok:
-            path = path + "?access_token=%s" % tok
-        (code, response) = yield self.mock_resource.trigger("POST", path, content)
-        self.assertEquals(200, code, msg=str(response))
-        self.auth_user_id = temp_id
-        defer.returnValue(response["room_id"])
-
-    @defer.inlineCallbacks
-    def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
-        yield self.change_membership(
-            room=room,
-            src=src,
-            targ=targ,
-            tok=tok,
-            membership=Membership.INVITE,
-            expect_code=expect_code,
-        )
-
-    @defer.inlineCallbacks
-    def join(self, room=None, user=None, expect_code=200, tok=None):
-        yield self.change_membership(
-            room=room,
-            src=user,
-            targ=user,
-            tok=tok,
-            membership=Membership.JOIN,
-            expect_code=expect_code,
-        )
-
-    @defer.inlineCallbacks
-    def leave(self, room=None, user=None, expect_code=200, tok=None):
-        yield self.change_membership(
-            room=room,
-            src=user,
-            targ=user,
-            tok=tok,
-            membership=Membership.LEAVE,
-            expect_code=expect_code,
-        )
-
-    @defer.inlineCallbacks
-    def change_membership(self, room, src, targ, membership, tok=None, expect_code=200):
-        temp_id = self.auth_user_id
-        self.auth_user_id = src
-
-        path = "/rooms/%s/state/m.room.member/%s" % (room, targ)
-        if tok:
-            path = path + "?access_token=%s" % tok
-
-        data = {"membership": membership}
-
-        (code, response) = yield self.mock_resource.trigger(
-            "PUT", path, json.dumps(data)
-        )
-        self.assertEquals(
-            expect_code,
-            code,
-            msg="Expected: %d, got: %d, resp: %r" % (expect_code, code, response),
-        )
-
-        self.auth_user_id = temp_id
-
-    @defer.inlineCallbacks
-    def register(self, user_id):
-        (code, response) = yield self.mock_resource.trigger(
-            "POST",
-            "/register",
-            json.dumps(
-                {"user": user_id, "password": "test", "type": "m.login.password"}
-            ),
-        )
-        self.assertEquals(200, code, msg=response)
-        defer.returnValue(response)
-
-    @defer.inlineCallbacks
-    def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200):
-        if txn_id is None:
-            txn_id = "m%s" % (str(time.time()))
-        if body is None:
-            body = "body_text_here"
-
-        path = "/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
-        content = '{"msgtype":"m.text","body":"%s"}' % body
-        if tok:
-            path = path + "?access_token=%s" % tok
-
-        (code, response) = yield self.mock_resource.trigger("PUT", path, content)
-        self.assertEquals(expect_code, code, msg=str(response))
-
-    def assert_dict(self, required, actual):
-        """Does a partial assert of a dict.
-
-        Args:
-            required (dict): The keys and value which MUST be in 'actual'.
-            actual (dict): The test result. Extra keys will not be checked.
-        """
-        for key in required:
-            self.assertEquals(
-                required[key], actual[key], msg="%s mismatch. %s" % (key, actual)
-            )
-
-
 @attr.s
 class RestHelper(object):
     """Contains extra helper functions to quickly and clearly perform a given
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index 3bd9f1e9c1..be73e718c2 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -1,3 +1,18 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018, 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from mock import Mock
 
 from twisted.internet import defer
@@ -9,16 +24,18 @@ from synapse.server_notices.resource_limits_server_notices import (
 )
 
 from tests import unittest
-from tests.utils import default_config, setup_test_homeserver
 
 
-class TestResourceLimitsServerNotices(unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs_config = default_config(name="test")
+class TestResourceLimitsServerNotices(unittest.HomeserverTestCase):
+
+    def make_homeserver(self, reactor, clock):
+        hs_config = self.default_config("test")
         hs_config.server_notices_mxid = "@server:test"
 
-        self.hs = yield setup_test_homeserver(self.addCleanup, config=hs_config)
+        hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True)
+        return hs
+
+    def prepare(self, reactor, clock, hs):
         self.server_notices_sender = self.hs.get_server_notices_sender()
 
         # relying on [1] is far from ideal, but the only case where
@@ -53,23 +70,21 @@ class TestResourceLimitsServerNotices(unittest.TestCase):
         self._rlsn._store.get_tags_for_room = Mock(return_value={})
         self.hs.config.admin_contact = "mailto:user@test.com"
 
-    @defer.inlineCallbacks
     def test_maybe_send_server_notice_to_user_flag_off(self):
         """Tests cases where the flags indicate nothing to do"""
         # test hs disabled case
         self.hs.config.hs_disabled = True
 
-        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
 
         self._send_notice.assert_not_called()
         # Test when mau limiting disabled
         self.hs.config.hs_disabled = False
         self.hs.limit_usage_by_mau = False
-        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
 
         self._send_notice.assert_not_called()
 
-    @defer.inlineCallbacks
     def test_maybe_send_server_notice_to_user_remove_blocked_notice(self):
         """Test when user has blocked notice, but should have it removed"""
 
@@ -81,13 +96,14 @@ class TestResourceLimitsServerNotices(unittest.TestCase):
             return_value=defer.succeed({"123": mock_event})
         )
 
-        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
         # Would be better to check the content, but once == remove blocking event
         self._send_notice.assert_called_once()
 
-    @defer.inlineCallbacks
     def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self):
-        """Test when user has blocked notice, but notice ought to be there (NOOP)"""
+        """
+        Test when user has blocked notice, but notice ought to be there (NOOP)
+        """
         self._rlsn._auth.check_auth_blocking = Mock(
             side_effect=ResourceLimitError(403, 'foo')
         )
@@ -98,52 +114,49 @@ class TestResourceLimitsServerNotices(unittest.TestCase):
         self._rlsn._store.get_events = Mock(
             return_value=defer.succeed({"123": mock_event})
         )
-        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
 
         self._send_notice.assert_not_called()
 
-    @defer.inlineCallbacks
     def test_maybe_send_server_notice_to_user_add_blocked_notice(self):
-        """Test when user does not have blocked notice, but should have one"""
+        """
+        Test when user does not have blocked notice, but should have one
+        """
 
         self._rlsn._auth.check_auth_blocking = Mock(
             side_effect=ResourceLimitError(403, 'foo')
         )
-        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
 
         # Would be better to check contents, but 2 calls == set blocking event
         self.assertTrue(self._send_notice.call_count == 2)
 
-    @defer.inlineCallbacks
     def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self):
-        """Test when user does not have blocked notice, nor should they (NOOP)"""
-
+        """
+        Test when user does not have blocked notice, nor should they (NOOP)
+        """
         self._rlsn._auth.check_auth_blocking = Mock()
 
-        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
 
         self._send_notice.assert_not_called()
 
-    @defer.inlineCallbacks
     def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self):
-
-        """Test when user is not part of the MAU cohort - this should not ever
+        """
+        Test when user is not part of the MAU cohort - this should not ever
         happen - but ...
         """
-
         self._rlsn._auth.check_auth_blocking = Mock()
         self._rlsn._store.user_last_seen_monthly_active = Mock(
             return_value=defer.succeed(None)
         )
-        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
 
         self._send_notice.assert_not_called()
 
 
-class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        self.hs = yield setup_test_homeserver(self.addCleanup)
+class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
         self.store = self.hs.get_datastore()
         self.server_notices_sender = self.hs.get_server_notices_sender()
         self.server_notices_manager = self.hs.get_server_notices_manager()
@@ -168,26 +181,27 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase):
 
         self.hs.config.admin_contact = "mailto:user@test.com"
 
-    @defer.inlineCallbacks
     def test_server_notice_only_sent_once(self):
         self.store.get_monthly_active_count = Mock(return_value=1000)
 
         self.store.user_last_seen_monthly_active = Mock(return_value=1000)
 
         # Call the function multiple times to ensure we only send the notice once
-        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
-        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
-        yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
+        self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
 
         # Now lets get the last load of messages in the service notice room and
         # check that there is only one server notice
-        room_id = yield self.server_notices_manager.get_notice_room_for_user(
-            self.user_id
+        room_id = self.get_success(
+            self.server_notices_manager.get_notice_room_for_user(self.user_id)
         )
 
-        token = yield self.event_source.get_current_token()
-        events, _ = yield self.store.get_recent_events_for_room(
-            room_id, limit=100, end_token=token.room_key
+        token = self.get_success(self.event_source.get_current_token())
+        events, _ = self.get_success(
+            self.store.get_recent_events_for_room(
+                room_id, limit=100, end_token=token.room_key
+            )
         )
 
         count = 0
diff --git a/tests/unittest.py b/tests/unittest.py
index 7772a47078..27403de908 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -314,6 +314,9 @@ class HomeserverTestCase(TestCase):
         """
         kwargs = dict(kwargs)
         kwargs.update(self._hs_args)
+        if "config" not in kwargs:
+            config = self.default_config()
+            kwargs["config"] = config
         hs = setup_test_homeserver(self.addCleanup, *args, **kwargs)
         stor = hs.get_datastore()
 
@@ -336,6 +339,15 @@ class HomeserverTestCase(TestCase):
         self.pump(by=by)
         return self.successResultOf(d)
 
+    def get_failure(self, d, exc):
+        """
+        Run a Deferred and get a Failure from it. The failure must be of the type `exc`.
+        """
+        if not isinstance(d, Deferred):
+            return d
+        self.pump()
+        return self.failureResultOf(d, exc)
+
     def register_user(self, username, password, admin=False):
         """
         Register a user. Requires the Admin API be registered.
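The new get_failure helper mirrors get_success for Deferreds that are expected to fail. A hypothetical usage sketch follows; frobnicate and FrobnicationError are invented names for illustration only, not Synapse APIs:

from twisted.internet import defer

from tests import unittest


class FrobnicationError(Exception):
    pass


def frobnicate(should_fail):
    """Toy async function standing in for a real homeserver method."""
    if should_fail:
        return defer.fail(FrobnicationError("boom"))
    return defer.succeed("ok")


class GetFailureExampleTestCase(unittest.HomeserverTestCase):
    def test_frobnication(self):
        # get_success unwraps the result of a successful Deferred...
        self.assertEqual(self.get_success(frobnicate(False)), "ok")

        # ...while get_failure pumps the reactor, checks that the Deferred
        # failed with the given exception type, and returns the Failure,
        # whose .value attribute is the exception instance itself.
        failure = self.get_failure(frobnicate(True), FrobnicationError)
        self.assertEqual(str(failure.value), "boom")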
diff --git a/tests/utils.py b/tests/utils.py
index 67e99a0e40..1b8eeb5167 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018-2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -43,6 +44,10 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.ratelimitutils import FederationRateLimiter
 
 # set this to True to run the tests against postgres instead of sqlite.
+#
+# When running under postgres, we first create a base database with the name
+# POSTGRES_BASE_DB and update it to the current schema. Then, for each test case, we
+# create another unique database, using the base database as a template.
 USE_POSTGRES_FOR_TESTS = os.environ.get("SYNAPSE_POSTGRES", False)
 LEAVE_DB = os.environ.get("SYNAPSE_LEAVE_DB", False)
 POSTGRES_USER = os.environ.get("SYNAPSE_POSTGRES_USER", None)
@@ -50,28 +55,20 @@ POSTGRES_HOST = os.environ.get("SYNAPSE_POSTGRES_HOST", None)
 POSTGRES_PASSWORD = os.environ.get("SYNAPSE_POSTGRES_PASSWORD", None)
 POSTGRES_BASE_DB = "_synapse_unit_tests_base_%s" % (os.getpid(),)
 
+# the dbname we will connect to in order to create the base database.
+POSTGRES_DBNAME_FOR_INITIAL_CREATE = "postgres"
 
-def setupdb():
 
+def setupdb():
     # If we're using PostgreSQL, set up the db once
     if USE_POSTGRES_FOR_TESTS:
-        pgconfig = {
-            "name": "psycopg2",
-            "args": {
-                "database": POSTGRES_BASE_DB,
-                "user": POSTGRES_USER,
-                "host": POSTGRES_HOST,
-                "password": POSTGRES_PASSWORD,
-                "cp_min": 1,
-                "cp_max": 5,
-            },
-        }
-        config = Mock()
-        config.password_providers = []
-        config.database_config = pgconfig
-        db_engine = create_engine(pgconfig)
+        # create a PostgresEngine
+        db_engine = create_engine({"name": "psycopg2", "args": {}})
+
+        # connect to postgres to create the base database.
         db_conn = db_engine.module.connect(
-            user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD
+            user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD,
+            dbname=POSTGRES_DBNAME_FOR_INITIAL_CREATE,
         )
         db_conn.autocommit = True
         cur = db_conn.cursor()
@@ -96,7 +93,8 @@ def setupdb():
 
         def _cleanup():
             db_conn = db_engine.module.connect(
-                user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD
+                user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD,
+                dbname=POSTGRES_DBNAME_FOR_INITIAL_CREATE,
             )
             db_conn.autocommit = True
             cur = db_conn.cursor()
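The comment added to tests/utils.py above describes the base-database-as-template strategy: migrate one base database once, then clone it for each test case. A rough illustration of the underlying Postgres mechanism, using psycopg2 as the test harness does; the connection parameters and database names here are placeholders, not the values the harness itself uses:

import psycopg2

# connect to the maintenance database; CREATE DATABASE cannot run inside a
# transaction, so autocommit is required
conn = psycopg2.connect(
    user="synapse_test", password="secret", host="localhost", dbname="postgres"
)
conn.autocommit = True
cur = conn.cursor()

# one-off: create the base database (schema migration omitted from this sketch)
cur.execute("CREATE DATABASE synapse_tests_base")

# per test case: clone the migrated base database via Postgres' template
# mechanism, which is far cheaper than replaying the full schema each time
cur.execute("CREATE DATABASE synapse_test_case_1 TEMPLATE synapse_tests_base")

cur.close()
conn.close()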