author     Olivier Wilkinson (reivilibre) <olivier@librepush.net>  2019-08-14 08:56:24 +0100
committer  Olivier Wilkinson (reivilibre) <olivier@librepush.net>  2019-08-14 08:56:24 +0100
commit     703f9ff3c9e757caf4c17b2d34dd500f1ad99dd6 (patch)
tree       c1505d3d2e4dbce686e5617032db2df87182824a /synapse
parent     Newsfile (diff)
parent     1.3.0rc1 (diff)
download   synapse-703f9ff3c9e757caf4c17b2d34dd500f1ad99dd6.tar.xz
Merge branch 'develop' into rei/room_stats_separated
Diffstat (limited to 'synapse')
-rw-r--r--  synapse/__init__.py | 2
-rw-r--r--  synapse/api/auth.py | 72
-rw-r--r--  synapse/api/errors.py | 3
-rw-r--r--  synapse/api/filtering.py | 2
-rw-r--r--  synapse/app/_base.py | 29
-rw-r--r--  synapse/app/appservice.py | 4
-rw-r--r--  synapse/app/client_reader.py | 4
-rw-r--r--  synapse/app/event_creator.py | 4
-rw-r--r--  synapse/app/federation_reader.py | 4
-rw-r--r--  synapse/app/federation_sender.py | 4
-rw-r--r--  synapse/app/frontend_proxy.py | 12
-rw-r--r-- [-rwxr-xr-x]  synapse/app/homeserver.py | 4
-rw-r--r--  synapse/app/media_repository.py | 13
-rw-r--r--  synapse/app/pusher.py | 2
-rw-r--r--  synapse/app/synchrotron.py | 4
-rw-r--r--  synapse/app/user_dir.py | 4
-rw-r--r--  synapse/appservice/__init__.py | 28
-rw-r--r--  synapse/appservice/api.py | 38
-rw-r--r--  synapse/appservice/scheduler.py | 4
-rw-r--r--  synapse/config/key.py | 6
-rw-r--r--  synapse/config/registration.py | 45
-rw-r--r--  synapse/config/repository.py | 20
-rw-r--r--  synapse/config/server.py | 41
-rw-r--r--  synapse/config/tracer.py | 22
-rw-r--r--  synapse/crypto/context_factory.py | 8
-rw-r--r--  synapse/crypto/keyring.py | 14
-rw-r--r--  synapse/events/builder.py | 16
-rw-r--r--  synapse/events/snapshot.py | 28
-rw-r--r--  synapse/events/third_party_rules.py | 8
-rw-r--r--  synapse/events/utils.py | 4
-rw-r--r--  synapse/events/validator.py | 20
-rw-r--r--  synapse/federation/federation_base.py | 6
-rw-r--r--  synapse/federation/federation_client.py | 89
-rw-r--r--  synapse/federation/federation_server.py | 81
-rw-r--r--  synapse/federation/sender/per_destination_queue.py | 4
-rw-r--r--  synapse/federation/sender/transaction_manager.py | 2
-rw-r--r--  synapse/federation/transport/client.py | 61
-rw-r--r--  synapse/federation/transport/server.py | 4
-rw-r--r--  synapse/groups/attestations.py | 2
-rw-r--r--  synapse/groups/groups_server.py | 92
-rw-r--r--  synapse/handlers/account_data.py | 4
-rw-r--r--  synapse/handlers/account_validity.py | 16
-rw-r--r--  synapse/handlers/acme.py | 2
-rw-r--r--  synapse/handlers/admin.py | 10
-rw-r--r--  synapse/handlers/appservice.py | 22
-rw-r--r--  synapse/handlers/auth.py | 46
-rw-r--r--  synapse/handlers/deactivate_account.py | 2
-rw-r--r--  synapse/handlers/device.py | 172
-rw-r--r--  synapse/handlers/directory.py | 15
-rw-r--r--  synapse/handlers/e2e_keys.py | 71
-rw-r--r--  synapse/handlers/e2e_room_keys.py | 8
-rw-r--r--  synapse/handlers/events.py | 6
-rw-r--r--  synapse/handlers/federation.py | 147
-rw-r--r--  synapse/handlers/groups_local.py | 115
-rw-r--r--  synapse/handlers/identity.py | 18
-rw-r--r--  synapse/handlers/initial_sync.py | 54
-rw-r--r--  synapse/handlers/message.py | 44
-rw-r--r--  synapse/handlers/pagination.py | 14
-rw-r--r--  synapse/handlers/presence.py | 56
-rw-r--r--  synapse/handlers/profile.py | 18
-rw-r--r--  synapse/handlers/receipts.py | 49
-rw-r--r--  synapse/handlers/register.py | 16
-rw-r--r--  synapse/handlers/room.py | 16
-rw-r--r--  synapse/handlers/room_list.py | 10
-rw-r--r--  synapse/handlers/room_member.py | 126
-rw-r--r--  synapse/handlers/room_member_worker.py | 2
-rw-r--r--  synapse/handlers/search.py | 14
-rw-r--r--  synapse/handlers/state_deltas.py | 8
-rw-r--r--  synapse/handlers/stats.py | 6
-rw-r--r--  synapse/handlers/sync.py | 155
-rw-r--r--  synapse/handlers/typing.py | 6
-rw-r--r--  synapse/handlers/user_directory.py | 2
-rw-r--r--  synapse/http/client.py | 28
-rw-r--r--  synapse/http/federation/matrix_federation_agent.py | 214
-rw-r--r--  synapse/http/federation/srv_resolver.py | 8
-rw-r--r--  synapse/http/federation/well_known_resolver.py | 187
-rw-r--r--  synapse/http/matrixfederationclient.py | 16
-rw-r--r--  synapse/http/server.py | 41
-rw-r--r--  synapse/http/servlet.py | 11
-rw-r--r--  synapse/logging/opentracing.py | 470
-rw-r--r--  synapse/logging/scopecontextmanager.py | 2
-rw-r--r--  synapse/module_api/__init__.py | 2
-rw-r--r--  synapse/notifier.py | 18
-rw-r--r--  synapse/push/baserules.py | 8
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py | 10
-rw-r--r--  synapse/push/emailpusher.py | 18
-rw-r--r--  synapse/push/httppusher.py | 45
-rw-r--r--  synapse/push/mailer.py | 84
-rw-r--r--  synapse/push/presentable_names.py | 25
-rw-r--r--  synapse/push/push_tools.py | 4
-rw-r--r--  synapse/push/pusherpool.py | 6
-rw-r--r--  synapse/python_dependencies.py | 1
-rw-r--r--  synapse/replication/http/_base.py | 4
-rw-r--r--  synapse/replication/http/federation.py | 10
-rw-r--r--  synapse/replication/http/login.py | 2
-rw-r--r--  synapse/replication/http/membership.py | 4
-rw-r--r--  synapse/replication/http/register.py | 4
-rw-r--r--  synapse/replication/http/send_event.py | 4
-rw-r--r--  synapse/replication/tcp/streams/_base.py | 12
-rw-r--r--  synapse/replication/tcp/streams/events.py | 2
-rw-r--r--  synapse/res/templates/account_renewed.html | 1
-rw-r--r--  synapse/res/templates/invalid_token.html | 1
-rw-r--r--  synapse/rest/admin/__init__.py | 144
-rw-r--r--  synapse/rest/admin/_base.py | 25
-rw-r--r--  synapse/rest/admin/media.py | 101
-rw-r--r--  synapse/rest/admin/server_notice_servlet.py | 11
-rw-r--r--  synapse/rest/client/v1/directory.py | 18
-rw-r--r--  synapse/rest/client/v1/events.py | 6
-rw-r--r--  synapse/rest/client/v1/initial_sync.py | 2
-rw-r--r--  synapse/rest/client/v1/login.py | 14
-rw-r--r--  synapse/rest/client/v1/logout.py | 4
-rw-r--r--  synapse/rest/client/v1/presence.py | 4
-rw-r--r--  synapse/rest/client/v1/profile.py | 14
-rw-r--r--  synapse/rest/client/v1/push_rule.py | 10
-rw-r--r--  synapse/rest/client/v1/pusher.py | 8
-rw-r--r--  synapse/rest/client/v1/room.py | 95
-rw-r--r--  synapse/rest/client/v1/voip.py | 22
-rw-r--r--  synapse/rest/client/v2_alpha/account.py | 32
-rw-r--r--  synapse/rest/client/v2_alpha/account_data.py | 8
-rw-r--r--  synapse/rest/client/v2_alpha/account_validity.py | 21
-rw-r--r--  synapse/rest/client/v2_alpha/auth.py | 4
-rw-r--r--  synapse/rest/client/v2_alpha/capabilities.py | 2
-rw-r--r--  synapse/rest/client/v2_alpha/devices.py | 10
-rw-r--r--  synapse/rest/client/v2_alpha/filter.py | 4
-rw-r--r--  synapse/rest/client/v2_alpha/groups.py | 64
-rw-r--r--  synapse/rest/client/v2_alpha/keys.py | 8
-rw-r--r--  synapse/rest/client/v2_alpha/notifications.py | 4
-rw-r--r--  synapse/rest/client/v2_alpha/openid.py | 18
-rw-r--r--  synapse/rest/client/v2_alpha/read_marker.py | 2
-rw-r--r--  synapse/rest/client/v2_alpha/receipts.py | 2
-rw-r--r--  synapse/rest/client/v2_alpha/register.py | 38
-rw-r--r--  synapse/rest/client/v2_alpha/relations.py | 10
-rw-r--r--  synapse/rest/client/v2_alpha/report_event.py | 2
-rw-r--r--  synapse/rest/client/v2_alpha/room_keys.py | 14
-rw-r--r--  synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py | 2
-rw-r--r--  synapse/rest/client/v2_alpha/sendtodevice.py | 2
-rw-r--r--  synapse/rest/client/v2_alpha/sync.py | 48
-rw-r--r--  synapse/rest/client/v2_alpha/tags.py | 6
-rw-r--r--  synapse/rest/client/v2_alpha/thirdparty.py | 10
-rw-r--r--  synapse/rest/client/v2_alpha/user_directory.py | 4
-rw-r--r--  synapse/rest/media/v1/media_repository.py | 24
-rw-r--r--  synapse/rest/media/v1/media_storage.py | 12
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py | 34
-rw-r--r--  synapse/server_notices/resource_limits_server_notices.py | 2
-rw-r--r--  synapse/server_notices/server_notices_manager.py | 6
-rw-r--r--  synapse/state/__init__.py | 38
-rw-r--r--  synapse/state/v1.py | 8
-rw-r--r--  synapse/state/v2.py | 26
-rw-r--r--  synapse/storage/__init__.py | 2
-rw-r--r--  synapse/storage/_base.py | 14
-rw-r--r--  synapse/storage/account_data.py | 14
-rw-r--r--  synapse/storage/appservice.py | 14
-rw-r--r--  synapse/storage/background_updates.py | 20
-rw-r--r--  synapse/storage/client_ips.py | 26
-rw-r--r--  synapse/storage/deviceinbox.py | 10
-rw-r--r--  synapse/storage/devices.py | 32
-rw-r--r--  synapse/storage/directory.py | 10
-rw-r--r--  synapse/storage/e2e_room_keys.py | 6
-rw-r--r--  synapse/storage/end_to_end_keys.py | 8
-rw-r--r--  synapse/storage/event_federation.py | 12
-rw-r--r--  synapse/storage/event_push_actions.py | 16
-rw-r--r--  synapse/storage/events.py | 304
-rw-r--r--  synapse/storage/events_bg_updates.py | 6
-rw-r--r--  synapse/storage/events_worker.py | 255
-rw-r--r--  synapse/storage/filtering.py | 4
-rw-r--r--  synapse/storage/group_server.py | 38
-rw-r--r--  synapse/storage/monthly_active_users.py | 2
-rw-r--r--  synapse/storage/presence.py | 6
-rw-r--r--  synapse/storage/profile.py | 12
-rw-r--r--  synapse/storage/push_rule.py | 18
-rw-r--r--  synapse/storage/pusher.py | 40
-rw-r--r--  synapse/storage/receipts.py | 36
-rw-r--r--  synapse/storage/registration.py | 74
-rw-r--r--  synapse/storage/relations.py | 4
-rw-r--r--  synapse/storage/room.py | 10
-rw-r--r--  synapse/storage/roommember.py | 238
-rw-r--r--  synapse/storage/schema/delta/56/current_state_events_membership.sql | 3
-rw-r--r--  synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql | 24
-rw-r--r--  synapse/storage/schema/delta/56/room_membership_idx.sql | 18
-rw-r--r--  synapse/storage/search.py | 56
-rw-r--r--  synapse/storage/signatures.py | 2
-rw-r--r--  synapse/storage/state.py | 54
-rw-r--r--  synapse/storage/stats.py | 48
-rw-r--r--  synapse/storage/stream.py | 44
-rw-r--r--  synapse/storage/tags.py | 12
-rw-r--r--  synapse/storage/transactions.py | 4
-rw-r--r--  synapse/storage/user_directory.py | 30
-rw-r--r--  synapse/storage/user_erasure_store.py | 5
-rw-r--r--  synapse/streams/events.py | 4
-rw-r--r--  synapse/util/__init__.py | 8
-rw-r--r--  synapse/util/async_helpers.py | 4
-rw-r--r--  synapse/util/caches/__init__.py | 17
-rw-r--r--  synapse/util/caches/descriptors.py | 94
-rw-r--r--  synapse/util/caches/response_cache.py | 2
-rw-r--r--  synapse/util/metrics.py | 2
-rw-r--r--  synapse/util/retryutils.py | 16
-rw-r--r--  synapse/visibility.py | 8
197 files changed, 3493 insertions, 2438 deletions
diff --git a/synapse/__init__.py b/synapse/__init__.py
index f26e49da36..d2316c7df9 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -35,4 +35,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.2.0rc1"
+__version__ = "1.3.0rc1"
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 7ce6540bdd..179644852a 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -128,7 +128,7 @@ class Auth(object):
             )
 
         self._check_joined_room(member, user_id, room_id)
-        defer.returnValue(member)
+        return member
 
     @defer.inlineCallbacks
     def check_user_was_in_room(self, room_id, user_id):
@@ -156,13 +156,13 @@ class Auth(object):
             if forgot:
                 raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
 
-        defer.returnValue(member)
+        return member
 
     @defer.inlineCallbacks
     def check_host_in_room(self, room_id, host):
         with Measure(self.clock, "check_host_in_room"):
             latest_event_ids = yield self.store.is_host_joined(room_id, host)
-            defer.returnValue(latest_event_ids)
+            return latest_event_ids
 
     def _check_joined_room(self, member, user_id, room_id):
         if not member or member.membership != Membership.JOIN:
@@ -219,9 +219,7 @@ class Auth(object):
                         device_id="dummy-device",  # stubbed
                     )
 
-                defer.returnValue(
-                    synapse.types.create_requester(user_id, app_service=app_service)
-                )
+                return synapse.types.create_requester(user_id, app_service=app_service)
 
             user_info = yield self.get_user_by_access_token(access_token, rights)
             user = user_info["user"]
@@ -262,10 +260,8 @@ class Auth(object):
 
             request.authenticated_entity = user.to_string()
 
-            defer.returnValue(
-                synapse.types.create_requester(
-                    user, token_id, is_guest, device_id, app_service=app_service
-                )
+            return synapse.types.create_requester(
+                user, token_id, is_guest, device_id, app_service=app_service
             )
         except KeyError:
             raise MissingClientTokenError()
@@ -276,25 +272,25 @@ class Auth(object):
             self.get_access_token_from_request(request)
         )
         if app_service is None:
-            defer.returnValue((None, None))
+            return (None, None)
 
         if app_service.ip_range_whitelist:
             ip_address = IPAddress(self.hs.get_ip_from_request(request))
             if ip_address not in app_service.ip_range_whitelist:
-                defer.returnValue((None, None))
+                return (None, None)
 
         if b"user_id" not in request.args:
-            defer.returnValue((app_service.sender, app_service))
+            return (app_service.sender, app_service)
 
         user_id = request.args[b"user_id"][0].decode("utf8")
         if app_service.sender == user_id:
-            defer.returnValue((app_service.sender, app_service))
+            return (app_service.sender, app_service)
 
         if not app_service.is_interested_in_user(user_id):
             raise AuthError(403, "Application service cannot masquerade as this user.")
         if not (yield self.store.get_user_by_id(user_id)):
             raise AuthError(403, "Application service has not registered this user")
-        defer.returnValue((user_id, app_service))
+        return (user_id, app_service)
 
     @defer.inlineCallbacks
     def get_user_by_access_token(self, token, rights="access"):
@@ -330,7 +326,7 @@ class Auth(object):
                         msg="Access token has expired", soft_logout=True
                     )
 
-                defer.returnValue(r)
+                return r
 
         # otherwise it needs to be a valid macaroon
         try:
@@ -378,7 +374,7 @@ class Auth(object):
                 }
             else:
                 raise RuntimeError("Unknown rights setting %s", rights)
-            defer.returnValue(ret)
+            return ret
         except (
             _InvalidMacaroonException,
             pymacaroons.exceptions.MacaroonException,
@@ -414,21 +410,16 @@ class Auth(object):
         try:
             user_id = self.get_user_id_from_macaroon(macaroon)
 
-            has_expiry = False
             guest = False
             for caveat in macaroon.caveats:
-                if caveat.caveat_id.startswith("time "):
-                    has_expiry = True
-                elif caveat.caveat_id == "guest = true":
+                if caveat.caveat_id == "guest = true":
                     guest = True
 
-            self.validate_macaroon(
-                macaroon, rights, self.hs.config.expire_access_token, user_id=user_id
-            )
+            self.validate_macaroon(macaroon, rights, user_id=user_id)
         except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
             raise InvalidClientTokenError("Invalid macaroon passed.")
 
-        if not has_expiry and rights == "access":
+        if rights == "access":
             self.token_cache[token] = (user_id, guest)
 
         return user_id, guest
@@ -454,7 +445,7 @@ class Auth(object):
                 return caveat.caveat_id[len(user_prefix) :]
         raise InvalidClientTokenError("No user caveat in macaroon")
 
-    def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id):
+    def validate_macaroon(self, macaroon, type_string, user_id):
         """
         validate that a Macaroon is understood by and was signed by this server.
 
@@ -462,7 +453,6 @@ class Auth(object):
             macaroon(pymacaroons.Macaroon): The macaroon to validate
             type_string(str): The kind of token required (e.g. "access",
                               "delete_pusher")
-            verify_expiry(bool): Whether to verify whether the macaroon has expired.
             user_id (str): The user_id required
         """
         v = pymacaroons.Verifier()
@@ -475,19 +465,7 @@ class Auth(object):
         v.satisfy_exact("type = " + type_string)
         v.satisfy_exact("user_id = %s" % user_id)
         v.satisfy_exact("guest = true")
-
-        # verify_expiry should really always be True, but there exist access
-        # tokens in the wild which expire when they should not, so we can't
-        # enforce expiry yet (so we have to allow any caveat starting with
-        # 'time < ' in access tokens).
-        #
-        # On the other hand, short-term login tokens (as used by CAS login, for
-        # example) have an expiry time which we do want to enforce.
-
-        if verify_expiry:
-            v.satisfy_general(self._verify_expiry)
-        else:
-            v.satisfy_general(lambda c: c.startswith("time < "))
+        v.satisfy_general(self._verify_expiry)
 
         # access_tokens include a nonce for uniqueness: any value is acceptable
         v.satisfy_general(lambda c: c.startswith("nonce = "))
@@ -506,7 +484,7 @@ class Auth(object):
     def _look_up_user_by_access_token(self, token):
         ret = yield self.store.get_user_by_access_token(token)
         if not ret:
-            defer.returnValue(None)
+            return None
 
         # we use ret.get() below because *lots* of unit tests stub out
         # get_user_by_access_token in a way where it only returns a couple of
@@ -518,7 +496,7 @@ class Auth(object):
             "device_id": ret.get("device_id"),
             "valid_until_ms": ret.get("valid_until_ms"),
         }
-        defer.returnValue(user_info)
+        return user_info
 
     def get_appservice_by_req(self, request):
         token = self.get_access_token_from_request(request)
@@ -543,7 +521,7 @@ class Auth(object):
     @defer.inlineCallbacks
     def compute_auth_events(self, event, current_state_ids, for_verification=False):
         if event.type == EventTypes.Create:
-            defer.returnValue([])
+            return []
 
         auth_ids = []
 
@@ -604,7 +582,7 @@ class Auth(object):
             if member_event.content["membership"] == Membership.JOIN:
                 auth_ids.append(member_event.event_id)
 
-        defer.returnValue(auth_ids)
+        return auth_ids
 
     @defer.inlineCallbacks
     def check_can_change_room_list(self, room_id, user):
@@ -618,7 +596,7 @@ class Auth(object):
 
         is_admin = yield self.is_server_admin(user)
         if is_admin:
-            defer.returnValue(True)
+            return True
 
         user_id = user.to_string()
         yield self.check_joined_room(room_id, user_id)
@@ -712,7 +690,7 @@ class Auth(object):
             #  * The user is a guest user, and has joined the room
             # else it will throw.
             member_event = yield self.check_user_was_in_room(room_id, user_id)
-            defer.returnValue((member_event.membership, member_event.event_id))
+            return (member_event.membership, member_event.event_id)
         except AuthError:
             visibility = yield self.state.get_current_state(
                 room_id, EventTypes.RoomHistoryVisibility, ""
@@ -721,7 +699,7 @@ class Auth(object):
                 visibility
                 and visibility.content["history_visibility"] == "world_readable"
             ):
-                defer.returnValue((Membership.JOIN, None))
+                return (Membership.JOIN, None)
                 return
             raise AuthError(
                 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index ad3e262041..cf1ebf1af2 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -61,6 +61,7 @@ class Codes(object):
     INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
     WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
     EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
+    USER_DEACTIVATED = "M_USER_DEACTIVATED"
 
 
 class CodeMessageException(RuntimeError):
@@ -151,7 +152,7 @@ class UserDeactivatedError(SynapseError):
             msg (str): The human-readable error message
         """
         super(UserDeactivatedError, self).__init__(
-            code=http_client.FORBIDDEN, msg=msg, errcode=Codes.UNKNOWN
+            code=http_client.FORBIDDEN, msg=msg, errcode=Codes.USER_DEACTIVATED
         )
 
 
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 9b3daca29b..9f06556bd2 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -132,7 +132,7 @@ class Filtering(object):
     @defer.inlineCallbacks
     def get_user_filter(self, user_localpart, filter_id):
         result = yield self.store.get_user_filter(user_localpart, filter_id)
-        defer.returnValue(FilterCollection(result))
+        return FilterCollection(result)
 
     def add_user_filter(self, user_localpart, user_filter):
         self.check_valid_filter(user_filter)
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 540dbd9236..c010e70955 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -15,10 +15,12 @@
 
 import gc
 import logging
+import os
 import signal
 import sys
 import traceback
 
+import sdnotify
 from daemonize import Daemonize
 
 from twisted.internet import defer, error, reactor
@@ -242,9 +244,16 @@ def start(hs, listeners=None):
         if hasattr(signal, "SIGHUP"):
 
             def handle_sighup(*args, **kwargs):
+                # Tell systemd our state, if we're using it. This will silently fail if
+                # we're not using systemd.
+                sd_channel = sdnotify.SystemdNotifier()
+                sd_channel.notify("RELOADING=1")
+
                 for i in _sighup_callbacks:
                     i(hs)
 
+                sd_channel.notify("READY=1")
+
             signal.signal(signal.SIGHUP, handle_sighup)
 
             register_sighup(refresh_certificate)
@@ -260,6 +269,7 @@ def start(hs, listeners=None):
         hs.get_datastore().start_profiling()
 
         setup_sentry(hs)
+        setup_sdnotify(hs)
     except Exception:
         traceback.print_exc(file=sys.stderr)
         reactor = hs.get_reactor()
@@ -292,6 +302,25 @@ def setup_sentry(hs):
         scope.set_tag("worker_name", name)
 
 
+def setup_sdnotify(hs):
+    """Adds process state hooks to tell systemd what we are up to.
+    """
+
+    # Tell systemd our state, if we're using it. This will silently fail if
+    # we're not using systemd.
+    sd_channel = sdnotify.SystemdNotifier()
+
+    hs.get_reactor().addSystemEventTrigger(
+        "after",
+        "startup",
+        lambda: sd_channel.notify("READY=1\nMAINPID=%s" % (os.getpid())),
+    )
+
+    hs.get_reactor().addSystemEventTrigger(
+        "before", "shutdown", lambda: sd_channel.notify("STOPPING=1")
+    )
+
+
 def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
     """Replaces the resolver with one that limits the number of in flight DNS
     requests.
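
setup_sdnotify() and the SIGHUP handler above use the sdnotify package to report process state to systemd. A self-contained sketch of the same notification flow, assuming sdnotify is installed and the service runs under a systemd unit with Type=notify (as the added comments note, the calls silently do nothing when systemd is not in use):

import os
import sdnotify

def notify_ready():
    # Reports READY=1 plus the main PID; a no-op if NOTIFY_SOCKET is unset.
    sdnotify.SystemdNotifier().notify("READY=1\nMAINPID=%s" % (os.getpid(),))

def notify_reloading():
    # Sent at the start of a SIGHUP-triggered reload.
    sdnotify.SystemdNotifier().notify("RELOADING=1")

def notify_stopping():
    # Sent from a "before shutdown" reactor trigger.
    sdnotify.SystemdNotifier().notify("STOPPING=1")
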
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index e01f3e5f3b..54bb114dec 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -168,7 +168,9 @@ def start(config_options):
     )
 
     ps.setup()
-    reactor.callWhenRunning(_base.start, ps, config.worker_listeners)
+    reactor.addSystemEventTrigger(
+        "before", "startup", _base.start, ps, config.worker_listeners
+    )
 
     _base.start_worker_reactor("synapse-appservice", config)
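
The worker entry points in this commit replace reactor.callWhenRunning(...) with reactor.addSystemEventTrigger("before", "startup", ...), so the start-up work fires in Twisted's "before startup" phase rather than after startup (which is when callWhenRunning callbacks run). A small sketch of the Twisted API with a placeholder callback standing in for _base.start:

from twisted.internet import reactor

def start(name, listeners):
    # Placeholder for _base.start(ps, config.worker_listeners).
    print("starting %s with %d listener(s)" % (name, len(listeners)))

# Extra positional arguments are passed straight through to the callback.
reactor.addSystemEventTrigger("before", "startup", start, "example-worker", [])
reactor.run()
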
 
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 29bddc4823..721bb5b119 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -194,7 +194,9 @@ def start(config_options):
     )
 
     ss.setup()
-    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
+    reactor.addSystemEventTrigger(
+        "before", "startup", _base.start, ss, config.worker_listeners
+    )
 
     _base.start_worker_reactor("synapse-client-reader", config)
 
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index 042cfd04af..473c8895d0 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -193,7 +193,9 @@ def start(config_options):
     )
 
     ss.setup()
-    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
+    reactor.addSystemEventTrigger(
+        "before", "startup", _base.start, ss, config.worker_listeners
+    )
 
     _base.start_worker_reactor("synapse-event-creator", config)
 
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 76a97f8f32..5255d9e8cc 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -175,7 +175,9 @@ def start(config_options):
     )
 
     ss.setup()
-    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
+    reactor.addSystemEventTrigger(
+        "before", "startup", _base.start, ss, config.worker_listeners
+    )
 
     _base.start_worker_reactor("synapse-federation-reader", config)
 
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index fec49d5092..c5a2880e69 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -198,7 +198,9 @@ def start(config_options):
     )
 
     ss.setup()
-    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
+    reactor.addSystemEventTrigger(
+        "before", "startup", _base.start, ss, config.worker_listeners
+    )
 
     _base.start_worker_reactor("synapse-federation-sender", config)
 
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index 1f1f1df78e..e2822ca848 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -70,12 +70,12 @@ class PresenceStatusStubServlet(RestServlet):
         except HttpResponseException as e:
             raise e.to_synapse_error()
 
-        defer.returnValue((200, result))
+        return (200, result)
 
     @defer.inlineCallbacks
     def on_PUT(self, request, user_id):
         yield self.auth.get_user_by_req(request)
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class KeyUploadServlet(RestServlet):
@@ -126,11 +126,11 @@ class KeyUploadServlet(RestServlet):
                 self.main_uri + request.uri.decode("ascii"), body, headers=headers
             )
 
-            defer.returnValue((200, result))
+            return (200, result)
         else:
             # Just interested in counts.
             result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
-            defer.returnValue((200, {"one_time_key_counts": result}))
+            return (200, {"one_time_key_counts": result})
 
 
 class FrontendProxySlavedStore(
@@ -247,7 +247,9 @@ def start(config_options):
     )
 
     ss.setup()
-    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
+    reactor.addSystemEventTrigger(
+        "before", "startup", _base.start, ss, config.worker_listeners
+    )
 
     _base.start_worker_reactor("synapse-frontend-proxy", config)
 
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 0c075cb3f1..7d6b51b5bc 100755..100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -406,7 +406,7 @@ def setup(config_options):
         if provision:
             yield acme.provision_certificate()
 
-        defer.returnValue(provision)
+        return provision
 
     @defer.inlineCallbacks
     def reprovision_acme():
@@ -447,7 +447,7 @@ def setup(config_options):
                 reactor.stop()
             sys.exit(1)
 
-    reactor.callWhenRunning(start)
+    reactor.addSystemEventTrigger("before", "startup", start)
 
     return hs
 
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index d70780e9d5..3a168577c7 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -26,6 +26,7 @@ from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
+from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext
 from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
@@ -35,6 +36,7 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.admin import register_servlets_for_media_repo
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
@@ -71,6 +73,12 @@ class MediaRepositoryServer(HomeServer):
                     resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                 elif name == "media":
                     media_repo = self.get_media_repository_resource()
+
+                    # We need to serve the admin servlets for media on the
+                    # worker.
+                    admin_resource = JsonResource(self, canonical_json=False)
+                    register_servlets_for_media_repo(self, admin_resource)
+
                     resources.update(
                         {
                             MEDIA_PREFIX: media_repo,
@@ -78,6 +86,7 @@ class MediaRepositoryServer(HomeServer):
                             CONTENT_REPO_PREFIX: ContentRepoResource(
                                 self, self.config.uploads_path
                             ),
+                            "/_synapse/admin": admin_resource,
                         }
                     )
 
@@ -161,7 +170,9 @@ def start(config_options):
     )
 
     ss.setup()
-    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
+    reactor.addSystemEventTrigger(
+        "before", "startup", _base.start, ss, config.worker_listeners
+    )
 
     _base.start_worker_reactor("synapse-media-repository", config)
 
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 070de7d0b0..692ffa2f04 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -216,7 +216,7 @@ def start(config_options):
         _base.start(ps, config.worker_listeners)
         ps.get_pusherpool().start()
 
-    reactor.callWhenRunning(start)
+    reactor.addSystemEventTrigger("before", "startup", start)
 
     _base.start_worker_reactor("synapse-pusher", config)
 
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 315c030694..a1c3b162f7 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -451,7 +451,9 @@ def start(config_options):
     )
 
     ss.setup()
-    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
+    reactor.addSystemEventTrigger(
+        "before", "startup", _base.start, ss, config.worker_listeners
+    )
 
     _base.start_worker_reactor("synapse-synchrotron", config)
 
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index 03ef21bd01..cb29a1afab 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -224,7 +224,9 @@ def start(config_options):
     )
 
     ss.setup()
-    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
+    reactor.addSystemEventTrigger(
+        "before", "startup", _base.start, ss, config.worker_listeners
+    )
 
     _base.start_worker_reactor("synapse-user-dir", config)
 
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index b26a31dd54..33b3579425 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -175,21 +175,21 @@ class ApplicationService(object):
     @defer.inlineCallbacks
     def _matches_user(self, event, store):
         if not event:
-            defer.returnValue(False)
+            return False
 
         if self.is_interested_in_user(event.sender):
-            defer.returnValue(True)
+            return True
         # also check m.room.member state key
         if event.type == EventTypes.Member and self.is_interested_in_user(
             event.state_key
         ):
-            defer.returnValue(True)
+            return True
 
         if not store:
-            defer.returnValue(False)
+            return False
 
         does_match = yield self._matches_user_in_member_list(event.room_id, store)
-        defer.returnValue(does_match)
+        return does_match
 
     @cachedInlineCallbacks(num_args=1, cache_context=True)
     def _matches_user_in_member_list(self, room_id, store, cache_context):
@@ -200,8 +200,8 @@ class ApplicationService(object):
         # check joined member events
         for user_id in member_list:
             if self.is_interested_in_user(user_id):
-                defer.returnValue(True)
-        defer.returnValue(False)
+                return True
+        return False
 
     def _matches_room_id(self, event):
         if hasattr(event, "room_id"):
@@ -211,13 +211,13 @@ class ApplicationService(object):
     @defer.inlineCallbacks
     def _matches_aliases(self, event, store):
         if not store or not event:
-            defer.returnValue(False)
+            return False
 
         alias_list = yield store.get_aliases_for_room(event.room_id)
         for alias in alias_list:
             if self.is_interested_in_alias(alias):
-                defer.returnValue(True)
-        defer.returnValue(False)
+                return True
+        return False
 
     @defer.inlineCallbacks
     def is_interested(self, event, store=None):
@@ -231,15 +231,15 @@ class ApplicationService(object):
         """
         # Do cheap checks first
         if self._matches_room_id(event):
-            defer.returnValue(True)
+            return True
 
         if (yield self._matches_aliases(event, store)):
-            defer.returnValue(True)
+            return True
 
         if (yield self._matches_user(event, store)):
-            defer.returnValue(True)
+            return True
 
-        defer.returnValue(False)
+        return False
 
     def is_interested_in_user(self, user_id):
         return (
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 571881775b..007ca75a94 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -97,40 +97,40 @@ class ApplicationServiceApi(SimpleHttpClient):
     @defer.inlineCallbacks
     def query_user(self, service, user_id):
         if service.url is None:
-            defer.returnValue(False)
+            return False
         uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
         response = None
         try:
             response = yield self.get_json(uri, {"access_token": service.hs_token})
             if response is not None:  # just an empty json object
-                defer.returnValue(True)
+                return True
         except CodeMessageException as e:
             if e.code == 404:
-                defer.returnValue(False)
+                return False
                 return
             logger.warning("query_user to %s received %s", uri, e.code)
         except Exception as ex:
             logger.warning("query_user to %s threw exception %s", uri, ex)
-        defer.returnValue(False)
+        return False
 
     @defer.inlineCallbacks
     def query_alias(self, service, alias):
         if service.url is None:
-            defer.returnValue(False)
+            return False
         uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
         response = None
         try:
             response = yield self.get_json(uri, {"access_token": service.hs_token})
             if response is not None:  # just an empty json object
-                defer.returnValue(True)
+                return True
         except CodeMessageException as e:
             logger.warning("query_alias to %s received %s", uri, e.code)
             if e.code == 404:
-                defer.returnValue(False)
+                return False
                 return
         except Exception as ex:
             logger.warning("query_alias to %s threw exception %s", uri, ex)
-        defer.returnValue(False)
+        return False
 
     @defer.inlineCallbacks
     def query_3pe(self, service, kind, protocol, fields):
@@ -141,7 +141,7 @@ class ApplicationServiceApi(SimpleHttpClient):
         else:
             raise ValueError("Unrecognised 'kind' argument %r to query_3pe()", kind)
         if service.url is None:
-            defer.returnValue([])
+            return []
 
         uri = "%s%s/thirdparty/%s/%s" % (
             service.url,
@@ -155,7 +155,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                 logger.warning(
                     "query_3pe to %s returned an invalid response %r", uri, response
                 )
-                defer.returnValue([])
+                return []
 
             ret = []
             for r in response:
@@ -166,14 +166,14 @@ class ApplicationServiceApi(SimpleHttpClient):
                         "query_3pe to %s returned an invalid result %r", uri, r
                     )
 
-            defer.returnValue(ret)
+            return ret
         except Exception as ex:
             logger.warning("query_3pe to %s threw exception %s", uri, ex)
-            defer.returnValue([])
+            return []
 
     def get_3pe_protocol(self, service, protocol):
         if service.url is None:
-            defer.returnValue({})
+            return {}
 
         @defer.inlineCallbacks
         def _get():
@@ -189,7 +189,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                     logger.warning(
                         "query_3pe_protocol to %s did not return a" " valid result", uri
                     )
-                    defer.returnValue(None)
+                    return None
 
                 for instance in info.get("instances", []):
                     network_id = instance.get("network_id", None)
@@ -198,10 +198,10 @@ class ApplicationServiceApi(SimpleHttpClient):
                             service.id, network_id
                         ).to_string()
 
-                defer.returnValue(info)
+                return info
             except Exception as ex:
                 logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex)
-                defer.returnValue(None)
+                return None
 
         key = (service.id, protocol)
         return self.protocol_meta_cache.wrap(key, _get)
@@ -209,7 +209,7 @@ class ApplicationServiceApi(SimpleHttpClient):
     @defer.inlineCallbacks
     def push_bulk(self, service, events, txn_id=None):
         if service.url is None:
-            defer.returnValue(True)
+            return True
 
         events = self._serialize(events)
 
@@ -229,14 +229,14 @@ class ApplicationServiceApi(SimpleHttpClient):
             )
             sent_transactions_counter.labels(service.id).inc()
             sent_events_counter.labels(service.id).inc(len(events))
-            defer.returnValue(True)
+            return True
             return
         except CodeMessageException as e:
             logger.warning("push_bulk to %s received %s", uri, e.code)
         except Exception as ex:
             logger.warning("push_bulk to %s threw exception %s", uri, ex)
         failed_transactions_counter.labels(service.id).inc()
-        defer.returnValue(False)
+        return False
 
     def _serialize(self, events):
         time_now = self.clock.time_msec()
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index e5b36494f5..42a350bff8 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -193,7 +193,7 @@ class _TransactionController(object):
     @defer.inlineCallbacks
     def _is_service_up(self, service):
         state = yield self.store.get_appservice_state(service)
-        defer.returnValue(state == ApplicationServiceState.UP or state is None)
+        return state == ApplicationServiceState.UP or state is None
 
 
 class _Recoverer(object):
@@ -208,7 +208,7 @@ class _Recoverer(object):
                 r.service.id,
             )
             r.recover()
-        defer.returnValue(recoverers)
+        return recoverers
 
     def __init__(self, clock, store, as_api, service, callback):
         self.clock = clock
diff --git a/synapse/config/key.py b/synapse/config/key.py
index 8fc74f9cdf..fe8386985c 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -116,8 +116,6 @@ class KeyConfig(Config):
             seed = bytes(self.signing_key[0])
             self.macaroon_secret_key = hashlib.sha256(seed).digest()
 
-        self.expire_access_token = config.get("expire_access_token", False)
-
         # a secret which is used to calculate HMACs for form values, to stop
         # falsification of values
         self.form_secret = config.get("form_secret", None)
@@ -144,10 +142,6 @@ class KeyConfig(Config):
         #
         %(macaroon_secret_key)s
 
-        # Used to enable access token expiration.
-        #
-        #expire_access_token: False
-
         # a secret which is used to calculate HMACs for form values, to stop
         # falsification of values. Must be specified for the User Consent
         # forms to work.
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index c3de7a4e32..e2bee3c116 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -13,8 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
 from distutils.util import strtobool
 
+import pkg_resources
+
 from synapse.config._base import Config, ConfigError
 from synapse.types import RoomAlias
 from synapse.util.stringutils import random_string_with_symbols
@@ -41,8 +44,36 @@ class AccountValidityConfig(Config):
 
             self.startup_job_max_delta = self.period * 10.0 / 100.0
 
-        if self.renew_by_email_enabled and "public_baseurl" not in synapse_config:
-            raise ConfigError("Can't send renewal emails without 'public_baseurl'")
+        if self.renew_by_email_enabled:
+            if "public_baseurl" not in synapse_config:
+                raise ConfigError("Can't send renewal emails without 'public_baseurl'")
+
+        template_dir = config.get("template_dir")
+
+        if not template_dir:
+            template_dir = pkg_resources.resource_filename("synapse", "res/templates")
+
+        if "account_renewed_html_path" in config:
+            file_path = os.path.join(template_dir, config["account_renewed_html_path"])
+
+            self.account_renewed_html_content = self.read_file(
+                file_path, "account_validity.account_renewed_html_path"
+            )
+        else:
+            self.account_renewed_html_content = (
+                "<html><body>Your account has been successfully renewed.</body><html>"
+            )
+
+        if "invalid_token_html_path" in config:
+            file_path = os.path.join(template_dir, config["invalid_token_html_path"])
+
+            self.invalid_token_html_content = self.read_file(
+                file_path, "account_validity.invalid_token_html_path"
+            )
+        else:
+            self.invalid_token_html_content = (
+                "<html><body>Invalid renewal token.</body><html>"
+            )
 
 
 class RegistrationConfig(Config):
@@ -145,6 +176,16 @@ class RegistrationConfig(Config):
         #  period: 6w
         #  renew_at: 1w
         #  renew_email_subject: "Renew your %%(app)s account"
+        #  # Directory in which Synapse will try to find the HTML files to serve to the
+        #  # user when trying to renew an account. Optional, defaults to
+        #  # synapse/res/templates.
+        #  template_dir: "res/templates"
+        #  # HTML to be displayed to the user after they successfully renewed their
+        #  # account. Optional.
+        #  account_renewed_html_path: "account_renewed.html"
+        #  # HTML to be displayed when the user tries to renew an account with an invalid
+        #  # renewal token. Optional.
+        #  invalid_token_html_path: "invalid_token.html"
 
         # Time that a user's session remains valid for, after they log in.
         #
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 80a628d9b0..fdb1f246d0 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import os
 from collections import namedtuple
 
@@ -87,6 +88,18 @@ def parse_thumbnail_requirements(thumbnail_sizes):
 
 class ContentRepositoryConfig(Config):
     def read_config(self, config, **kwargs):
+
+        # Only enable the media repo if either the media repo is enabled or the
+        # current worker app is the media repo.
+        if (
+            self.enable_media_repo is False
+            and config.get("worker_app") != "synapse.app.media_repository"
+        ):
+            self.can_load_media_repo = False
+            return
+        else:
+            self.can_load_media_repo = True
+
         self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M"))
         self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
         self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))
@@ -202,6 +215,13 @@ class ContentRepositoryConfig(Config):
 
         return (
             r"""
+        ## Media Store ##
+
+        # Enable the media store service in the Synapse master. Uncomment the
+        # following if you are using a separate media store worker.
+        #
+        #enable_media_repo: false
+
         # Directory where uploaded images and attachments are stored.
         #
         media_store_path: "%(media_store)s"
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 00170f1393..15449695d1 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -18,6 +18,7 @@
 import logging
 import os.path
 
+import attr
 from netaddr import IPSet
 
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
@@ -38,6 +39,12 @@ DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]
 
 DEFAULT_ROOM_VERSION = "4"
 
+ROOM_COMPLEXITY_TOO_GREAT = (
+    "Your homeserver is unable to join rooms this large or complex. "
+    "Please speak to your server administrator, or upgrade your instance "
+    "to join this room."
+)
+
 
 class ServerConfig(Config):
     def read_config(self, config, **kwargs):
@@ -247,6 +254,23 @@ class ServerConfig(Config):
 
         self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
 
+        @attr.s
+        class LimitRemoteRoomsConfig(object):
+            enabled = attr.ib(
+                validator=attr.validators.instance_of(bool), default=False
+            )
+            complexity = attr.ib(
+                validator=attr.validators.instance_of((int, float)), default=1.0
+            )
+            complexity_error = attr.ib(
+                validator=attr.validators.instance_of(str),
+                default=ROOM_COMPLEXITY_TOO_GREAT,
+            )
+
+        self.limit_remote_rooms = LimitRemoteRoomsConfig(
+            **config.get("limit_remote_rooms", {})
+        )
+
         bind_port = config.get("bind_port")
         if bind_port:
             if config.get("no_tls", False):
@@ -617,6 +641,23 @@ class ServerConfig(Config):
         # Used by phonehome stats to group together related servers.
         #server_context: context
 
+        # Resource-constrained Homeserver Settings
+        #
+        # If limit_remote_rooms.enabled is True, the room complexity will be
+        # checked before a user joins a new remote room. If it is above
+        # limit_remote_rooms.complexity, it will disallow joining or
+        # instantly leave.
+        #
+        # limit_remote_rooms.complexity_error can be set to customise the text
+        # displayed to the user when a room above the complexity threshold has
+        # its join cancelled.
+        #
+        # Uncomment the below lines to enable:
+        #limit_remote_rooms:
+        #  enabled: True
+        #  complexity: 1.0
+        #  complexity_error: "This room is too complex."
+
         # Whether to require a user to be in the room to add an alias to it.
         # Defaults to 'true'.
         #
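
The LimitRemoteRoomsConfig class above uses attrs validators to type-check whatever the limit_remote_rooms section of the YAML provides. A standalone sketch of the pattern (the sample dict is illustrative, not Synapse's actual config-parsing path):

import attr

ROOM_COMPLEXITY_TOO_GREAT = (
    "Your homeserver is unable to join rooms this large or complex."
)

@attr.s
class LimitRemoteRoomsConfig(object):
    enabled = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    complexity = attr.ib(
        validator=attr.validators.instance_of((int, float)), default=1.0
    )
    complexity_error = attr.ib(
        validator=attr.validators.instance_of(str), default=ROOM_COMPLEXITY_TOO_GREAT
    )

# Mirrors LimitRemoteRoomsConfig(**config.get("limit_remote_rooms", {})):
# missing keys fall back to the defaults, wrong types raise via the
# validators, and unexpected keys raise TypeError.
cfg = LimitRemoteRoomsConfig(**{"enabled": True, "complexity": 0.5})
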
diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py
index 4479454415..95e7ccb3a3 100644
--- a/synapse/config/tracer.py
+++ b/synapse/config/tracer.py
@@ -23,6 +23,12 @@ class TracerConfig(Config):
             opentracing_config = {}
 
         self.opentracer_enabled = opentracing_config.get("enabled", False)
+
+        self.jaeger_config = opentracing_config.get(
+            "jaeger_config",
+            {"sampler": {"type": "const", "param": 1}, "logging": False},
+        )
+
         if not self.opentracer_enabled:
             return
 
@@ -56,4 +62,20 @@ class TracerConfig(Config):
             #
             #homeserver_whitelist:
             #  - ".*"
+
+            # Jaeger can be configured to sample traces at different rates.
+            # All configuration options provided by Jaeger can be set here.
+            # Jaeger's configuration mostly related to trace sampling which
+            # is documented here:
+            # https://www.jaegertracing.io/docs/1.13/sampling/.
+            #
+            #jaeger_config:
+            #  sampler:
+            #    type: const
+            #    param: 1
+
+            #  Logging whether spans were started and reported
+            #
+            #  logging:
+            #    false
         """
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index 4f48e8e88d..06e63a96b5 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -31,6 +31,7 @@ from twisted.internet.ssl import (
     platformTrust,
 )
 from twisted.python.failure import Failure
+from twisted.web.iweb import IPolicyForHTTPS
 
 logger = logging.getLogger(__name__)
 
@@ -74,6 +75,7 @@ class ServerContextFactory(ContextFactory):
         return self._context
 
 
+@implementer(IPolicyForHTTPS)
 class ClientTLSOptionsFactory(object):
     """Factory for Twisted SSLClientConnectionCreators that are used to make connections
     to remote servers for federation.
@@ -146,6 +148,12 @@ class ClientTLSOptionsFactory(object):
             f = Failure()
             tls_protocol.failVerification(f)
 
+    def creatorForNetloc(self, hostname, port):
+        """Implements the IPolicyForHTTPS interace so that this can be passed
+        directly to agents.
+        """
+        return self.get_options(hostname)
+
 
 @implementer(IOpenSSLClientConnectionCreator)
 class SSLClientConnectionCreator(object):
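
Because ClientTLSOptionsFactory now implements IPolicyForHTTPS via creatorForNetloc, it can be passed directly to a twisted.web Agent as its TLS policy. A sketch of the general idea, using Twisted's stock BrowserLikePolicyForHTTPS as a stand-in policy object:

from twisted.internet import reactor
from twisted.web.client import Agent, BrowserLikePolicyForHTTPS

# Any IPolicyForHTTPS provider works here; after this commit Synapse's
# ClientTLSOptionsFactory qualifies as well.
policy = BrowserLikePolicyForHTTPS()
agent = Agent(reactor, contextFactory=policy)

d = agent.request(b"GET", b"https://example.com/")
d.addCallback(lambda response: print(response.code))
d.addBoth(lambda _: reactor.stop())
reactor.run()
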
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index e8bb420ad1..6c3e885e72 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -462,7 +462,7 @@ class StoreKeyFetcher(KeyFetcher):
         keys = {}
         for (server_name, key_id), key in res.items():
             keys.setdefault(server_name, {})[key_id] = key
-        defer.returnValue(keys)
+        return keys
 
 
 class BaseV2KeyFetcher(object):
@@ -566,7 +566,7 @@ class BaseV2KeyFetcher(object):
             ).addErrback(unwrapFirstError)
         )
 
-        defer.returnValue(verify_keys)
+        return verify_keys
 
 
 class PerspectivesKeyFetcher(BaseV2KeyFetcher):
@@ -588,7 +588,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
                 result = yield self.get_server_verify_key_v2_indirect(
                     keys_to_fetch, key_server
                 )
-                defer.returnValue(result)
+                return result
             except KeyLookupError as e:
                 logger.warning(
                     "Key lookup failed from %r: %s", key_server.server_name, e
@@ -601,7 +601,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
                     str(e),
                 )
 
-            defer.returnValue({})
+            return {}
 
         results = yield make_deferred_yieldable(
             defer.gatherResults(
@@ -615,7 +615,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
             for server_name, keys in result.items():
                 union_of_keys.setdefault(server_name, {}).update(keys)
 
-        defer.returnValue(union_of_keys)
+        return union_of_keys
 
     @defer.inlineCallbacks
     def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server):
@@ -701,7 +701,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
             perspective_name, time_now_ms, added_keys
         )
 
-        defer.returnValue(keys)
+        return keys
 
     def _validate_perspectives_response(self, key_server, response):
         """Optionally check the signature on the result of a /key/query request
@@ -843,7 +843,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
             )
             keys.update(response_keys)
 
-        defer.returnValue(keys)
+        return keys
 
 
 @defer.inlineCallbacks
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index db011e0407..3997751337 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -144,15 +144,13 @@ class EventBuilder(object):
         if self._origin_server_ts is not None:
             event_dict["origin_server_ts"] = self._origin_server_ts
 
-        defer.returnValue(
-            create_local_event_from_event_dict(
-                clock=self._clock,
-                hostname=self._hostname,
-                signing_key=self._signing_key,
-                format_version=self.format_version,
-                event_dict=event_dict,
-                internal_metadata_dict=self.internal_metadata.get_dict(),
-            )
+        return create_local_event_from_event_dict(
+            clock=self._clock,
+            hostname=self._hostname,
+            signing_key=self._signing_key,
+            format_version=self.format_version,
+            event_dict=event_dict,
+            internal_metadata_dict=self.internal_metadata.get_dict(),
         )
 
 
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index a9545e6c1b..acbcbeeced 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -133,19 +133,17 @@ class EventContext(object):
         else:
             prev_state_id = None
 
-        defer.returnValue(
-            {
-                "prev_state_id": prev_state_id,
-                "event_type": event.type,
-                "event_state_key": event.state_key if event.is_state() else None,
-                "state_group": self.state_group,
-                "rejected": self.rejected,
-                "prev_group": self.prev_group,
-                "delta_ids": _encode_state_dict(self.delta_ids),
-                "prev_state_events": self.prev_state_events,
-                "app_service_id": self.app_service.id if self.app_service else None,
-            }
-        )
+        return {
+            "prev_state_id": prev_state_id,
+            "event_type": event.type,
+            "event_state_key": event.state_key if event.is_state() else None,
+            "state_group": self.state_group,
+            "rejected": self.rejected,
+            "prev_group": self.prev_group,
+            "delta_ids": _encode_state_dict(self.delta_ids),
+            "prev_state_events": self.prev_state_events,
+            "app_service_id": self.app_service.id if self.app_service else None,
+        }
 
     @staticmethod
     def deserialize(store, input):
@@ -202,7 +200,7 @@ class EventContext(object):
 
         yield make_deferred_yieldable(self._fetching_state_deferred)
 
-        defer.returnValue(self._current_state_ids)
+        return self._current_state_ids
 
     @defer.inlineCallbacks
     def get_prev_state_ids(self, store):
@@ -222,7 +220,7 @@ class EventContext(object):
 
         yield make_deferred_yieldable(self._fetching_state_deferred)
 
-        defer.returnValue(self._prev_state_ids)
+        return self._prev_state_ids
 
     def get_cached_current_state_ids(self):
         """Gets the current state IDs if we have them already cached.
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 8f5d95696b..714a9b1579 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -51,7 +51,7 @@ class ThirdPartyEventRules(object):
             defer.Deferred[bool]: True if the event should be allowed, False if not.
         """
         if self.third_party_rules is None:
-            defer.returnValue(True)
+            return True
 
         prev_state_ids = yield context.get_prev_state_ids(self.store)
 
@@ -61,7 +61,7 @@ class ThirdPartyEventRules(object):
             state_events[key] = yield self.store.get_event(event_id, allow_none=True)
 
         ret = yield self.third_party_rules.check_event_allowed(event, state_events)
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def on_create_room(self, requester, config, is_requester_admin):
@@ -98,7 +98,7 @@ class ThirdPartyEventRules(object):
         """
 
         if self.third_party_rules is None:
-            defer.returnValue(True)
+            return True
 
         state_ids = yield self.store.get_filtered_current_state_ids(room_id)
         room_state_events = yield self.store.get_events(state_ids.values())
@@ -110,4 +110,4 @@ class ThirdPartyEventRules(object):
         ret = yield self.third_party_rules.check_threepid_can_be_invited(
             medium, address, state_events
         )
-        defer.returnValue(ret)
+        return ret
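
The changes above, like most of this commit, replace defer.returnValue(x) with a bare return x inside @defer.inlineCallbacks generators. On Python 3 a generator may return a value, and Twisted unwraps that value from the resulting StopIteration, so the two spellings are equivalent once Python 2 support is dropped. A minimal sketch of the pattern, using a hypothetical Deferred-returning store.get_something() call:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def old_style(store):
        value = yield store.get_something()  # hypothetical async store call
        defer.returnValue(value)             # Python 2 compatible spelling

    @defer.inlineCallbacks
    def new_style(store):
        value = yield store.get_something()  # same hypothetical call
        return value                         # valid on Python 3; Twisted unwraps it
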
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 9487a886f5..07d1c5bcf0 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -360,7 +360,7 @@ class EventClientSerializer(object):
         """
         # To handle the case of presence events and the like
         if not isinstance(event, EventBase):
-            defer.returnValue(event)
+            return event
 
         event_id = event.event_id
         serialized_event = serialize_event(event, time_now, **kwargs)
@@ -406,7 +406,7 @@ class EventClientSerializer(object):
                     "sender": edit.sender,
                 }
 
-        defer.returnValue(serialized_event)
+        return serialized_event
 
     def serialize_events(self, events, time_now, **kwargs):
         """Serializes multiple events.
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index f7ffd1d561..272426e105 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -95,10 +95,10 @@ class EventValidator(object):
 
         elif event.type == EventTypes.Topic:
             self._ensure_strings(event.content, ["topic"])
-
+            self._ensure_state_event(event)
         elif event.type == EventTypes.Name:
             self._ensure_strings(event.content, ["name"])
-
+            self._ensure_state_event(event)
         elif event.type == EventTypes.Member:
             if "membership" not in event.content:
                 raise SynapseError(400, "Content has not membership key")
@@ -106,9 +106,25 @@ class EventValidator(object):
             if event.content["membership"] not in Membership.LIST:
                 raise SynapseError(400, "Invalid membership key")
 
+            self._ensure_state_event(event)
+        elif event.type == EventTypes.Tombstone:
+            if "replacement_room" not in event.content:
+                raise SynapseError(400, "Content has no replacement_room key")
+
+            if event.content["replacement_room"] == event.room_id:
+                raise SynapseError(
+                    400, "Tombstone cannot reference the room it was sent in"
+                )
+
+            self._ensure_state_event(event)
+
     def _ensure_strings(self, d, keys):
         for s in keys:
             if s not in d:
                 raise SynapseError(400, "'%s' not in content" % (s,))
             if not isinstance(d[s], string_types):
                 raise SynapseError(400, "'%s' not a string type" % (s,))
+
+    def _ensure_state_event(self, event):
+        if not event.is_state():
+            raise SynapseError(400, "'%s' must be state events" % (event.type,))
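
The validator additions above require topic, name, membership and the new tombstone events to be genuine state events, and forbid a tombstone from pointing back at the room it was sent in. A rough, self-contained restatement of the tombstone checks; the SimpleNamespace stand-in and ValueError (in place of SynapseError) are assumptions for illustration only:

    from types import SimpleNamespace

    def validate_tombstone(event):
        # Illustrative mirror of the new checks, not Synapse's EventValidator.
        if "replacement_room" not in event.content:
            raise ValueError("Content has no replacement_room key")
        if event.content["replacement_room"] == event.room_id:
            raise ValueError("Tombstone cannot reference the room it was sent in")
        if not event.is_state():
            raise ValueError("'%s' must be a state event" % (event.type,))

    # A tombstone that points back at its own room is rejected:
    event = SimpleNamespace(
        type="m.room.tombstone",
        room_id="!abc:example.org",
        content={"replacement_room": "!abc:example.org"},
        is_state=lambda: True,
    )
    try:
        validate_tombstone(event)
    except ValueError as e:
        print(e)  # Tombstone cannot reference the room it was sent in
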
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index f7bb806ae7..5a1e23a145 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -106,7 +106,7 @@ class FederationBase(object):
                     "Failed to find copy of %s with valid signature", pdu.event_id
                 )
 
-            defer.returnValue(res)
+            return res
 
         handle = preserve_fn(handle_check_result)
         deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
@@ -116,9 +116,9 @@ class FederationBase(object):
         ).addErrback(unwrapFirstError)
 
         if include_none:
-            defer.returnValue(valid_pdus)
+            return valid_pdus
         else:
-            defer.returnValue([p for p in valid_pdus if p])
+            return [p for p in valid_pdus if p]
 
     def _check_sigs_and_hash(self, room_version, pdu):
         return make_deferred_yieldable(
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 3cb4b94420..bec3080895 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -213,7 +213,7 @@ class FederationClient(FederationBase):
             ).addErrback(unwrapFirstError)
         )
 
-        defer.returnValue(pdus)
+        return pdus
 
     @defer.inlineCallbacks
     @log_function
@@ -245,7 +245,7 @@ class FederationClient(FederationBase):
 
         ev = self._get_pdu_cache.get(event_id)
         if ev:
-            defer.returnValue(ev)
+            return ev
 
         pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
 
@@ -307,7 +307,7 @@ class FederationClient(FederationBase):
         if signed_pdu:
             self._get_pdu_cache[event_id] = signed_pdu
 
-        defer.returnValue(signed_pdu)
+        return signed_pdu
 
     @defer.inlineCallbacks
     @log_function
@@ -355,7 +355,7 @@ class FederationClient(FederationBase):
 
             auth_chain.sort(key=lambda e: e.depth)
 
-            defer.returnValue((pdus, auth_chain))
+            return (pdus, auth_chain)
         except HttpResponseException as e:
             if e.code == 400 or e.code == 404:
                 logger.info("Failed to use get_room_state_ids API, falling back")
@@ -404,7 +404,7 @@ class FederationClient(FederationBase):
 
         signed_auth.sort(key=lambda e: e.depth)
 
-        defer.returnValue((signed_pdus, signed_auth))
+        return (signed_pdus, signed_auth)
 
     @defer.inlineCallbacks
     def get_events_from_store_or_dest(self, destination, room_id, event_ids):
@@ -429,7 +429,7 @@ class FederationClient(FederationBase):
             missing_events.discard(k)
 
         if not missing_events:
-            defer.returnValue((signed_events, failed_to_fetch))
+            return (signed_events, failed_to_fetch)
 
         logger.debug(
             "Fetching unknown state/auth events %s for room %s",
@@ -465,7 +465,7 @@ class FederationClient(FederationBase):
             # We removed all events we successfully fetched from `batch`
             failed_to_fetch.update(batch)
 
-        defer.returnValue((signed_events, failed_to_fetch))
+        return (signed_events, failed_to_fetch)
 
     @defer.inlineCallbacks
     @log_function
@@ -485,7 +485,7 @@ class FederationClient(FederationBase):
 
         signed_auth.sort(key=lambda e: e.depth)
 
-        defer.returnValue(signed_auth)
+        return signed_auth
 
     @defer.inlineCallbacks
     def _try_destination_list(self, description, destinations, callback):
@@ -511,9 +511,8 @@ class FederationClient(FederationBase):
             The [Deferred] result of callback, if it succeeds
 
         Raises:
-            SynapseError if the chosen remote server returns a 300/400 code.
-
-            RuntimeError if no servers were reachable.
+            SynapseError if the chosen remote server returns a 300/400 code, or
+            no servers were reachable.
         """
         for destination in destinations:
             if destination == self.server_name:
@@ -521,7 +520,7 @@ class FederationClient(FederationBase):
 
             try:
                 res = yield callback(destination)
-                defer.returnValue(res)
+                return res
             except InvalidResponseError as e:
                 logger.warn("Failed to %s via %s: %s", description, destination, e)
             except HttpResponseException as e:
@@ -538,7 +537,7 @@ class FederationClient(FederationBase):
             except Exception:
                 logger.warn("Failed to %s via %s", description, destination, exc_info=1)
 
-        raise RuntimeError("Failed to %s via any server" % (description,))
+        raise SynapseError(502, "Failed to %s via any server" % (description,))
 
     def make_membership_event(
         self, destinations, room_id, user_id, membership, content, params
@@ -615,7 +614,7 @@ class FederationClient(FederationBase):
                 event_dict=pdu_dict,
             )
 
-            defer.returnValue((destination, ev, event_format))
+            return (destination, ev, event_format)
 
         return self._try_destination_list(
             "make_" + membership, destinations, send_request
@@ -728,13 +727,11 @@ class FederationClient(FederationBase):
 
             check_authchain_validity(signed_auth)
 
-            defer.returnValue(
-                {
-                    "state": signed_state,
-                    "auth_chain": signed_auth,
-                    "origin": destination,
-                }
-            )
+            return {
+                "state": signed_state,
+                "auth_chain": signed_auth,
+                "origin": destination,
+            }
 
         return self._try_destination_list("send_join", destinations, send_request)
 
@@ -758,7 +755,7 @@ class FederationClient(FederationBase):
 
         # FIXME: We should handle signature failures more gracefully.
 
-        defer.returnValue(pdu)
+        return pdu
 
     @defer.inlineCallbacks
     def _do_send_invite(self, destination, pdu, room_version):
@@ -786,7 +783,7 @@ class FederationClient(FederationBase):
                     "invite_room_state": pdu.unsigned.get("invite_room_state", []),
                 },
             )
-            defer.returnValue(content)
+            return content
         except HttpResponseException as e:
             if e.code in [400, 404]:
                 err = e.to_synapse_error()
@@ -821,7 +818,7 @@ class FederationClient(FederationBase):
             event_id=pdu.event_id,
             content=pdu.get_pdu_json(time_now),
         )
-        defer.returnValue(content)
+        return content
 
     def send_leave(self, destinations, pdu):
         """Sends a leave event to one of a list of homeservers.
@@ -856,7 +853,7 @@ class FederationClient(FederationBase):
             )
 
             logger.debug("Got content: %s", content)
-            defer.returnValue(None)
+            return None
 
         return self._try_destination_list("send_leave", destinations, send_request)
 
@@ -917,7 +914,7 @@ class FederationClient(FederationBase):
             "missing": content.get("missing", []),
         }
 
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def get_missing_events(
@@ -974,7 +971,7 @@ class FederationClient(FederationBase):
             # get_missing_events
             signed_events = []
 
-        defer.returnValue(signed_events)
+        return signed_events
 
     @defer.inlineCallbacks
     def forward_third_party_invite(self, destinations, room_id, event_dict):
@@ -986,7 +983,7 @@ class FederationClient(FederationBase):
                 yield self.transport_layer.exchange_third_party_invite(
                     destination=destination, room_id=room_id, event_dict=event_dict
                 )
-                defer.returnValue(None)
+                return None
             except CodeMessageException:
                 raise
             except Exception as e:
@@ -995,3 +992,39 @@ class FederationClient(FederationBase):
                 )
 
         raise RuntimeError("Failed to send to any server.")
+
+    @defer.inlineCallbacks
+    def get_room_complexity(self, destination, room_id):
+        """
+        Fetch the complexity of a remote room from another server.
+
+        Args:
+            destination (str): The remote server
+            room_id (str): The room ID to ask about.
+
+        Returns:
+            Deferred[dict] or Deferred[None]: Dict contains the complexity
+            metric versions, while None means we could not fetch the complexity.
+        """
+        try:
+            complexity = yield self.transport_layer.get_room_complexity(
+                destination=destination, room_id=room_id
+            )
+            defer.returnValue(complexity)
+        except CodeMessageException as e:
+            # We didn't manage to get it -- probably a 404. We are okay if other
+            # servers don't give it to us.
+            logger.debug(
+                "Failed to fetch room complexity via %s for %s, got a %d",
+                destination,
+                room_id,
+                e.code,
+            )
+        except Exception:
+            logger.exception(
+                "Failed to fetch room complexity via %s for %s", destination, room_id
+            )
+
+        # If we don't manage to find it, return None. It's not an error if a
+        # server doesn't give it to us.
+        defer.returnValue(None)
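
Two behavioural changes in this file are worth noting: _try_destination_list now raises SynapseError(502) instead of RuntimeError when every destination fails, and the new get_room_complexity deliberately swallows errors, returning None when the remote server cannot or will not report the metric, so callers must treat the result as best-effort. A hedged sketch of such a caller; the "v1" key and the threshold are illustrative assumptions, not values defined by this commit:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def room_is_too_complex(federation_client, destination, room_id, limit=500.0):
        # Hypothetical helper built on top of the new API.
        complexity = yield federation_client.get_room_complexity(destination, room_id)
        if complexity is None:
            # The remote server could not tell us -- treat the room as acceptable.
            return False
        return complexity.get("v1", 0) > limit
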
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 8c0a18b120..d216c46dfe 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -99,7 +99,7 @@ class FederationServer(FederationBase):
 
             res = self._transaction_from_pdus(pdus).get_dict()
 
-        defer.returnValue((200, res))
+        return (200, res)
 
     @defer.inlineCallbacks
     @log_function
@@ -126,7 +126,7 @@ class FederationServer(FederationBase):
                 origin, transaction, request_time
             )
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _handle_incoming_transaction(self, origin, transaction, request_time):
@@ -147,8 +147,7 @@ class FederationServer(FederationBase):
                 "[%s] We've already responded to this request",
                 transaction.transaction_id,
             )
-            defer.returnValue(response)
-            return
+            return response
 
         logger.debug("[%s] Transaction is new", transaction.transaction_id)
 
@@ -163,7 +162,7 @@ class FederationServer(FederationBase):
             yield self.transaction_actions.set_response(
                 origin, transaction, 400, response
             )
-            defer.returnValue((400, response))
+            return (400, response)
 
         received_pdus_counter.inc(len(transaction.pdus))
 
@@ -265,7 +264,7 @@ class FederationServer(FederationBase):
         logger.debug("Returning: %s", str(response))
 
         yield self.transaction_actions.set_response(origin, transaction, 200, response)
-        defer.returnValue((200, response))
+        return (200, response)
 
     @defer.inlineCallbacks
     def received_edu(self, origin, edu_type, content):
@@ -298,7 +297,7 @@ class FederationServer(FederationBase):
                 event_id,
             )
 
-        defer.returnValue((200, resp))
+        return (200, resp)
 
     @defer.inlineCallbacks
     def on_state_ids_request(self, origin, room_id, event_id):
@@ -315,9 +314,7 @@ class FederationServer(FederationBase):
         state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
         auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
 
-        defer.returnValue(
-            (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids})
-        )
+        return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids})
 
     @defer.inlineCallbacks
     def _on_context_state_request_compute(self, room_id, event_id):
@@ -336,12 +333,10 @@ class FederationServer(FederationBase):
                     )
                 )
 
-        defer.returnValue(
-            {
-                "pdus": [pdu.get_pdu_json() for pdu in pdus],
-                "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
-            }
-        )
+        return {
+            "pdus": [pdu.get_pdu_json() for pdu in pdus],
+            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
+        }
 
     @defer.inlineCallbacks
     @log_function
@@ -349,15 +344,15 @@ class FederationServer(FederationBase):
         pdu = yield self.handler.get_persisted_pdu(origin, event_id)
 
         if pdu:
-            defer.returnValue((200, self._transaction_from_pdus([pdu]).get_dict()))
+            return (200, self._transaction_from_pdus([pdu]).get_dict())
         else:
-            defer.returnValue((404, ""))
+            return (404, "")
 
     @defer.inlineCallbacks
     def on_query_request(self, query_type, args):
         received_queries_counter.labels(query_type).inc()
         resp = yield self.registry.on_query(query_type, args)
-        defer.returnValue((200, resp))
+        return (200, resp)
 
     @defer.inlineCallbacks
     def on_make_join_request(self, origin, room_id, user_id, supported_versions):
@@ -369,11 +364,9 @@ class FederationServer(FederationBase):
             logger.warn("Room version %s not in %s", room_version, supported_versions)
             raise IncompatibleRoomVersionError(room_version=room_version)
 
-        pdu = yield self.handler.on_make_join_request(room_id, user_id)
+        pdu = yield self.handler.on_make_join_request(origin, room_id, user_id)
         time_now = self._clock.time_msec()
-        defer.returnValue(
-            {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
-        )
+        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
 
     @defer.inlineCallbacks
     def on_invite_request(self, origin, content, room_version):
@@ -391,7 +384,7 @@ class FederationServer(FederationBase):
         yield self.check_server_matches_acl(origin_host, pdu.room_id)
         ret_pdu = yield self.handler.on_invite_request(origin, pdu)
         time_now = self._clock.time_msec()
-        defer.returnValue({"event": ret_pdu.get_pdu_json(time_now)})
+        return {"event": ret_pdu.get_pdu_json(time_now)}
 
     @defer.inlineCallbacks
     def on_send_join_request(self, origin, content, room_id):
@@ -407,30 +400,26 @@ class FederationServer(FederationBase):
         logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
         res_pdus = yield self.handler.on_send_join_request(origin, pdu)
         time_now = self._clock.time_msec()
-        defer.returnValue(
-            (
-                200,
-                {
-                    "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
-                    "auth_chain": [
-                        p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
-                    ],
-                },
-            )
+        return (
+            200,
+            {
+                "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
+                "auth_chain": [
+                    p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
+                ],
+            },
         )
 
     @defer.inlineCallbacks
     def on_make_leave_request(self, origin, room_id, user_id):
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, room_id)
-        pdu = yield self.handler.on_make_leave_request(room_id, user_id)
+        pdu = yield self.handler.on_make_leave_request(origin, room_id, user_id)
 
         room_version = yield self.store.get_room_version(room_id)
 
         time_now = self._clock.time_msec()
-        defer.returnValue(
-            {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
-        )
+        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
 
     @defer.inlineCallbacks
     def on_send_leave_request(self, origin, content, room_id):
@@ -445,7 +434,7 @@ class FederationServer(FederationBase):
 
         logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
         yield self.handler.on_send_leave_request(origin, pdu)
-        defer.returnValue((200, {}))
+        return (200, {})
 
     @defer.inlineCallbacks
     def on_event_auth(self, origin, room_id, event_id):
@@ -456,7 +445,7 @@ class FederationServer(FederationBase):
             time_now = self._clock.time_msec()
             auth_pdus = yield self.handler.on_event_auth(event_id)
             res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
-        defer.returnValue((200, res))
+        return (200, res)
 
     @defer.inlineCallbacks
     def on_query_auth_request(self, origin, content, room_id, event_id):
@@ -509,7 +498,7 @@ class FederationServer(FederationBase):
                 "missing": ret.get("missing", []),
             }
 
-        defer.returnValue((200, send_content))
+        return (200, send_content)
 
     @log_function
     def on_query_client_keys(self, origin, content):
@@ -548,7 +537,7 @@ class FederationServer(FederationBase):
             ),
         )
 
-        defer.returnValue({"one_time_keys": json_result})
+        return {"one_time_keys": json_result}
 
     @defer.inlineCallbacks
     @log_function
@@ -580,9 +569,7 @@ class FederationServer(FederationBase):
 
             time_now = self._clock.time_msec()
 
-        defer.returnValue(
-            {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
-        )
+        return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
 
     @log_function
     def on_openid_userinfo(self, token):
@@ -676,14 +663,14 @@ class FederationServer(FederationBase):
         ret = yield self.handler.exchange_third_party_invite(
             sender_user_id, target_user_id, room_id, signed
         )
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
         ret = yield self.handler.on_exchange_third_party_invite_request(
             origin, room_id, event_dict
         )
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def check_server_matches_acl(self, server_name, room_id):
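
on_make_join_request and on_make_leave_request now pass the requesting server's origin through to the handler, presumably so the handler can verify that the asking server actually owns the user it is building the event for. A hypothetical guard of that shape (the real check, if any, lives in the handler layer and is outside this excerpt):

    from synapse.api.errors import SynapseError
    from synapse.types import get_domain_from_id

    def check_origin_owns_user(origin, user_id):
        # Illustrative only: reject make_join/make_leave requests where the
        # origin server does not match the domain of the user in question.
        if get_domain_from_id(user_id) != origin:
            raise SynapseError(403, "User %s does not belong to %s" % (user_id, origin))
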
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index 9aab12c0d3..fad980b893 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -374,7 +374,7 @@ class PerDestinationQueue(object):
 
         assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"
 
-        defer.returnValue((edus, now_stream_id))
+        return (edus, now_stream_id)
 
     @defer.inlineCallbacks
     def _get_to_device_message_edus(self, limit):
@@ -393,4 +393,4 @@ class PerDestinationQueue(object):
             for content in contents
         ]
 
-        defer.returnValue((edus, stream_id))
+        return (edus, stream_id)
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index 0460a8c4ac..52706302f2 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -133,4 +133,4 @@ class TransactionManager(object):
                 )
             success = False
 
-        defer.returnValue(success)
+        return success
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 1aae9ec9e7..0cea0d2a10 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -21,7 +21,11 @@ from six.moves import urllib
 from twisted.internet import defer
 
 from synapse.api.constants import Membership
-from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
+from synapse.api.urls import (
+    FEDERATION_UNSTABLE_PREFIX,
+    FEDERATION_V1_PREFIX,
+    FEDERATION_V2_PREFIX,
+)
 from synapse.logging.utils import log_function
 
 logger = logging.getLogger(__name__)
@@ -183,7 +187,7 @@ class TransportLayerClient(object):
             try_trailing_slash_on_400=True,
         )
 
-        defer.returnValue(response)
+        return response
 
     @defer.inlineCallbacks
     @log_function
@@ -201,7 +205,7 @@ class TransportLayerClient(object):
             ignore_backoff=ignore_backoff,
         )
 
-        defer.returnValue(content)
+        return content
 
     @defer.inlineCallbacks
     @log_function
@@ -259,7 +263,7 @@ class TransportLayerClient(object):
             ignore_backoff=ignore_backoff,
         )
 
-        defer.returnValue(content)
+        return content
 
     @defer.inlineCallbacks
     @log_function
@@ -270,7 +274,7 @@ class TransportLayerClient(object):
             destination=destination, path=path, data=content
         )
 
-        defer.returnValue(response)
+        return response
 
     @defer.inlineCallbacks
     @log_function
@@ -288,7 +292,7 @@ class TransportLayerClient(object):
             ignore_backoff=True,
         )
 
-        defer.returnValue(response)
+        return response
 
     @defer.inlineCallbacks
     @log_function
@@ -299,7 +303,7 @@ class TransportLayerClient(object):
             destination=destination, path=path, data=content, ignore_backoff=True
         )
 
-        defer.returnValue(response)
+        return response
 
     @defer.inlineCallbacks
     @log_function
@@ -310,7 +314,7 @@ class TransportLayerClient(object):
             destination=destination, path=path, data=content, ignore_backoff=True
         )
 
-        defer.returnValue(response)
+        return response
 
     @defer.inlineCallbacks
     @log_function
@@ -339,7 +343,7 @@ class TransportLayerClient(object):
             destination=remote_server, path=path, args=args, ignore_backoff=True
         )
 
-        defer.returnValue(response)
+        return response
 
     @defer.inlineCallbacks
     @log_function
@@ -350,7 +354,7 @@ class TransportLayerClient(object):
             destination=destination, path=path, data=event_dict
         )
 
-        defer.returnValue(response)
+        return response
 
     @defer.inlineCallbacks
     @log_function
@@ -359,7 +363,7 @@ class TransportLayerClient(object):
 
         content = yield self.client.get_json(destination=destination, path=path)
 
-        defer.returnValue(content)
+        return content
 
     @defer.inlineCallbacks
     @log_function
@@ -370,7 +374,7 @@ class TransportLayerClient(object):
             destination=destination, path=path, data=content
         )
 
-        defer.returnValue(content)
+        return content
 
     @defer.inlineCallbacks
     @log_function
@@ -402,7 +406,7 @@ class TransportLayerClient(object):
         content = yield self.client.post_json(
             destination=destination, path=path, data=query_content, timeout=timeout
         )
-        defer.returnValue(content)
+        return content
 
     @defer.inlineCallbacks
     @log_function
@@ -426,7 +430,7 @@ class TransportLayerClient(object):
         content = yield self.client.get_json(
             destination=destination, path=path, timeout=timeout
         )
-        defer.returnValue(content)
+        return content
 
     @defer.inlineCallbacks
     @log_function
@@ -460,7 +464,7 @@ class TransportLayerClient(object):
         content = yield self.client.post_json(
             destination=destination, path=path, data=query_content, timeout=timeout
         )
-        defer.returnValue(content)
+        return content
 
     @defer.inlineCallbacks
     @log_function
@@ -488,7 +492,7 @@ class TransportLayerClient(object):
             timeout=timeout,
         )
 
-        defer.returnValue(content)
+        return content
 
     @log_function
     def get_group_profile(self, destination, group_id, requester_user_id):
@@ -935,6 +939,23 @@ class TransportLayerClient(object):
             destination=destination, path=path, data=content, ignore_backoff=True
         )
 
+    def get_room_complexity(self, destination, room_id):
+        """
+        Args:
+            destination (str): The remote server
+            room_id (str): The room ID to ask about.
+        """
+        path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/rooms/%s/complexity", room_id)
+
+        return self.client.get_json(destination=destination, path=path)
+
+
+def _create_path(federation_prefix, path, *args):
+    """
+    Ensures that all args are url encoded.
+    """
+    return federation_prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
+
 
 def _create_v1_path(path, *args):
     """Creates a path against V1 federation API from the path template and
@@ -951,9 +972,7 @@ def _create_v1_path(path, *args):
     Returns:
         str
     """
-    return FEDERATION_V1_PREFIX + path % tuple(
-        urllib.parse.quote(arg, "") for arg in args
-    )
+    return _create_path(FEDERATION_V1_PREFIX, path, *args)
 
 
 def _create_v2_path(path, *args):
@@ -971,6 +990,4 @@ def _create_v2_path(path, *args):
     Returns:
         str
     """
-    return FEDERATION_V2_PREFIX + path % tuple(
-        urllib.parse.quote(arg, "") for arg in args
-    )
+    return _create_path(FEDERATION_V2_PREFIX, path, *args)
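
The new _create_path helper centralises the URL building previously duplicated in _create_v1_path and _create_v2_path, and is reused for the unstable room-complexity endpoint. Every positional argument is percent-encoded with an empty safe set, so identifiers containing "!", ":" or "/" cannot escape their path segment. A small standalone illustration; the prefix value here is an assumption rather than a constant shown in this diff:

    from six.moves import urllib

    FEDERATION_UNSTABLE_PREFIX = "/_matrix/federation/unstable"  # assumed value

    def _create_path(federation_prefix, path, *args):
        # Percent-encode every argument, including "/" and ":", before
        # substituting it into the path template.
        return federation_prefix + path % tuple(
            urllib.parse.quote(arg, "") for arg in args
        )

    print(_create_path(FEDERATION_UNSTABLE_PREFIX, "/rooms/%s/complexity", "!abc:example.org"))
    # -> /_matrix/federation/unstable/rooms/%21abc%3Aexample.org/complexity
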
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 663264dec4..ea4e1b6d0f 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -325,7 +325,9 @@ class BaseFederationServlet(object):
             if code is None:
                 continue
 
-            server.register_paths(method, (pattern,), self._wrap(code))
+            server.register_paths(
+                method, (pattern,), self._wrap(code), self.__class__.__name__
+            )
 
 
 class FederationSendServlet(BaseFederationServlet):
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index f497711133..dfd7ae041b 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -157,7 +157,7 @@ class GroupAttestionRenewer(object):
 
         yield self.store.update_remote_attestion(group_id, user_id, attestation)
 
-        defer.returnValue({})
+        return {}
 
     def _start_renew_attestations(self):
         return run_as_background_process("renew_attestations", self._renew_attestations)
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 168c9e3f84..d50e691436 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -85,7 +85,7 @@ class GroupsServerHandler(object):
             if not is_admin:
                 raise SynapseError(403, "User is not admin in group")
 
-        defer.returnValue(group)
+        return group
 
     @defer.inlineCallbacks
     def get_group_summary(self, group_id, requester_user_id):
@@ -151,22 +151,20 @@ class GroupsServerHandler(object):
             group_id, requester_user_id
         )
 
-        defer.returnValue(
-            {
-                "profile": profile,
-                "users_section": {
-                    "users": users,
-                    "roles": roles,
-                    "total_user_count_estimate": 0,  # TODO
-                },
-                "rooms_section": {
-                    "rooms": rooms,
-                    "categories": categories,
-                    "total_room_count_estimate": 0,  # TODO
-                },
-                "user": membership_info,
-            }
-        )
+        return {
+            "profile": profile,
+            "users_section": {
+                "users": users,
+                "roles": roles,
+                "total_user_count_estimate": 0,  # TODO
+            },
+            "rooms_section": {
+                "rooms": rooms,
+                "categories": categories,
+                "total_room_count_estimate": 0,  # TODO
+            },
+            "user": membership_info,
+        }
 
     @defer.inlineCallbacks
     def update_group_summary_room(
@@ -192,7 +190,7 @@ class GroupsServerHandler(object):
             is_public=is_public,
         )
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def delete_group_summary_room(
@@ -208,7 +206,7 @@ class GroupsServerHandler(object):
             group_id=group_id, room_id=room_id, category_id=category_id
         )
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def set_group_join_policy(self, group_id, requester_user_id, content):
@@ -228,7 +226,7 @@ class GroupsServerHandler(object):
 
         yield self.store.set_group_join_policy(group_id, join_policy=join_policy)
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def get_group_categories(self, group_id, requester_user_id):
@@ -237,7 +235,7 @@ class GroupsServerHandler(object):
         yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
 
         categories = yield self.store.get_group_categories(group_id=group_id)
-        defer.returnValue({"categories": categories})
+        return {"categories": categories}
 
     @defer.inlineCallbacks
     def get_group_category(self, group_id, requester_user_id, category_id):
@@ -249,7 +247,7 @@ class GroupsServerHandler(object):
             group_id=group_id, category_id=category_id
         )
 
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def update_group_category(self, group_id, requester_user_id, category_id, content):
@@ -269,7 +267,7 @@ class GroupsServerHandler(object):
             profile=profile,
         )
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def delete_group_category(self, group_id, requester_user_id, category_id):
@@ -283,7 +281,7 @@ class GroupsServerHandler(object):
             group_id=group_id, category_id=category_id
         )
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def get_group_roles(self, group_id, requester_user_id):
@@ -292,7 +290,7 @@ class GroupsServerHandler(object):
         yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
 
         roles = yield self.store.get_group_roles(group_id=group_id)
-        defer.returnValue({"roles": roles})
+        return {"roles": roles}
 
     @defer.inlineCallbacks
     def get_group_role(self, group_id, requester_user_id, role_id):
@@ -301,7 +299,7 @@ class GroupsServerHandler(object):
         yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
 
         res = yield self.store.get_group_role(group_id=group_id, role_id=role_id)
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def update_group_role(self, group_id, requester_user_id, role_id, content):
@@ -319,7 +317,7 @@ class GroupsServerHandler(object):
             group_id=group_id, role_id=role_id, is_public=is_public, profile=profile
         )
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def delete_group_role(self, group_id, requester_user_id, role_id):
@@ -331,7 +329,7 @@ class GroupsServerHandler(object):
 
         yield self.store.remove_group_role(group_id=group_id, role_id=role_id)
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def update_group_summary_user(
@@ -355,7 +353,7 @@ class GroupsServerHandler(object):
             is_public=is_public,
         )
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id):
@@ -369,7 +367,7 @@ class GroupsServerHandler(object):
             group_id=group_id, user_id=user_id, role_id=role_id
         )
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def get_group_profile(self, group_id, requester_user_id):
@@ -391,7 +389,7 @@ class GroupsServerHandler(object):
             group_description = {key: group[key] for key in cols}
             group_description["is_openly_joinable"] = group["join_policy"] == "open"
 
-            defer.returnValue(group_description)
+            return group_description
         else:
             raise SynapseError(404, "Unknown group")
 
@@ -461,9 +459,7 @@ class GroupsServerHandler(object):
 
         # TODO: If admin add lists of users whose attestations have timed out
 
-        defer.returnValue(
-            {"chunk": chunk, "total_user_count_estimate": len(user_results)}
-        )
+        return {"chunk": chunk, "total_user_count_estimate": len(user_results)}
 
     @defer.inlineCallbacks
     def get_invited_users_in_group(self, group_id, requester_user_id):
@@ -494,9 +490,7 @@ class GroupsServerHandler(object):
                 logger.warn("Error getting profile for %s: %s", user_id, e)
             user_profiles.append(user_profile)
 
-        defer.returnValue(
-            {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
-        )
+        return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
 
     @defer.inlineCallbacks
     def get_rooms_in_group(self, group_id, requester_user_id):
@@ -533,9 +527,7 @@ class GroupsServerHandler(object):
 
         chunk.sort(key=lambda e: -e["num_joined_members"])
 
-        defer.returnValue(
-            {"chunk": chunk, "total_room_count_estimate": len(room_results)}
-        )
+        return {"chunk": chunk, "total_room_count_estimate": len(room_results)}
 
     @defer.inlineCallbacks
     def add_room_to_group(self, group_id, requester_user_id, room_id, content):
@@ -551,7 +543,7 @@ class GroupsServerHandler(object):
 
         yield self.store.add_room_to_group(group_id, room_id, is_public=is_public)
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def update_room_in_group(
@@ -574,7 +566,7 @@ class GroupsServerHandler(object):
         else:
             raise SynapseError(400, "Uknown config option")
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def remove_room_from_group(self, group_id, requester_user_id, room_id):
@@ -586,7 +578,7 @@ class GroupsServerHandler(object):
 
         yield self.store.remove_room_from_group(group_id, room_id)
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def invite_to_group(self, group_id, user_id, requester_user_id, content):
@@ -644,9 +636,9 @@ class GroupsServerHandler(object):
             )
         elif res["state"] == "invite":
             yield self.store.add_group_invite(group_id, user_id)
-            defer.returnValue({"state": "invite"})
+            return {"state": "invite"}
         elif res["state"] == "reject":
-            defer.returnValue({"state": "reject"})
+            return {"state": "reject"}
         else:
             raise SynapseError(502, "Unknown state returned by HS")
 
@@ -679,7 +671,7 @@ class GroupsServerHandler(object):
             remote_attestation=remote_attestation,
         )
 
-        defer.returnValue(local_attestation)
+        return local_attestation
 
     @defer.inlineCallbacks
     def accept_invite(self, group_id, requester_user_id, content):
@@ -699,7 +691,7 @@ class GroupsServerHandler(object):
 
         local_attestation = yield self._add_user(group_id, requester_user_id, content)
 
-        defer.returnValue({"state": "join", "attestation": local_attestation})
+        return {"state": "join", "attestation": local_attestation}
 
     @defer.inlineCallbacks
     def join_group(self, group_id, requester_user_id, content):
@@ -716,7 +708,7 @@ class GroupsServerHandler(object):
 
         local_attestation = yield self._add_user(group_id, requester_user_id, content)
 
-        defer.returnValue({"state": "join", "attestation": local_attestation})
+        return {"state": "join", "attestation": local_attestation}
 
     @defer.inlineCallbacks
     def knock(self, group_id, requester_user_id, content):
@@ -769,7 +761,7 @@ class GroupsServerHandler(object):
         if not self.hs.is_mine_id(user_id):
             yield self.store.maybe_delete_remote_profile_cache(user_id)
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def create_group(self, group_id, requester_user_id, content):
@@ -845,7 +837,7 @@ class GroupsServerHandler(object):
                 avatar_url=user_profile.get("avatar_url"),
             )
 
-        defer.returnValue({"group_id": group_id})
+        return {"group_id": group_id}
 
     @defer.inlineCallbacks
     def delete_group(self, group_id, requester_user_id):
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index e62e6cab77..8acd9f9a83 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -51,8 +51,8 @@ class AccountDataEventSource(object):
                     {"type": account_data_type, "content": content, "room_id": room_id}
                 )
 
-        defer.returnValue((results, current_stream_id))
+        return (results, current_stream_id)
 
     @defer.inlineCallbacks
     def get_pagination_rows(self, user, config, key):
-        defer.returnValue(([], config.to_id))
+        return ([], config.to_id)
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 1f1708ba7d..34574f1a12 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -193,7 +193,7 @@ class AccountValidityHandler(object):
             if threepid["medium"] == "email":
                 addresses.append(threepid["address"])
 
-        defer.returnValue(addresses)
+        return addresses
 
     @defer.inlineCallbacks
     def _get_renewal_token(self, user_id):
@@ -214,7 +214,7 @@ class AccountValidityHandler(object):
             try:
                 renewal_token = stringutils.random_string(32)
                 yield self.store.set_renewal_token_for_user(user_id, renewal_token)
-                defer.returnValue(renewal_token)
+                return renewal_token
             except StoreError:
                 attempts += 1
         raise StoreError(500, "Couldn't generate a unique string as refresh string.")
@@ -226,11 +226,19 @@ class AccountValidityHandler(object):
 
         Args:
             renewal_token (str): Token sent with the renewal request.
+        Returns:
+            bool: Whether the provided token is valid.
         """
-        user_id = yield self.store.get_user_from_renewal_token(renewal_token)
+        try:
+            user_id = yield self.store.get_user_from_renewal_token(renewal_token)
+        except StoreError:
+            defer.returnValue(False)
+
         logger.debug("Renewing an account for user %s", user_id)
         yield self.renew_account_for_user(user_id)
 
+        defer.returnValue(True)
+
     @defer.inlineCallbacks
     def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False):
         """Renews the account attached to a given user by pushing back the
@@ -254,4 +262,4 @@ class AccountValidityHandler(object):
             user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent
         )
 
-        defer.returnValue(expiration_ts)
+        return expiration_ts
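
renew_account now catches the StoreError raised for an unrecognised token and reports success as a boolean instead of letting the exception escape, so the caller can distinguish a completed renewal from an unknown or expired token. A hedged sketch of a caller acting on that boolean; the handler argument and the response bodies are assumptions, not the real renewal servlet:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def handle_renewal_request(account_validity_handler, renewal_token):
        # Illustrative only; the real renewal servlet lives elsewhere in Synapse.
        token_valid = yield account_validity_handler.renew_account(renewal_token)
        if token_valid:
            return (200, b"Your account has been successfully renewed.")
        return (404, b"Unknown or expired renewal token.")
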
diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py
index fbef2f3d38..46ac73106d 100644
--- a/synapse/handlers/acme.py
+++ b/synapse/handlers/acme.py
@@ -100,4 +100,4 @@ class AcmeHandler(object):
             logger.exception("Failed saving!")
             raise
 
-        defer.returnValue(True)
+        return True
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index e8a651e231..2f22f56ca4 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -49,7 +49,7 @@ class AdminHandler(BaseHandler):
             "devices": {"": {"sessions": [{"connections": connections}]}},
         }
 
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def get_users(self):
@@ -61,7 +61,7 @@ class AdminHandler(BaseHandler):
         """
         ret = yield self.store.get_users()
 
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def get_users_paginate(self, order, start, limit):
@@ -78,7 +78,7 @@ class AdminHandler(BaseHandler):
         """
         ret = yield self.store.get_users_paginate(order, start, limit)
 
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def search_users(self, term):
@@ -92,7 +92,7 @@ class AdminHandler(BaseHandler):
         """
         ret = yield self.store.search_users(term)
 
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def export_user_data(self, user_id, writer):
@@ -225,7 +225,7 @@ class AdminHandler(BaseHandler):
                 state = yield self.store.get_state_for_event(event_id)
                 writer.write_state(room_id, event_id, state)
 
-        defer.returnValue(writer.finished())
+        return writer.finished()
 
 
 class ExfiltrationWriter(object):
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 8f089f0e33..d1a51df6f9 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -167,8 +167,8 @@ class ApplicationServicesHandler(object):
         for user_service in user_query_services:
             is_known_user = yield self.appservice_api.query_user(user_service, user_id)
             if is_known_user:
-                defer.returnValue(True)
-        defer.returnValue(False)
+                return True
+        return False
 
     @defer.inlineCallbacks
     def query_room_alias_exists(self, room_alias):
@@ -192,7 +192,7 @@ class ApplicationServicesHandler(object):
             if is_known_alias:
                 # the alias exists now so don't query more ASes.
                 result = yield self.store.get_association_from_room_alias(room_alias)
-                defer.returnValue(result)
+                return result
 
     @defer.inlineCallbacks
     def query_3pe(self, kind, protocol, fields):
@@ -215,7 +215,7 @@ class ApplicationServicesHandler(object):
             if success:
                 ret.extend(result)
 
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def get_3pe_protocols(self, only_protocol=None):
@@ -254,7 +254,7 @@ class ApplicationServicesHandler(object):
         for p in protocols.keys():
             protocols[p] = _merge_instances(protocols[p])
 
-        defer.returnValue(protocols)
+        return protocols
 
     @defer.inlineCallbacks
     def _get_services_for_event(self, event):
@@ -276,7 +276,7 @@ class ApplicationServicesHandler(object):
             if (yield s.is_interested(event, self.store)):
                 interested_list.append(s)
 
-        defer.returnValue(interested_list)
+        return interested_list
 
     def _get_services_for_user(self, user_id):
         services = self.store.get_app_services()
@@ -293,23 +293,23 @@ class ApplicationServicesHandler(object):
         if not self.is_mine_id(user_id):
             # we don't know if they are unknown or not since it isn't one of our
             # users. We can't poke ASes.
-            defer.returnValue(False)
+            return False
             return
 
         user_info = yield self.store.get_user_by_id(user_id)
         if user_info:
-            defer.returnValue(False)
+            return False
             return
 
         # user not found; could be the AS though, so check.
         services = self.store.get_app_services()
         service_list = [s for s in services if s.sender == user_id]
-        defer.returnValue(len(service_list) == 0)
+        return len(service_list) == 0
 
     @defer.inlineCallbacks
     def _check_user_exists(self, user_id):
         unknown_user = yield self._is_unknown_user(user_id)
         if unknown_user:
             exists = yield self.query_user_exists(user_id)
-            defer.returnValue(exists)
-        defer.returnValue(True)
+            return exists
+        return True
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index d4d6574975..0f3ebf7ef8 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -155,7 +155,7 @@ class AuthHandler(BaseHandler):
         if user_id != requester.user.to_string():
             raise AuthError(403, "Invalid auth")
 
-        defer.returnValue(params)
+        return params
 
     @defer.inlineCallbacks
     def check_auth(self, flows, clientdict, clientip, password_servlet=False):
@@ -280,7 +280,7 @@ class AuthHandler(BaseHandler):
                     creds,
                     list(clientdict),
                 )
-                defer.returnValue((creds, clientdict, session["id"]))
+                return (creds, clientdict, session["id"])
 
         ret = self._auth_dict_for_flows(flows, session)
         ret["completed"] = list(creds)
@@ -307,8 +307,8 @@ class AuthHandler(BaseHandler):
         if result:
             creds[stagetype] = result
             self._save_session(sess)
-            defer.returnValue(True)
-        defer.returnValue(False)
+            return True
+        return False
 
     def get_session_id(self, clientdict):
         """
@@ -379,7 +379,7 @@ class AuthHandler(BaseHandler):
             res = yield checker(
                 authdict, clientip=clientip, password_servlet=password_servlet
             )
-            defer.returnValue(res)
+            return res
 
         # build a v1-login-style dict out of the authdict and fall back to the
         # v1 code
@@ -389,7 +389,7 @@ class AuthHandler(BaseHandler):
             raise SynapseError(400, "", Codes.MISSING_PARAM)
 
         (canonical_id, callback) = yield self.validate_login(user_id, authdict)
-        defer.returnValue(canonical_id)
+        return canonical_id
 
     @defer.inlineCallbacks
     def _check_recaptcha(self, authdict, clientip, **kwargs):
@@ -433,7 +433,7 @@ class AuthHandler(BaseHandler):
                 resp_body.get("hostname"),
             )
             if resp_body["success"]:
-                defer.returnValue(True)
+                return True
         raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
 
     def _check_email_identity(self, authdict, **kwargs):
@@ -502,7 +502,7 @@ class AuthHandler(BaseHandler):
 
         threepid["threepid_creds"] = authdict["threepid_creds"]
 
-        defer.returnValue(threepid)
+        return threepid
 
     def _get_params_recaptcha(self):
         return {"public_key": self.hs.config.recaptcha_public_key}
@@ -606,7 +606,7 @@ class AuthHandler(BaseHandler):
                 yield self.store.delete_access_token(access_token)
                 raise StoreError(400, "Login raced against device deletion")
 
-        defer.returnValue(access_token)
+        return access_token
 
     @defer.inlineCallbacks
     def check_user_exists(self, user_id):
@@ -629,8 +629,8 @@ class AuthHandler(BaseHandler):
         self.ratelimit_login_per_account(user_id)
         res = yield self._find_user_id_and_pwd_hash(user_id)
         if res is not None:
-            defer.returnValue(res[0])
-        defer.returnValue(None)
+            return res[0]
+        return None
 
     @defer.inlineCallbacks
     def _find_user_id_and_pwd_hash(self, user_id):
@@ -661,7 +661,7 @@ class AuthHandler(BaseHandler):
                 user_id,
                 user_infos.keys(),
             )
-        defer.returnValue(result)
+        return result
 
     def get_supported_login_types(self):
         """Get a the login types supported for the /login API
@@ -722,7 +722,7 @@ class AuthHandler(BaseHandler):
                 known_login_type = True
                 is_valid = yield provider.check_password(qualified_user_id, password)
                 if is_valid:
-                    defer.returnValue((qualified_user_id, None))
+                    return (qualified_user_id, None)
 
             if not hasattr(provider, "get_supported_login_types") or not hasattr(
                 provider, "check_auth"
@@ -756,7 +756,7 @@ class AuthHandler(BaseHandler):
             if result:
                 if isinstance(result, str):
                     result = (result, None)
-                defer.returnValue(result)
+                return result
 
         if login_type == LoginType.PASSWORD and self.hs.config.password_localdb_enabled:
             known_login_type = True
@@ -766,7 +766,7 @@ class AuthHandler(BaseHandler):
             )
 
             if canonical_user_id:
-                defer.returnValue((canonical_user_id, None))
+                return (canonical_user_id, None)
 
         if not known_login_type:
             raise SynapseError(400, "Unknown login type %s" % login_type)
@@ -814,9 +814,9 @@ class AuthHandler(BaseHandler):
                     if isinstance(result, str):
                         # If it's a str, set callback function to None
                         result = (result, None)
-                    defer.returnValue(result)
+                    return result
 
-        defer.returnValue((None, None))
+        return (None, None)
 
     @defer.inlineCallbacks
     def _check_local_password(self, user_id, password):
@@ -838,7 +838,7 @@ class AuthHandler(BaseHandler):
         """
         lookupres = yield self._find_user_id_and_pwd_hash(user_id)
         if not lookupres:
-            defer.returnValue(None)
+            return None
         (user_id, password_hash) = lookupres
 
         # If the password hash is None, the account has likely been deactivated
@@ -850,8 +850,8 @@ class AuthHandler(BaseHandler):
         result = yield self.validate_hash(password, password_hash)
         if not result:
             logger.warn("Failed password login for user %s", user_id)
-            defer.returnValue(None)
-        defer.returnValue(user_id)
+            return None
+        return user_id
 
     @defer.inlineCallbacks
     def validate_short_term_login_token_and_get_user_id(self, login_token):
@@ -860,12 +860,12 @@ class AuthHandler(BaseHandler):
         try:
             macaroon = pymacaroons.Macaroon.deserialize(login_token)
             user_id = auth_api.get_user_id_from_macaroon(macaroon)
-            auth_api.validate_macaroon(macaroon, "login", True, user_id)
+            auth_api.validate_macaroon(macaroon, "login", user_id)
         except Exception:
             raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)
         self.ratelimit_login_per_account(user_id)
         yield self.auth.check_auth_blocking(user_id)
-        defer.returnValue(user_id)
+        return user_id
 
     @defer.inlineCallbacks
     def delete_access_token(self, access_token):
@@ -976,7 +976,7 @@ class AuthHandler(BaseHandler):
         )
 
         yield self.store.user_delete_threepid(user_id, medium, address)
-        defer.returnValue(result)
+        return result
 
     def _save_session(self, session):
         # TODO: Persistent storage
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index e8f9da6098..5f804d1f13 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -125,7 +125,7 @@ class DeactivateAccountHandler(BaseHandler):
         # Mark the user as deactivated.
         yield self.store.set_user_deactivated_status(user_id, True)
 
-        defer.returnValue(identity_server_supports_unbinding)
+        return identity_server_supports_unbinding
 
     def _start_user_parting(self):
         """
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 99e8413092..5c1cf83c9d 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -64,7 +64,7 @@ class DeviceWorkerHandler(BaseHandler):
         for device in devices:
             _update_device_from_client_ips(device, ips)
 
-        defer.returnValue(devices)
+        return devices
 
     @defer.inlineCallbacks
     def get_device(self, user_id, device_id):
@@ -85,7 +85,7 @@ class DeviceWorkerHandler(BaseHandler):
             raise errors.NotFoundError
         ips = yield self.store.get_last_client_ip_by_device(user_id, device_id)
         _update_device_from_client_ips(device, ips)
-        defer.returnValue(device)
+        return device
 
     @measure_func("device.get_user_ids_changed")
     @defer.inlineCallbacks
@@ -200,9 +200,7 @@ class DeviceWorkerHandler(BaseHandler):
             possibly_joined = []
             possibly_left = []
 
-        defer.returnValue(
-            {"changed": list(possibly_joined), "left": list(possibly_left)}
-        )
+        return {"changed": list(possibly_joined), "left": list(possibly_left)}
 
 
 class DeviceHandler(DeviceWorkerHandler):
@@ -211,12 +209,12 @@ class DeviceHandler(DeviceWorkerHandler):
 
         self.federation_sender = hs.get_federation_sender()
 
-        self._edu_updater = DeviceListEduUpdater(hs, self)
+        self.device_list_updater = DeviceListUpdater(hs, self)
 
         federation_registry = hs.get_federation_registry()
 
         federation_registry.register_edu_handler(
-            "m.device_list_update", self._edu_updater.incoming_device_list_update
+            "m.device_list_update", self.device_list_updater.incoming_device_list_update
         )
         federation_registry.register_query_handler(
             "user_devices", self.on_federation_query_user_devices
@@ -250,7 +248,7 @@ class DeviceHandler(DeviceWorkerHandler):
             )
             if new_device:
                 yield self.notify_device_update(user_id, [device_id])
-            defer.returnValue(device_id)
+            return device_id
 
         # if the device id is not specified, we'll autogen one, but loop a few
         # times in case of a clash.
@@ -264,7 +262,7 @@ class DeviceHandler(DeviceWorkerHandler):
             )
             if new_device:
                 yield self.notify_device_update(user_id, [device_id])
-                defer.returnValue(device_id)
+                return device_id
             attempts += 1
 
         raise errors.StoreError(500, "Couldn't generate a device ID.")
@@ -411,9 +409,7 @@ class DeviceHandler(DeviceWorkerHandler):
     @defer.inlineCallbacks
     def on_federation_query_user_devices(self, user_id):
         stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
-        defer.returnValue(
-            {"user_id": user_id, "stream_id": stream_id, "devices": devices}
-        )
+        return {"user_id": user_id, "stream_id": stream_id, "devices": devices}
 
     @defer.inlineCallbacks
     def user_left_room(self, user, room_id):
@@ -430,7 +426,7 @@ def _update_device_from_client_ips(device, client_ips):
     device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")})
 
 
-class DeviceListEduUpdater(object):
+class DeviceListUpdater(object):
     "Handles incoming device list updates from federation and updates the DB"
 
     def __init__(self, hs, device_handler):
@@ -523,75 +519,7 @@ class DeviceListEduUpdater(object):
             logger.debug("Need to re-sync devices for %r? %r", user_id, resync)
 
             if resync:
-                # Fetch all devices for the user.
-                origin = get_domain_from_id(user_id)
-                try:
-                    result = yield self.federation.query_user_devices(origin, user_id)
-                except (
-                    NotRetryingDestination,
-                    RequestSendFailed,
-                    HttpResponseException,
-                ):
-                    # TODO: Remember that we are now out of sync and try again
-                    # later
-                    logger.warn("Failed to handle device list update for %s", user_id)
-                    # We abort on exceptions rather than accepting the update
-                    # as otherwise synapse will 'forget' that its device list
-                    # is out of date. If we bail then we will retry the resync
-                    # next time we get a device list update for this user_id.
-                    # This makes it more likely that the device lists will
-                    # eventually become consistent.
-                    return
-                except FederationDeniedError as e:
-                    logger.info(e)
-                    return
-                except Exception:
-                    # TODO: Remember that we are now out of sync and try again
-                    # later
-                    logger.exception(
-                        "Failed to handle device list update for %s", user_id
-                    )
-                    return
-
-                stream_id = result["stream_id"]
-                devices = result["devices"]
-
-                # If the remote server has more than ~1000 devices for this user
-                # we assume that something is going horribly wrong (e.g. a bot
-                # that logs in and creates a new device every time it tries to
-                # send a message).  Maintaining lots of devices per user in the
-                # cache can cause serious performance issues as if this request
-                # takes more than 60s to complete, internal replication from the
-                # inbound federation worker to the synapse master may time out
-                # causing the inbound federation to fail and causing the remote
-                # server to retry, causing a DoS.  So in this scenario we give
-                # up on storing the total list of devices and only handle the
-                # delta instead.
-                if len(devices) > 1000:
-                    logger.warn(
-                        "Ignoring device list snapshot for %s as it has >1K devs (%d)",
-                        user_id,
-                        len(devices),
-                    )
-                    devices = []
-
-                for device in devices:
-                    logger.debug(
-                        "Handling resync update %r/%r, ID: %r",
-                        user_id,
-                        device["device_id"],
-                        stream_id,
-                    )
-
-                yield self.store.update_remote_device_list_cache(
-                    user_id, devices, stream_id
-                )
-                device_ids = [device["device_id"] for device in devices]
-                yield self.device_handler.notify_device_update(user_id, device_ids)
-
-                # We clobber the seen updates since we've re-synced from a given
-                # point.
-                self._seen_updates[user_id] = set([stream_id])
+                yield self.user_device_resync(user_id)
             else:
                 # Simply update the single device, since we know that is the only
                 # change (because of the single prev_id matching the current cache)
@@ -623,7 +551,7 @@ class DeviceListEduUpdater(object):
         for _, stream_id, prev_ids, _ in updates:
             if not prev_ids:
                 # We always do a resync if there are no previous IDs
-                defer.returnValue(True)
+                return True
 
             for prev_id in prev_ids:
                 if prev_id == extremity:
@@ -633,8 +561,82 @@ class DeviceListEduUpdater(object):
                 elif prev_id in stream_id_in_updates:
                     continue
                 else:
-                    defer.returnValue(True)
+                    return True
 
             stream_id_in_updates.add(stream_id)
 
-        defer.returnValue(False)
+        return False
+
+    @defer.inlineCallbacks
+    def user_device_resync(self, user_id):
+        """Fetches all devices for a user and updates the device cache with them.
+
+        Args:
+            user_id (str): The ID of the user whose device list will be updated.
+        Returns:
+            Deferred[dict]: the response of the federation request, with the
+            device info under the "devices" key; see
+            https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
+        """
+        # Fetch all devices for the user.
+        origin = get_domain_from_id(user_id)
+        try:
+            result = yield self.federation.query_user_devices(origin, user_id)
+        except (NotRetryingDestination, RequestSendFailed, HttpResponseException):
+            # TODO: Remember that we are now out of sync and try again
+            # later
+            logger.warn("Failed to handle device list update for %s", user_id)
+            # We abort on exceptions rather than accepting the update
+            # as otherwise synapse will 'forget' that its device list
+            # is out of date. If we bail then we will retry the resync
+            # next time we get a device list update for this user_id.
+            # This makes it more likely that the device lists will
+            # eventually become consistent.
+            return
+        except FederationDeniedError as e:
+            logger.info(e)
+            return
+        except Exception:
+            # TODO: Remember that we are now out of sync and try again
+            # later
+            logger.exception("Failed to handle device list update for %s", user_id)
+            return
+        stream_id = result["stream_id"]
+        devices = result["devices"]
+
+        # If the remote server has more than ~1000 devices for this user
+        # we assume that something is going horribly wrong (e.g. a bot
+        # that logs in and creates a new device every time it tries to
+        # send a message).  Maintaining lots of devices per user in the
+        # cache can cause serious performance issues as if this request
+        # takes more than 60s to complete, internal replication from the
+        # inbound federation worker to the synapse master may time out
+        # causing the inbound federation to fail and causing the remote
+        # server to retry, causing a DoS.  So in this scenario we give
+        # up on storing the total list of devices and only handle the
+        # delta instead.
+        if len(devices) > 1000:
+            logger.warn(
+                "Ignoring device list snapshot for %s as it has >1K devs (%d)",
+                user_id,
+                len(devices),
+            )
+            devices = []
+
+        for device in devices:
+            logger.debug(
+                "Handling resync update %r/%r, ID: %r",
+                user_id,
+                device["device_id"],
+                stream_id,
+            )
+
+        yield self.store.update_remote_device_list_cache(user_id, devices, stream_id)
+        device_ids = [device["device_id"] for device in devices]
+        yield self.device_handler.notify_device_update(user_id, device_ids)
+
+        # We clobber the seen updates since we've re-synced from a given
+        # point.
+        self._seen_updates[user_id] = set([stream_id])
+
+        defer.returnValue(result)
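
As a rough illustration of the resync rule the updater applies above (a full
user_device_resync whenever an incoming update cannot be chained onto the cached
state), the following self-contained sketch uses simplified names and is not
Synapse's actual _need_to_do_resync implementation:

    def need_to_resync(extremity, updates):
        """updates: iterable of (device_id, stream_id, prev_ids, content) tuples
        received over federation; extremity is the stream_id of the most recent
        update already in the local cache."""
        stream_ids_in_updates = set()
        for _, stream_id, prev_ids, _ in updates:
            if not prev_ids:
                # Nothing to chain onto: we have to resync.
                return True
            for prev_id in prev_ids:
                if prev_id != extremity and prev_id not in stream_ids_in_updates:
                    # References an update we have never seen: resync.
                    return True
            stream_ids_in_updates.add(stream_id)
        return False

    # An update chained directly onto the cache extremity needs no resync...
    assert need_to_resync(5, [("DEV1", 6, [5], {})]) is False
    # ...but one referencing an unknown prev_id does.
    assert need_to_resync(5, [("DEV1", 7, [6], {})]) is True
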
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 42d5b3db30..526379c6f7 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -210,7 +210,7 @@ class DirectoryHandler(BaseHandler):
         except AuthError as e:
             logger.info("Failed to update alias events: %s", e)
 
-        defer.returnValue(room_id)
+        return room_id
 
     @defer.inlineCallbacks
     def delete_appservice_association(self, service, room_alias):
@@ -229,7 +229,7 @@ class DirectoryHandler(BaseHandler):
 
         room_id = yield self.store.delete_room_alias(room_alias)
 
-        defer.returnValue(room_id)
+        return room_id
 
     @defer.inlineCallbacks
     def get_association(self, room_alias):
@@ -277,8 +277,7 @@ class DirectoryHandler(BaseHandler):
         else:
             servers = list(servers)
 
-        defer.returnValue({"room_id": room_id, "servers": servers})
-        return
+        return {"room_id": room_id, "servers": servers}
 
     @defer.inlineCallbacks
     def on_directory_query(self, args):
@@ -289,7 +288,7 @@ class DirectoryHandler(BaseHandler):
         result = yield self.get_association_from_room_alias(room_alias)
 
         if result is not None:
-            defer.returnValue({"room_id": result.room_id, "servers": result.servers})
+            return {"room_id": result.room_id, "servers": result.servers}
         else:
             raise SynapseError(
                 404,
@@ -342,7 +341,7 @@ class DirectoryHandler(BaseHandler):
             # Query AS to see if it exists
             as_handler = self.appservice_handler
             result = yield as_handler.query_room_alias_exists(room_alias)
-        defer.returnValue(result)
+        return result
 
     def can_modify_alias(self, alias, user_id=None):
         # Any application service "interested" in an alias they are regexing on
@@ -369,10 +368,10 @@ class DirectoryHandler(BaseHandler):
         creator = yield self.store.get_room_alias_creator(alias.to_string())
 
         if creator is not None and creator == user_id:
-            defer.returnValue(True)
+            return True
 
         is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))
-        defer.returnValue(is_admin)
+        return is_admin
 
     @defer.inlineCallbacks
     def edit_published_room_list(self, requester, room_id, visibility):
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index fdfe8611b6..1f90b0d278 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -25,6 +25,7 @@ from twisted.internet import defer
 from synapse.api.errors import CodeMessageException, SynapseError
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.types import UserID, get_domain_from_id
+from synapse.util import unwrapFirstError
 from synapse.util.retryutils import NotRetryingDestination
 
 logger = logging.getLogger(__name__)
@@ -65,6 +66,7 @@ class E2eKeysHandler(object):
             }
         }
         """
+
         device_keys_query = query_body.get("device_keys", {})
 
         # separate users by domain.
@@ -121,7 +123,56 @@ class E2eKeysHandler(object):
         # Now fetch any devices that we don't have in our cache
         @defer.inlineCallbacks
         def do_remote_query(destination):
+            """This is called when we are querying the device list of a user on
+            a remote homeserver and their device list is not in the device list
+            cache. If we share a room with this user and we're not querying for
+            specific devices, we will update the cache with their device list."""
+
             destination_query = remote_queries_not_in_cache[destination]
+
+            # We first consider whether we wish to update the device list cache with
+            # the user's device list. We want to track a user's devices when the
+            # authenticated user shares a room with the queried user and the query
+            # has not specified a particular device.
+            # If we update the cache for the queried user, we remove them from further
+            # queries. We use the more efficient batched query_client_keys for all
+            # remaining users.
+            user_ids_updated = []
+            for (user_id, device_list) in destination_query.items():
+                if user_id in user_ids_updated:
+                    continue
+
+                if device_list:
+                    continue
+
+                room_ids = yield self.store.get_rooms_for_user(user_id)
+                if not room_ids:
+                    continue
+
+                # We've decided we're sharing a room with this user and should
+                # probably be tracking their device lists. However, we haven't
+                # done an initial sync on the device list so we do it now.
+                try:
+                    user_devices = yield self.device_handler.device_list_updater.user_device_resync(
+                        user_id
+                    )
+                    user_devices = user_devices["devices"]
+                    # Map each returned device to its keys for this user.
+                    results[user_id] = {
+                        device["device_id"]: device["keys"] for device in user_devices
+                    }
+                    user_ids_updated.append(user_id)
+                except Exception as e:
+                    failures[destination] = _exception_to_failure(e)
+
+            if len(destination_query) == len(user_ids_updated):
+                # We've updated all the users in the query and we do not need to
+                # make any further remote calls.
+                return
+
+            # Remove all the users from the query which we have updated
+            for user_id in user_ids_updated:
+                destination_query.pop(user_id)
+
             try:
                 remote_result = yield self.federation.query_client_keys(
                     destination, {"device_keys": destination_query}, timeout=timeout
@@ -132,7 +183,8 @@ class E2eKeysHandler(object):
                         results[user_id] = keys
 
             except Exception as e:
-                failures[destination] = _exception_to_failure(e)
+                failure = _exception_to_failure(e)
+                failures[destination] = failure
 
         yield make_deferred_yieldable(
             defer.gatherResults(
@@ -141,10 +193,10 @@ class E2eKeysHandler(object):
                     for destination in remote_queries_not_in_cache
                 ],
                 consumeErrors=True,
-            )
+            ).addErrback(unwrapFirstError)
         )
 
-        defer.returnValue({"device_keys": results, "failures": failures})
+        return {"device_keys": results, "failures": failures}
 
     @defer.inlineCallbacks
     def query_local_devices(self, query):
@@ -189,7 +241,7 @@ class E2eKeysHandler(object):
                     r["unsigned"]["device_display_name"] = display_name
                 result_dict[user_id][device_id] = r
 
-        defer.returnValue(result_dict)
+        return result_dict
 
     @defer.inlineCallbacks
     def on_federation_query_client_keys(self, query_body):
@@ -197,7 +249,7 @@ class E2eKeysHandler(object):
         """
         device_keys_query = query_body.get("device_keys", {})
         res = yield self.query_local_devices(device_keys_query)
-        defer.returnValue({"device_keys": res})
+        return {"device_keys": res}
 
     @defer.inlineCallbacks
     def claim_one_time_keys(self, query, timeout):
@@ -234,8 +286,10 @@ class E2eKeysHandler(object):
                 for user_id, keys in remote_result["one_time_keys"].items():
                     if user_id in device_keys:
                         json_result[user_id] = keys
+
             except Exception as e:
-                failures[destination] = _exception_to_failure(e)
+                failure = _exception_to_failure(e)
+                failures[destination] = failure
 
         yield make_deferred_yieldable(
             defer.gatherResults(
@@ -259,10 +313,11 @@ class E2eKeysHandler(object):
             ),
         )
 
-        defer.returnValue({"one_time_keys": json_result, "failures": failures})
+        return {"one_time_keys": json_result, "failures": failures}
 
     @defer.inlineCallbacks
     def upload_keys_for_user(self, user_id, device_id, keys):
+
         time_now = self.clock.time_msec()
 
         # TODO: Validate the JSON to make sure it has the right keys.
@@ -297,7 +352,7 @@ class E2eKeysHandler(object):
 
         result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
 
-        defer.returnValue({"one_time_key_counts": result})
+        return {"one_time_key_counts": result}
 
     @defer.inlineCallbacks
     def _upload_one_time_keys_for_user(
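
The comments in do_remote_query above describe which users get a full device-list
resync and which remain in the batched key query. That decision reduces to
something like this standalone sketch (hypothetical helper names, not Synapse's
API):

    def split_query(destination_query, rooms_shared_with):
        """destination_query: dict of user_id -> list of requested device IDs.
        rooms_shared_with: callable returning the rooms we share with a user."""
        to_resync = []   # users whose full device list we fetch and cache
        to_batch = {}    # users left in the batched query_client_keys request
        for user_id, device_ids in destination_query.items():
            if not device_ids and rooms_shared_with(user_id):
                to_resync.append(user_id)
            else:
                to_batch[user_id] = device_ids
        return to_resync, to_batch

    resync, batch = split_query(
        {"@a:remote": [], "@b:remote": ["DEVICE1"]},
        lambda user_id: ["!room:local"] if user_id == "@a:remote" else [],
    )
    assert resync == ["@a:remote"]
    assert batch == {"@b:remote": ["DEVICE1"]}
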
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index ebd807bca6..41b871fc59 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -84,7 +84,7 @@ class E2eRoomKeysHandler(object):
                 user_id, version, room_id, session_id
             )
 
-            defer.returnValue(results)
+            return results
 
     @defer.inlineCallbacks
     def delete_room_keys(self, user_id, version, room_id=None, session_id=None):
@@ -262,7 +262,7 @@ class E2eRoomKeysHandler(object):
             new_version = yield self.store.create_e2e_room_keys_version(
                 user_id, version_info
             )
-            defer.returnValue(new_version)
+            return new_version
 
     @defer.inlineCallbacks
     def get_version_info(self, user_id, version=None):
@@ -292,7 +292,7 @@ class E2eRoomKeysHandler(object):
                     raise NotFoundError("Unknown backup version")
                 else:
                     raise
-            defer.returnValue(res)
+            return res
 
     @defer.inlineCallbacks
     def delete_version(self, user_id, version=None):
@@ -350,4 +350,4 @@ class E2eRoomKeysHandler(object):
                 user_id, version, version_info
             )
 
-            defer.returnValue({})
+            return {}
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 6a38328af3..2f1f10a9af 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -143,7 +143,7 @@ class EventStreamHandler(BaseHandler):
                 "end": tokens[1].to_string(),
             }
 
-            defer.returnValue(chunk)
+            return chunk
 
 
 class EventHandler(BaseHandler):
@@ -166,7 +166,7 @@ class EventHandler(BaseHandler):
         event = yield self.store.get_event(event_id, check_room_id=room_id)
 
         if not event:
-            defer.returnValue(None)
+            return None
             return
 
         users = yield self.store.get_users_in_room(event.room_id)
@@ -179,4 +179,4 @@ class EventHandler(BaseHandler):
         if not filtered:
             raise AuthError(403, "You don't have permission to access that event.")
 
-        defer.returnValue(event)
+        return event
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 57be968c67..c86903b98b 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -210,7 +210,7 @@ class FederationHandler(BaseHandler):
                 event_id,
                 origin,
             )
-            defer.returnValue(None)
+            return None
 
         state = None
         auth_chain = []
@@ -676,7 +676,7 @@ class FederationHandler(BaseHandler):
         events = [e for e in events if e.event_id not in seen_events]
 
         if not events:
-            defer.returnValue([])
+            return []
 
         event_map = {e.event_id: e for e in events}
 
@@ -838,7 +838,7 @@ class FederationHandler(BaseHandler):
             # TODO: We can probably do something more clever here.
             yield self._handle_new_event(dest, event, backfilled=True)
 
-        defer.returnValue(events)
+        return events
 
     @defer.inlineCallbacks
     def maybe_backfill(self, room_id, current_depth):
@@ -894,7 +894,7 @@ class FederationHandler(BaseHandler):
         )
 
         if not filtered_extremities:
-            defer.returnValue(False)
+            return False
 
         # Check if we reached a point where we should start backfilling.
         sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
@@ -965,7 +965,7 @@ class FederationHandler(BaseHandler):
                     # If this succeeded then we probably already have the
                     # appropriate stuff.
                     # TODO: We can probably do something more intelligent here.
-                    defer.returnValue(True)
+                    return True
                 except SynapseError as e:
                     logger.info("Failed to backfill from %s because %s", dom, e)
                     continue
@@ -978,6 +978,9 @@ class FederationHandler(BaseHandler):
                 except NotRetryingDestination as e:
                     logger.info(str(e))
                     continue
+                except RequestSendFailed as e:
+                    logger.info("Failed to get backfill from %s because %s", dom, e)
+                    continue
                 except FederationDeniedError as e:
                     logger.info(e)
                     continue
@@ -985,11 +988,11 @@ class FederationHandler(BaseHandler):
                     logger.exception("Failed to backfill from %s because %s", dom, e)
                     continue
 
-            defer.returnValue(False)
+            return False
 
         success = yield try_backfill(likely_domains)
         if success:
-            defer.returnValue(True)
+            return True
 
         # Huh, well *those* domains didn't work out. Lets try some domains
         # from the time.
@@ -1031,11 +1034,11 @@ class FederationHandler(BaseHandler):
                 [dom for dom, _ in likely_domains if dom not in tried_domains]
             )
             if success:
-                defer.returnValue(True)
+                return True
 
             tried_domains.update(dom for dom, _ in likely_domains)
 
-        defer.returnValue(False)
+        return False
 
     def _sanity_check_event(self, ev):
         """
@@ -1082,7 +1085,7 @@ class FederationHandler(BaseHandler):
             pdu=event,
         )
 
-        defer.returnValue(pdu)
+        return pdu
 
     @defer.inlineCallbacks
     def on_event_auth(self, event_id):
@@ -1090,7 +1093,7 @@ class FederationHandler(BaseHandler):
         auth = yield self.store.get_auth_chain(
             [auth_id for auth_id in event.auth_event_ids()], include_given=True
         )
-        defer.returnValue([e for e in auth])
+        return [e for e in auth]
 
     @log_function
     @defer.inlineCallbacks
@@ -1177,7 +1180,7 @@ class FederationHandler(BaseHandler):
 
             run_in_background(self._handle_queued_pdus, room_queue)
 
-        defer.returnValue(True)
+        return True
 
     @defer.inlineCallbacks
     def _handle_queued_pdus(self, room_queue):
@@ -1204,11 +1207,28 @@ class FederationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     @log_function
-    def on_make_join_request(self, room_id, user_id):
+    def on_make_join_request(self, origin, room_id, user_id):
         """ We've received a /make_join/ request, so we create a partial
         join event for the room and return that. We do *not* persist or
         process it until the other server has signed it and sent it back.
+
+        Args:
+            origin (str): The (verified) server name of the requesting server.
+            room_id (str): Room to create join event in
+            user_id (str): The user to create the join for
+
+        Returns:
+            Deferred[FrozenEvent]
         """
+
+        if get_domain_from_id(user_id) != origin:
+            logger.info(
+                "Got /make_join request for user %r from different origin %s, ignoring",
+                user_id,
+                origin,
+            )
+            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
         event_content = {"membership": Membership.JOIN}
 
         room_version = yield self.store.get_room_version(room_id)
@@ -1247,7 +1267,7 @@ class FederationHandler(BaseHandler):
             room_version, event, context, do_sig_check=False
         )
 
-        defer.returnValue(event)
+        return event
 
     @defer.inlineCallbacks
     @log_function
@@ -1308,7 +1328,7 @@ class FederationHandler(BaseHandler):
 
         state = yield self.store.get_events(list(prev_state_ids.values()))
 
-        defer.returnValue({"state": list(state.values()), "auth_chain": auth_chain})
+        return {"state": list(state.values()), "auth_chain": auth_chain}
 
     @defer.inlineCallbacks
     def on_invite_request(self, origin, pdu):
@@ -1364,7 +1384,7 @@ class FederationHandler(BaseHandler):
         context = yield self.state_handler.compute_event_context(event)
         yield self.persist_events_and_notify([(event, context)])
 
-        defer.returnValue(event)
+        return event
 
     @defer.inlineCallbacks
     def do_remotely_reject_invite(self, target_hosts, room_id, user_id):
@@ -1389,7 +1409,7 @@ class FederationHandler(BaseHandler):
         context = yield self.state_handler.compute_event_context(event)
         yield self.persist_events_and_notify([(event, context)])
 
-        defer.returnValue(event)
+        return event
 
     @defer.inlineCallbacks
     def _make_and_verify_event(
@@ -1407,15 +1427,31 @@ class FederationHandler(BaseHandler):
         assert event.user_id == user_id
         assert event.state_key == user_id
         assert event.room_id == room_id
-        defer.returnValue((origin, event, format_ver))
+        return (origin, event, format_ver)
 
     @defer.inlineCallbacks
     @log_function
-    def on_make_leave_request(self, room_id, user_id):
+    def on_make_leave_request(self, origin, room_id, user_id):
         """ We've received a /make_leave/ request, so we create a partial
         leave event for the room and return that. We do *not* persist or
         process it until the other server has signed it and sent it back.
+
+        Args:
+            origin (str): The (verified) server name of the requesting server.
+            room_id (str): Room to create leave event in
+            user_id (str): The user to create the leave for
+
+        Returns:
+            Deferred[FrozenEvent]
         """
+        if get_domain_from_id(user_id) != origin:
+            logger.info(
+                "Got /make_leave request for user %r from different origin %s, ignoring",
+                user_id,
+                origin,
+            )
+            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
         room_version = yield self.store.get_room_version(room_id)
         builder = self.event_builder_factory.new(
             room_version,
@@ -1451,7 +1487,7 @@ class FederationHandler(BaseHandler):
             logger.warn("Failed to create new leave %r because %s", event, e)
             raise e
 
-        defer.returnValue(event)
+        return event
 
     @defer.inlineCallbacks
     @log_function
@@ -1484,7 +1520,7 @@ class FederationHandler(BaseHandler):
             event.signatures,
         )
 
-        defer.returnValue(None)
+        return None
 
     @defer.inlineCallbacks
     def get_state_for_pdu(self, room_id, event_id):
@@ -1512,9 +1548,9 @@ class FederationHandler(BaseHandler):
                     del results[(event.type, event.state_key)]
 
             res = list(results.values())
-            defer.returnValue(res)
+            return res
         else:
-            defer.returnValue([])
+            return []
 
     @defer.inlineCallbacks
     def get_state_ids_for_pdu(self, room_id, event_id):
@@ -1539,9 +1575,9 @@ class FederationHandler(BaseHandler):
                 else:
                     results.pop((event.type, event.state_key), None)
 
-            defer.returnValue(list(results.values()))
+            return list(results.values())
         else:
-            defer.returnValue([])
+            return []
 
     @defer.inlineCallbacks
     @log_function
@@ -1554,7 +1590,7 @@ class FederationHandler(BaseHandler):
 
         events = yield filter_events_for_server(self.store, origin, events)
 
-        defer.returnValue(events)
+        return events
 
     @defer.inlineCallbacks
     @log_function
@@ -1584,9 +1620,9 @@ class FederationHandler(BaseHandler):
 
             events = yield filter_events_for_server(self.store, origin, [event])
             event = events[0]
-            defer.returnValue(event)
+            return event
         else:
-            defer.returnValue(None)
+            return None
 
     def get_min_depth_for_context(self, context):
         return self.store.get_min_depth(context)
@@ -1618,7 +1654,7 @@ class FederationHandler(BaseHandler):
                     self.store.remove_push_actions_from_staging, event.event_id
                 )
 
-        defer.returnValue(context)
+        return context
 
     @defer.inlineCallbacks
     def _handle_new_events(self, origin, event_infos, backfilled=False):
@@ -1641,7 +1677,7 @@ class FederationHandler(BaseHandler):
                     auth_events=ev_info.get("auth_events"),
                     backfilled=backfilled,
                 )
-            defer.returnValue(res)
+            return res
 
         contexts = yield make_deferred_yieldable(
             defer.gatherResults(
@@ -1800,7 +1836,7 @@ class FederationHandler(BaseHandler):
         if event.type == EventTypes.GuestAccess and not context.rejected:
             yield self.maybe_kick_guest_users(event)
 
-        defer.returnValue(context)
+        return context
 
     @defer.inlineCallbacks
     def _check_for_soft_fail(self, event, state, backfilled):
@@ -1919,7 +1955,7 @@ class FederationHandler(BaseHandler):
 
         logger.debug("on_query_auth returning: %s", ret)
 
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def on_get_missing_events(
@@ -1942,7 +1978,7 @@ class FederationHandler(BaseHandler):
             self.store, origin, missing_events
         )
 
-        defer.returnValue(missing_events)
+        return missing_events
 
     @defer.inlineCallbacks
     @log_function
@@ -2418,16 +2454,14 @@ class FederationHandler(BaseHandler):
 
         logger.debug("construct_auth_difference returning")
 
-        defer.returnValue(
-            {
-                "auth_chain": local_auth,
-                "rejects": {
-                    e.event_id: {"reason": reason_map[e.event_id], "proof": None}
-                    for e in base_remote_rejected
-                },
-                "missing": [e.event_id for e in missing_locals],
-            }
-        )
+        return {
+            "auth_chain": local_auth,
+            "rejects": {
+                e.event_id: {"reason": reason_map[e.event_id], "proof": None}
+                for e in base_remote_rejected
+            },
+            "missing": [e.event_id for e in missing_locals],
+        }
 
     @defer.inlineCallbacks
     @log_function
@@ -2575,7 +2609,7 @@ class FederationHandler(BaseHandler):
             builder=builder
         )
         EventValidator().validate_new(event)
-        defer.returnValue((event, context))
+        return (event, context)
 
     @defer.inlineCallbacks
     def _check_signature(self, event, context):
@@ -2765,3 +2799,28 @@ class FederationHandler(BaseHandler):
             )
         else:
             return user_joined_room(self.distributor, user, room_id)
+
+    @defer.inlineCallbacks
+    def get_room_complexity(self, remote_room_hosts, room_id):
+        """
+        Fetch the complexity of a remote room over federation.
+
+        Args:
+            remote_room_hosts (list[str]): The remote servers to ask.
+            room_id (str): The room ID to ask about.
+
+        Returns:
+            Deferred[dict] or Deferred[None]: Dict contains the complexity
+            metric versions, while None means we could not fetch the complexity.
+        """
+
+        for host in remote_room_hosts:
+            res = yield self.federation_client.get_room_complexity(host, room_id)
+
+            # We got a result, return it.
+            if res:
+                defer.returnValue(res)
+
+        # We fell off the bottom, couldn't get the complexity from anyone. Oh
+        # well.
+        defer.returnValue(None)
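
The origin check now shared by on_make_join_request and on_make_leave_request
amounts to comparing the user's domain with the verified origin. A minimal
standalone sketch of that idea, using a generic exception in place of
SynapseError:

    def check_user_matches_origin(user_id, origin):
        """Reject make_join / make_leave requests for users that are not local
        to the (verified) requesting server; user_id looks like "@local:domain"."""
        domain = user_id.split(":", 1)[1]   # same idea as get_domain_from_id
        if domain != origin:
            # Synapse raises SynapseError(403, "User not from origin") here.
            raise PermissionError("User not from origin")

    check_user_matches_origin("@alice:example.org", "example.org")   # accepted
    try:
        check_user_matches_origin("@alice:other.example", "example.org")
    except PermissionError:
        pass                                                         # rejected
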
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 7da63bb643..46eb9ee88b 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -126,9 +126,12 @@ class GroupsLocalHandler(object):
                 group_id, requester_user_id
             )
         else:
-            res = yield self.transport_client.get_group_summary(
-                get_domain_from_id(group_id), group_id, requester_user_id
-            )
+            try:
+                res = yield self.transport_client.get_group_summary(
+                    get_domain_from_id(group_id), group_id, requester_user_id
+                )
+            except RequestSendFailed:
+                raise SynapseError(502, "Failed to contact group server")
 
             group_server_name = get_domain_from_id(group_id)
 
@@ -162,7 +165,7 @@ class GroupsLocalHandler(object):
 
         res.setdefault("user", {})["is_publicised"] = is_publicised
 
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def create_group(self, group_id, user_id, content):
@@ -183,9 +186,12 @@ class GroupsLocalHandler(object):
 
             content["user_profile"] = yield self.profile_handler.get_profile(user_id)
 
-            res = yield self.transport_client.create_group(
-                get_domain_from_id(group_id), group_id, user_id, content
-            )
+            try:
+                res = yield self.transport_client.create_group(
+                    get_domain_from_id(group_id), group_id, user_id, content
+                )
+            except RequestSendFailed:
+                raise SynapseError(502, "Failed to contact group server")
 
             remote_attestation = res["attestation"]
             yield self.attestations.verify_attestation(
@@ -207,7 +213,7 @@ class GroupsLocalHandler(object):
         )
         self.notifier.on_new_event("groups_key", token, users=[user_id])
 
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def get_users_in_group(self, group_id, requester_user_id):
@@ -217,13 +223,16 @@ class GroupsLocalHandler(object):
             res = yield self.groups_server_handler.get_users_in_group(
                 group_id, requester_user_id
             )
-            defer.returnValue(res)
+            return res
 
         group_server_name = get_domain_from_id(group_id)
 
-        res = yield self.transport_client.get_users_in_group(
-            get_domain_from_id(group_id), group_id, requester_user_id
-        )
+        try:
+            res = yield self.transport_client.get_users_in_group(
+                get_domain_from_id(group_id), group_id, requester_user_id
+            )
+        except RequestSendFailed:
+            raise SynapseError(502, "Failed to contact group server")
 
         chunk = res["chunk"]
         valid_entries = []
@@ -244,7 +253,7 @@ class GroupsLocalHandler(object):
 
         res["chunk"] = valid_entries
 
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def join_group(self, group_id, user_id, content):
@@ -258,9 +267,12 @@ class GroupsLocalHandler(object):
             local_attestation = self.attestations.create_attestation(group_id, user_id)
             content["attestation"] = local_attestation
 
-            res = yield self.transport_client.join_group(
-                get_domain_from_id(group_id), group_id, user_id, content
-            )
+            try:
+                res = yield self.transport_client.join_group(
+                    get_domain_from_id(group_id), group_id, user_id, content
+                )
+            except RequestSendFailed:
+                raise SynapseError(502, "Failed to contact group server")
 
             remote_attestation = res["attestation"]
 
@@ -285,7 +297,7 @@ class GroupsLocalHandler(object):
         )
         self.notifier.on_new_event("groups_key", token, users=[user_id])
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def accept_invite(self, group_id, user_id, content):
@@ -299,9 +311,12 @@ class GroupsLocalHandler(object):
             local_attestation = self.attestations.create_attestation(group_id, user_id)
             content["attestation"] = local_attestation
 
-            res = yield self.transport_client.accept_group_invite(
-                get_domain_from_id(group_id), group_id, user_id, content
-            )
+            try:
+                res = yield self.transport_client.accept_group_invite(
+                    get_domain_from_id(group_id), group_id, user_id, content
+                )
+            except RequestSendFailed:
+                raise SynapseError(502, "Failed to contact group server")
 
             remote_attestation = res["attestation"]
 
@@ -326,7 +341,7 @@ class GroupsLocalHandler(object):
         )
         self.notifier.on_new_event("groups_key", token, users=[user_id])
 
-        defer.returnValue({})
+        return {}
 
     @defer.inlineCallbacks
     def invite(self, group_id, user_id, requester_user_id, config):
@@ -338,15 +353,18 @@ class GroupsLocalHandler(object):
                 group_id, user_id, requester_user_id, content
             )
         else:
-            res = yield self.transport_client.invite_to_group(
-                get_domain_from_id(group_id),
-                group_id,
-                user_id,
-                requester_user_id,
-                content,
-            )
+            try:
+                res = yield self.transport_client.invite_to_group(
+                    get_domain_from_id(group_id),
+                    group_id,
+                    user_id,
+                    requester_user_id,
+                    content,
+                )
+            except RequestSendFailed:
+                raise SynapseError(502, "Failed to contact group server")
 
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def on_invite(self, group_id, user_id, content):
@@ -377,7 +395,7 @@ class GroupsLocalHandler(object):
             logger.warn("No profile for user %s: %s", user_id, e)
             user_profile = {}
 
-        defer.returnValue({"state": "invite", "user_profile": user_profile})
+        return {"state": "invite", "user_profile": user_profile}
 
     @defer.inlineCallbacks
     def remove_user_from_group(self, group_id, user_id, requester_user_id, content):
@@ -398,15 +416,18 @@ class GroupsLocalHandler(object):
             )
         else:
             content["requester_user_id"] = requester_user_id
-            res = yield self.transport_client.remove_user_from_group(
-                get_domain_from_id(group_id),
-                group_id,
-                requester_user_id,
-                user_id,
-                content,
-            )
+            try:
+                res = yield self.transport_client.remove_user_from_group(
+                    get_domain_from_id(group_id),
+                    group_id,
+                    requester_user_id,
+                    user_id,
+                    content,
+                )
+            except RequestSendFailed:
+                raise SynapseError(502, "Failed to contact group server")
 
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def user_removed_from_group(self, group_id, user_id, content):
@@ -421,7 +442,7 @@ class GroupsLocalHandler(object):
     @defer.inlineCallbacks
     def get_joined_groups(self, user_id):
         group_ids = yield self.store.get_joined_groups(user_id)
-        defer.returnValue({"groups": group_ids})
+        return {"groups": group_ids}
 
     @defer.inlineCallbacks
     def get_publicised_groups_for_user(self, user_id):
@@ -433,14 +454,18 @@ class GroupsLocalHandler(object):
             for app_service in self.store.get_app_services():
                 result.extend(app_service.get_groups_for_user(user_id))
 
-            defer.returnValue({"groups": result})
+            return {"groups": result}
         else:
-            bulk_result = yield self.transport_client.bulk_get_publicised_groups(
-                get_domain_from_id(user_id), [user_id]
-            )
+            try:
+                bulk_result = yield self.transport_client.bulk_get_publicised_groups(
+                    get_domain_from_id(user_id), [user_id]
+                )
+            except RequestSendFailed:
+                raise SynapseError(502, "Failed to contact group server")
+
             result = bulk_result.get("users", {}).get(user_id)
             # TODO: Verify attestations
-            defer.returnValue({"groups": result})
+            return {"groups": result}
 
     @defer.inlineCallbacks
     def bulk_get_publicised_groups(self, user_ids, proxy=True):
@@ -475,4 +500,4 @@ class GroupsLocalHandler(object):
             for app_service in self.store.get_app_services():
                 results[uid].extend(app_service.get_groups_for_user(uid))
 
-        defer.returnValue({"users": results})
+        return {"users": results}
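
Each remote call in this handler now follows the same pattern of converting a
transport-level RequestSendFailed into an HTTP 502, so the client sees "Failed
to contact group server" rather than a generic 500. Reduced to a standalone
sketch with stand-in exception classes:

    class RequestSendFailed(Exception):
        """Stand-in for Synapse's RequestSendFailed transport error."""

    class ApiError(Exception):
        """Stand-in for SynapseError; carries an HTTP status code."""
        def __init__(self, code, msg):
            super().__init__(msg)
            self.code = code

    def call_group_server(make_request, *args, **kwargs):
        """Run a remote group-server call, mapping send failures to a 502."""
        try:
            return make_request(*args, **kwargs)
        except RequestSendFailed:
            raise ApiError(502, "Failed to contact group server")

    def flaky_request():
        raise RequestSendFailed("connection refused")

    try:
        call_group_server(flaky_request)
    except ApiError as e:
        assert e.code == 502
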
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 546d6169e9..d199521b58 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -82,7 +82,7 @@ class IdentityHandler(BaseHandler):
                 "%s is not a trusted ID server: rejecting 3pid " + "credentials",
                 id_server,
             )
-            defer.returnValue(None)
+            return None
 
         try:
             data = yield self.http_client.get_json(
@@ -95,8 +95,8 @@ class IdentityHandler(BaseHandler):
             raise e.to_synapse_error()
 
         if "medium" in data:
-            defer.returnValue(data)
-        defer.returnValue(None)
+            return data
+        return None
 
     @defer.inlineCallbacks
     def bind_threepid(self, creds, mxid):
@@ -133,7 +133,7 @@ class IdentityHandler(BaseHandler):
             )
         except CodeMessageException as e:
             data = json.loads(e.msg)  # XXX WAT?
-        defer.returnValue(data)
+        return data
 
     @defer.inlineCallbacks
     def try_unbind_threepid(self, mxid, threepid):
@@ -161,7 +161,7 @@ class IdentityHandler(BaseHandler):
 
         # We don't know where to unbind, so we don't have a choice but to return
         if not id_servers:
-            defer.returnValue(False)
+            return False
 
         changed = True
         for id_server in id_servers:
@@ -169,7 +169,7 @@ class IdentityHandler(BaseHandler):
                 mxid, threepid, id_server
             )
 
-        defer.returnValue(changed)
+        return changed
 
     @defer.inlineCallbacks
     def try_unbind_threepid_with_id_server(self, mxid, threepid, id_server):
@@ -224,7 +224,7 @@ class IdentityHandler(BaseHandler):
             id_server=id_server,
         )
 
-        defer.returnValue(changed)
+        return changed
 
     @defer.inlineCallbacks
     def requestEmailToken(
@@ -250,7 +250,7 @@ class IdentityHandler(BaseHandler):
                 % (id_server, "/_matrix/identity/api/v1/validate/email/requestToken"),
                 params,
             )
-            defer.returnValue(data)
+            return data
         except HttpResponseException as e:
             logger.info("Proxied requestToken failed: %r", e)
             raise e.to_synapse_error()
@@ -278,7 +278,7 @@ class IdentityHandler(BaseHandler):
                 % (id_server, "/_matrix/identity/api/v1/validate/msisdn/requestToken"),
                 params,
             )
-            defer.returnValue(data)
+            return data
         except HttpResponseException as e:
             logger.info("Proxied requestToken failed: %r", e)
             raise e.to_synapse_error()
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 54c966c8a6..42d6650ed9 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -250,7 +250,7 @@ class InitialSyncHandler(BaseHandler):
             "end": now_token.to_string(),
         }
 
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def room_initial_sync(self, requester, room_id, pagin_config=None):
@@ -301,7 +301,7 @@ class InitialSyncHandler(BaseHandler):
 
         result["account_data"] = account_data_events
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _room_initial_sync_parted(
@@ -330,28 +330,24 @@ class InitialSyncHandler(BaseHandler):
 
         time_now = self.clock.time_msec()
 
-        defer.returnValue(
-            {
-                "membership": membership,
-                "room_id": room_id,
-                "messages": {
-                    "chunk": (
-                        yield self._event_serializer.serialize_events(
-                            messages, time_now
-                        )
-                    ),
-                    "start": start_token.to_string(),
-                    "end": end_token.to_string(),
-                },
-                "state": (
-                    yield self._event_serializer.serialize_events(
-                        room_state.values(), time_now
-                    )
+        return {
+            "membership": membership,
+            "room_id": room_id,
+            "messages": {
+                "chunk": (
+                    yield self._event_serializer.serialize_events(messages, time_now)
                 ),
-                "presence": [],
-                "receipts": [],
-            }
-        )
+                "start": start_token.to_string(),
+                "end": end_token.to_string(),
+            },
+            "state": (
+                yield self._event_serializer.serialize_events(
+                    room_state.values(), time_now
+                )
+            ),
+            "presence": [],
+            "receipts": [],
+        }
 
     @defer.inlineCallbacks
     def _room_initial_sync_joined(
@@ -384,13 +380,13 @@ class InitialSyncHandler(BaseHandler):
         def get_presence():
             # If presence is disabled, return an empty list
             if not self.hs.config.use_presence:
-                defer.returnValue([])
+                return []
 
             states = yield presence_handler.get_states(
                 [m.user_id for m in room_members], as_event=True
             )
 
-            defer.returnValue(states)
+            return states
 
         @defer.inlineCallbacks
         def get_receipts():
@@ -399,7 +395,7 @@ class InitialSyncHandler(BaseHandler):
             )
             if not receipts:
                 receipts = []
-            defer.returnValue(receipts)
+            return receipts
 
         presence, receipts, (messages, token) = yield make_deferred_yieldable(
             defer.gatherResults(
@@ -442,7 +438,7 @@ class InitialSyncHandler(BaseHandler):
         if not is_peeking:
             ret["membership"] = membership
 
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def _check_in_room_or_world_readable(self, room_id, user_id):
@@ -453,7 +449,7 @@ class InitialSyncHandler(BaseHandler):
             #  * The user is a guest user, and has joined the room
             # else it will throw.
             member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
-            defer.returnValue((member_event.membership, member_event.event_id))
+            return (member_event.membership, member_event.event_id)
             return
         except AuthError:
             visibility = yield self.state_handler.get_current_state(
@@ -463,7 +459,7 @@ class InitialSyncHandler(BaseHandler):
                 visibility
                 and visibility.content["history_visibility"] == "world_readable"
             ):
-                defer.returnValue((Membership.JOIN, None))
+                return (Membership.JOIN, None)
                 return
             raise AuthError(
                 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 6d7a987f13..a5e23c4caf 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -87,7 +87,7 @@ class MessageHandler(object):
             )
             data = room_state[membership_event_id].get(key)
 
-        defer.returnValue(data)
+        return data
 
     @defer.inlineCallbacks
     def get_state_events(
@@ -174,7 +174,7 @@ class MessageHandler(object):
             # events, as clients won't use them.
             bundle_aggregations=False,
         )
-        defer.returnValue(events)
+        return events
 
     @defer.inlineCallbacks
     def get_joined_members(self, requester, room_id):
@@ -213,15 +213,13 @@ class MessageHandler(object):
                 # Loop fell through, AS has no interested users in room
                 raise AuthError(403, "Appservice not in room")
 
-        defer.returnValue(
-            {
-                user_id: {
-                    "avatar_url": profile.avatar_url,
-                    "display_name": profile.display_name,
-                }
-                for user_id, profile in iteritems(users_with_profile)
+        return {
+            user_id: {
+                "avatar_url": profile.avatar_url,
+                "display_name": profile.display_name,
             }
-        )
+            for user_id, profile in iteritems(users_with_profile)
+        }
 
 
 class EventCreationHandler(object):
@@ -380,7 +378,11 @@ class EventCreationHandler(object):
             # tolerate them in event_auth.check().
             prev_state_ids = yield context.get_prev_state_ids(self.store)
             prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
-            prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+            prev_event = (
+                yield self.store.get_event(prev_event_id, allow_none=True)
+                if prev_event_id
+                else None
+            )
             if not prev_event or prev_event.membership != Membership.JOIN:
                 logger.warning(
                     (
@@ -398,7 +400,7 @@ class EventCreationHandler(object):
 
         self.validator.validate_new(event)
 
-        defer.returnValue((event, context))
+        return (event, context)
 
     def _is_exempt_from_privacy_policy(self, builder, requester):
         """"Determine if an event to be sent is exempt from having to consent
@@ -425,9 +427,9 @@ class EventCreationHandler(object):
     @defer.inlineCallbacks
     def _is_server_notices_room(self, room_id):
         if self.config.server_notices_mxid is None:
-            defer.returnValue(False)
+            return False
         user_ids = yield self.store.get_users_in_room(room_id)
-        defer.returnValue(self.config.server_notices_mxid in user_ids)
+        return self.config.server_notices_mxid in user_ids
 
     @defer.inlineCallbacks
     def assert_accepted_privacy_policy(self, requester):
@@ -507,7 +509,7 @@ class EventCreationHandler(object):
                     event.event_id,
                     prev_state.event_id,
                 )
-                defer.returnValue(prev_state)
+                return prev_state
 
         yield self.handle_new_client_event(
             requester=requester, event=event, context=context, ratelimit=ratelimit
@@ -523,6 +525,8 @@ class EventCreationHandler(object):
         """
         prev_state_ids = yield context.get_prev_state_ids(self.store)
         prev_event_id = prev_state_ids.get((event.type, event.state_key))
+        if not prev_event_id:
+            return
         prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
         if not prev_event:
             return
@@ -531,7 +535,7 @@ class EventCreationHandler(object):
             prev_content = encode_canonical_json(prev_event.content)
             next_content = encode_canonical_json(event.content)
             if prev_content == next_content:
-                defer.returnValue(prev_event)
+                return prev_event
         return
 
     @defer.inlineCallbacks
@@ -563,7 +567,7 @@ class EventCreationHandler(object):
             yield self.send_nonmember_event(
                 requester, event, context, ratelimit=ratelimit
             )
-        defer.returnValue(event)
+        return event
 
     @measure_func("create_new_client_event")
     @defer.inlineCallbacks
@@ -626,7 +630,7 @@ class EventCreationHandler(object):
 
         logger.debug("Created event %s", event.event_id)
 
-        defer.returnValue((event, context))
+        return (event, context)
 
     @measure_func("handle_new_client_event")
     @defer.inlineCallbacks
@@ -791,7 +795,6 @@ class EventCreationHandler(object):
                 get_prev_content=False,
                 allow_rejected=False,
                 allow_none=True,
-                check_room_id=event.room_id,
             )
 
             # we can make some additional checks now if we have the original event.
@@ -799,6 +802,9 @@ class EventCreationHandler(object):
                 if original_event.type == EventTypes.Create:
                     raise AuthError(403, "Redacting create events is not permitted")
 
+                if original_event.room_id != event.room_id:
+                    raise SynapseError(400, "Cannot redact event from a different room")
+
             prev_state_ids = yield context.get_prev_state_ids(self.store)
             auth_events_ids = yield self.auth.compute_auth_events(
                 event, prev_state_ids, for_verification=True
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 20bcfed334..d83aab3f74 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -242,13 +242,11 @@ class PaginationHandler(object):
             )
 
         if not events:
-            defer.returnValue(
-                {
-                    "chunk": [],
-                    "start": pagin_config.from_token.to_string(),
-                    "end": next_token.to_string(),
-                }
-            )
+            return {
+                "chunk": [],
+                "start": pagin_config.from_token.to_string(),
+                "end": next_token.to_string(),
+            }
 
         state = None
         if event_filter and event_filter.lazy_load_members() and len(events) > 0:
@@ -286,4 +284,4 @@ class PaginationHandler(object):
                 )
             )
 
-        defer.returnValue(chunk)
+        return chunk
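
Almost every hunk in this diff makes the same mechanical change as the pagination one above: on Python 3 a generator decorated with `@defer.inlineCallbacks` may simply `return value`, so `defer.returnValue(value)` is no longer needed. A hedged before/after sketch; the function and the `store.get_recent_events` call are made up for illustration:

    from twisted.internet import defer

    # Old style: required on Python 2, where `return value` inside a
    # generator was a syntax error.
    @defer.inlineCallbacks
    def get_chunk_old(store, room_id):
        events = yield store.get_recent_events(room_id)  # hypothetical store call
        defer.returnValue({"chunk": events})

    # New style: equivalent on Python 3; the returned value still becomes
    # the result of the Deferred produced by inlineCallbacks.
    @defer.inlineCallbacks
    def get_chunk_new(store, room_id):
        events = yield store.get_recent_events(room_id)  # hypothetical store call
        return {"chunk": events}
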
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 6f3537e435..94a9ca0357 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -333,7 +333,7 @@ class PresenceHandler(object):
         """Checks the presence of users that have timed out and updates as
         appropriate.
         """
-        logger.info("Handling presence timeouts")
+        logger.debug("Handling presence timeouts")
         now = self.clock.time_msec()
 
         # Fetch the list of users that *may* have timed out. Things may have
@@ -461,7 +461,7 @@ class PresenceHandler(object):
                 if affect_presence:
                     run_in_background(_end)
 
-        defer.returnValue(_user_syncing())
+        return _user_syncing()
 
     def get_currently_syncing_users(self):
         """Get the set of user ids that are currently syncing on this HS.
@@ -556,7 +556,7 @@ class PresenceHandler(object):
         """Get the current presence state for a user.
         """
         res = yield self.current_state_for_users([user_id])
-        defer.returnValue(res[user_id])
+        return res[user_id]
 
     @defer.inlineCallbacks
     def current_state_for_users(self, user_ids):
@@ -585,7 +585,7 @@ class PresenceHandler(object):
                 states.update(new)
                 self.user_to_current_state.update(new)
 
-        defer.returnValue(states)
+        return states
 
     @defer.inlineCallbacks
     def _persist_and_notify(self, states):
@@ -681,7 +681,7 @@ class PresenceHandler(object):
     def get_state(self, target_user, as_event=False):
         results = yield self.get_states([target_user.to_string()], as_event=as_event)
 
-        defer.returnValue(results[0])
+        return results[0]
 
     @defer.inlineCallbacks
     def get_states(self, target_user_ids, as_event=False):
@@ -703,17 +703,15 @@ class PresenceHandler(object):
 
         now = self.clock.time_msec()
         if as_event:
-            defer.returnValue(
-                [
-                    {
-                        "type": "m.presence",
-                        "content": format_user_presence_state(state, now),
-                    }
-                    for state in updates
-                ]
-            )
+            return [
+                {
+                    "type": "m.presence",
+                    "content": format_user_presence_state(state, now),
+                }
+                for state in updates
+            ]
         else:
-            defer.returnValue(updates)
+            return updates
 
     @defer.inlineCallbacks
     def set_state(self, target_user, state, ignore_status_msg=False):
@@ -757,9 +755,9 @@ class PresenceHandler(object):
         )
 
         if observer_room_ids & observed_room_ids:
-            defer.returnValue(True)
+            return True
 
-        defer.returnValue(False)
+        return False
 
     @defer.inlineCallbacks
     def get_all_presence_updates(self, last_id, current_id):
@@ -778,7 +776,7 @@ class PresenceHandler(object):
         # TODO(markjh): replicate the unpersisted changes.
         # This could use the in-memory stores for recent changes.
         rows = yield self.store.get_all_presence_updates(last_id, current_id)
-        defer.returnValue(rows)
+        return rows
 
     def notify_new_event(self):
         """Called when new events have happened. Handles users and servers
@@ -1034,7 +1032,7 @@ class PresenceEventSource(object):
                 #
                 # Hence this guard where we just return nothing so that the sync
                 # doesn't return. C.f. #5503.
-                defer.returnValue(([], max_token))
+                return ([], max_token)
 
             presence = self.get_presence_handler()
             stream_change_cache = self.store.presence_stream_cache
@@ -1068,17 +1066,11 @@ class PresenceEventSource(object):
             updates = yield presence.current_state_for_users(user_ids_changed)
 
         if include_offline:
-            defer.returnValue((list(updates.values()), max_token))
+            return (list(updates.values()), max_token)
         else:
-            defer.returnValue(
-                (
-                    [
-                        s
-                        for s in itervalues(updates)
-                        if s.state != PresenceState.OFFLINE
-                    ],
-                    max_token,
-                )
+            return (
+                [s for s in itervalues(updates) if s.state != PresenceState.OFFLINE],
+                max_token,
             )
 
     def get_current_key(self):
@@ -1107,7 +1099,7 @@ class PresenceEventSource(object):
             )
             users_interested_in.update(user_ids)
 
-        defer.returnValue(users_interested_in)
+        return users_interested_in
 
 
 def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now):
@@ -1287,7 +1279,7 @@ def get_interested_parties(store, states):
         # Always notify self
         users_to_states.setdefault(state.user_id, []).append(state)
 
-    defer.returnValue((room_ids_to_states, users_to_states))
+    return (room_ids_to_states, users_to_states)
 
 
 @defer.inlineCallbacks
@@ -1321,4 +1313,4 @@ def get_interested_remotes(store, states, state_handler):
         host = get_domain_from_id(user_id)
         hosts_and_states.append(([host], states))
 
-    defer.returnValue(hosts_and_states)
+    return hosts_and_states
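
Beyond the return-statement conversion, the behavioural detail worth noting in the presence hunks is the `include_offline` branch of `PresenceEventSource.get_new_events`: offline states are filtered out unless the caller asked for them. A condensed sketch of that branch, using the names from the hunk above:

    from synapse.api.constants import PresenceState

    def filter_presence_updates(updates, include_offline):
        # `updates` maps user_id -> presence state, as in the hunk above.
        if include_offline:
            return list(updates.values())
        return [s for s in updates.values() if s.state != PresenceState.OFFLINE]
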
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index a2388a7091..2cc237e6a5 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -73,7 +73,7 @@ class BaseProfileHandler(BaseHandler):
                     raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
                 raise
 
-            defer.returnValue({"displayname": displayname, "avatar_url": avatar_url})
+            return {"displayname": displayname, "avatar_url": avatar_url}
         else:
             try:
                 result = yield self.federation.make_query(
@@ -82,7 +82,7 @@ class BaseProfileHandler(BaseHandler):
                     args={"user_id": user_id},
                     ignore_backoff=True,
                 )
-                defer.returnValue(result)
+                return result
             except RequestSendFailed as e:
                 raise_from(SynapseError(502, "Failed to fetch profile"), e)
             except HttpResponseException as e:
@@ -108,10 +108,10 @@ class BaseProfileHandler(BaseHandler):
                     raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
                 raise
 
-            defer.returnValue({"displayname": displayname, "avatar_url": avatar_url})
+            return {"displayname": displayname, "avatar_url": avatar_url}
         else:
             profile = yield self.store.get_from_remote_profile_cache(user_id)
-            defer.returnValue(profile or {})
+            return profile or {}
 
     @defer.inlineCallbacks
     def get_displayname(self, target_user):
@@ -125,7 +125,7 @@ class BaseProfileHandler(BaseHandler):
                     raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
                 raise
 
-            defer.returnValue(displayname)
+            return displayname
         else:
             try:
                 result = yield self.federation.make_query(
@@ -139,7 +139,7 @@ class BaseProfileHandler(BaseHandler):
             except HttpResponseException as e:
                 raise e.to_synapse_error()
 
-            defer.returnValue(result["displayname"])
+            return result["displayname"]
 
     @defer.inlineCallbacks
     def set_displayname(self, target_user, requester, new_displayname, by_admin=False):
@@ -186,7 +186,7 @@ class BaseProfileHandler(BaseHandler):
                 if e.code == 404:
                     raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
                 raise
-            defer.returnValue(avatar_url)
+            return avatar_url
         else:
             try:
                 result = yield self.federation.make_query(
@@ -200,7 +200,7 @@ class BaseProfileHandler(BaseHandler):
             except HttpResponseException as e:
                 raise e.to_synapse_error()
 
-            defer.returnValue(result["avatar_url"])
+            return result["avatar_url"]
 
     @defer.inlineCallbacks
     def set_avatar_url(self, target_user, requester, new_avatar_url, by_admin=False):
@@ -251,7 +251,7 @@ class BaseProfileHandler(BaseHandler):
                 raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
             raise
 
-        defer.returnValue(response)
+        return response
 
     @defer.inlineCallbacks
     def _update_join_states(self, requester, target_user):
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index a85dd8cdee..73973502a4 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -17,7 +17,7 @@ import logging
 from twisted.internet import defer
 
 from synapse.handlers._base import BaseHandler
-from synapse.types import ReadReceipt
+from synapse.types import ReadReceipt, get_domain_from_id
 
 logger = logging.getLogger(__name__)
 
@@ -40,18 +40,27 @@ class ReceiptsHandler(BaseHandler):
     def _received_remote_receipt(self, origin, content):
         """Called when we receive an EDU of type m.receipt from a remote HS.
         """
-        receipts = [
-            ReadReceipt(
-                room_id=room_id,
-                receipt_type=receipt_type,
-                user_id=user_id,
-                event_ids=user_values["event_ids"],
-                data=user_values.get("data", {}),
-            )
-            for room_id, room_values in content.items()
-            for receipt_type, users in room_values.items()
-            for user_id, user_values in users.items()
-        ]
+        receipts = []
+        for room_id, room_values in content.items():
+            for receipt_type, users in room_values.items():
+                for user_id, user_values in users.items():
+                    if get_domain_from_id(user_id) != origin:
+                        logger.info(
+                            "Received receipt for user %r from server %s, ignoring",
+                            user_id,
+                            origin,
+                        )
+                        continue
+
+                    receipts.append(
+                        ReadReceipt(
+                            room_id=room_id,
+                            receipt_type=receipt_type,
+                            user_id=user_id,
+                            event_ids=user_values["event_ids"],
+                            data=user_values.get("data", {}),
+                        )
+                    )
 
         yield self._handle_new_receipts(receipts)
 
@@ -84,7 +93,7 @@ class ReceiptsHandler(BaseHandler):
 
         if min_batch_id is None:
             # no new receipts
-            defer.returnValue(False)
+            return False
 
         affected_room_ids = list(set([r.room_id for r in receipts]))
 
@@ -94,7 +103,7 @@ class ReceiptsHandler(BaseHandler):
             min_batch_id, max_batch_id, affected_room_ids
         )
 
-        defer.returnValue(True)
+        return True
 
     @defer.inlineCallbacks
     def received_client_receipt(self, room_id, receipt_type, user_id, event_id):
@@ -124,9 +133,9 @@ class ReceiptsHandler(BaseHandler):
         )
 
         if not result:
-            defer.returnValue([])
+            return []
 
-        defer.returnValue(result)
+        return result
 
 
 class ReceiptEventSource(object):
@@ -139,13 +148,13 @@ class ReceiptEventSource(object):
         to_key = yield self.get_current_key()
 
         if from_key == to_key:
-            defer.returnValue(([], to_key))
+            return ([], to_key)
 
         events = yield self.store.get_linearized_receipts_for_rooms(
             room_ids, from_key=from_key, to_key=to_key
         )
 
-        defer.returnValue((events, to_key))
+        return (events, to_key)
 
     def get_current_key(self, direction="f"):
         return self.store.get_max_receipt_stream_id()
@@ -164,4 +173,4 @@ class ReceiptEventSource(object):
             room_ids, from_key=from_key, to_key=to_key
         )
 
-        defer.returnValue((events, to_key))
+        return (events, to_key)
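
The first receipts hunk replaces a list comprehension with an explicit loop so that receipts claiming to belong to a user on a different server than `origin` can be logged and dropped. A condensed, stand-alone sketch of just that filter, following the EDU `content` layout shown above:

    import logging

    from synapse.types import ReadReceipt, get_domain_from_id

    logger = logging.getLogger(__name__)

    def filter_remote_receipts(origin, content):
        """Keep only receipts whose user actually belongs to the sending server (sketch)."""
        receipts = []
        for room_id, room_values in content.items():
            for receipt_type, users in room_values.items():
                for user_id, user_values in users.items():
                    if get_domain_from_id(user_id) != origin:
                        logger.info(
                            "Received receipt for user %r from server %s, ignoring",
                            user_id,
                            origin,
                        )
                        continue
                    receipts.append(
                        ReadReceipt(
                            room_id=room_id,
                            receipt_type=receipt_type,
                            user_id=user_id,
                            event_ids=user_values["event_ids"],
                            data=user_values.get("data", {}),
                        )
                    )
        return receipts
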
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index bb7cfd71b9..4631fab94e 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -265,7 +265,7 @@ class RegistrationHandler(BaseHandler):
             # Bind email to new account
             yield self._register_email_threepid(user_id, threepid_dict, None, False)
 
-        defer.returnValue(user_id)
+        return user_id
 
     @defer.inlineCallbacks
     def _auto_join_rooms(self, user_id):
@@ -360,7 +360,7 @@ class RegistrationHandler(BaseHandler):
             appservice_id=service_id,
             create_profile_with_displayname=user.localpart,
         )
-        defer.returnValue(user_id)
+        return user_id
 
     @defer.inlineCallbacks
     def check_recaptcha(self, ip, private_key, challenge, response):
@@ -461,7 +461,7 @@ class RegistrationHandler(BaseHandler):
 
         id = self._next_generated_user_id
         self._next_generated_user_id += 1
-        defer.returnValue(str(id))
+        return str(id)
 
     @defer.inlineCallbacks
     def _validate_captcha(self, ip_addr, private_key, challenge, response):
@@ -481,7 +481,7 @@ class RegistrationHandler(BaseHandler):
             "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?"
             + "error=%s" % lines[1],
         }
-        defer.returnValue(json)
+        return json
 
     @defer.inlineCallbacks
     def _submit_captcha(self, ip_addr, private_key, challenge, response):
@@ -497,7 +497,7 @@ class RegistrationHandler(BaseHandler):
                 "response": response,
             },
         )
-        defer.returnValue(data)
+        return data
 
     @defer.inlineCallbacks
     def _join_user_to_room(self, requester, room_identifier):
@@ -622,7 +622,7 @@ class RegistrationHandler(BaseHandler):
                 initial_display_name=initial_display_name,
                 is_guest=is_guest,
             )
-            defer.returnValue((r["device_id"], r["access_token"]))
+            return (r["device_id"], r["access_token"])
 
         valid_until_ms = None
         if self.session_lifetime is not None:
@@ -645,7 +645,7 @@ class RegistrationHandler(BaseHandler):
                 user_id, device_id=device_id, valid_until_ms=valid_until_ms
             )
 
-        defer.returnValue((device_id, access_token))
+        return (device_id, access_token)
 
     @defer.inlineCallbacks
     def post_registration_actions(
@@ -798,7 +798,7 @@ class RegistrationHandler(BaseHandler):
             if ex.errcode == Codes.MISSING_PARAM:
                 # This will only happen if the ID server returns a malformed response
                 logger.info("Can't add incomplete 3pid")
-                defer.returnValue(None)
+                return None
             raise
 
         yield self._auth_handler.add_threepid(
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index db3f8cb76b..5caa90c3b7 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -128,7 +128,7 @@ class RoomCreationHandler(BaseHandler):
             old_room_id,
             new_version,  # args for _upgrade_room
         )
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def _upgrade_room(self, requester, old_room_id, new_version):
@@ -193,7 +193,7 @@ class RoomCreationHandler(BaseHandler):
             requester, old_room_id, new_room_id, old_room_state
         )
 
-        defer.returnValue(new_room_id)
+        return new_room_id
 
     @defer.inlineCallbacks
     def _update_upgraded_room_pls(
@@ -671,7 +671,7 @@ class RoomCreationHandler(BaseHandler):
             result["room_alias"] = room_alias.to_string()
             yield directory_handler.send_room_alias_update_event(requester, room_id)
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _send_events_for_new_room(
@@ -796,7 +796,7 @@ class RoomCreationHandler(BaseHandler):
                     room_creator_user_id=creator_id,
                     is_public=is_public,
                 )
-                defer.returnValue(gen_room_id)
+                return gen_room_id
             except StoreError:
                 attempts += 1
         raise StoreError(500, "Couldn't generate a room ID.")
@@ -839,7 +839,7 @@ class RoomContextHandler(object):
             event_id, get_prev_content=True, allow_none=True
         )
         if not event:
-            defer.returnValue(None)
+            return None
             return
 
         filtered = yield (filter_evts([event]))
@@ -890,7 +890,7 @@ class RoomContextHandler(object):
 
         results["end"] = token.copy_and_replace("room_key", results["end"]).to_string()
 
-        defer.returnValue(results)
+        return results
 
 
 class RoomEventSource(object):
@@ -941,7 +941,7 @@ class RoomEventSource(object):
             else:
                 end_key = to_key
 
-        defer.returnValue((events, end_key))
+        return (events, end_key)
 
     def get_current_key(self):
         return self.store.get_room_events_max_id()
@@ -959,4 +959,4 @@ class RoomEventSource(object):
             limit=config.limit,
         )
 
-        defer.returnValue((events, next_key))
+        return (events, next_key)
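
The `_generate_room_id` hunk above sits inside a retry loop: generate a candidate ID, try to store the room, and give up with a 500 after a bounded number of `StoreError` collisions. A stand-alone sketch of that pattern, with hypothetical `make_room_id` and `try_store_room` callables in place of the real store calls (the bound shown here is illustrative, not taken from the diff):

    from twisted.internet import defer

    from synapse.api.errors import StoreError

    @defer.inlineCallbacks
    def generate_room_id(make_room_id, try_store_room, max_attempts=5):
        attempts = 0
        while attempts < max_attempts:
            gen_room_id = make_room_id()
            try:
                # try_store_room is assumed to raise StoreError on an ID collision.
                yield try_store_room(gen_room_id)
                return gen_room_id
            except StoreError:
                attempts += 1
        raise StoreError(500, "Couldn't generate a room ID.")
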
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index aae696a7e8..e9094ad02b 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -325,7 +325,7 @@ class RoomListHandler(BaseHandler):
                     current_limit=since_token.current_limit - 1,
                 ).to_token()
 
-        defer.returnValue(results)
+        return results
 
     @defer.inlineCallbacks
     def _append_room_entry_to_chunk(
@@ -420,7 +420,7 @@ class RoomListHandler(BaseHandler):
         if join_rules_event:
             join_rule = join_rules_event.content.get("join_rule", None)
             if not allow_private and join_rule and join_rule != JoinRules.PUBLIC:
-                defer.returnValue(None)
+                return None
 
         # Return whether this room is open to federation users or not
         create_event = current_state.get((EventTypes.Create, ""))
@@ -469,7 +469,7 @@ class RoomListHandler(BaseHandler):
             if avatar_url:
                 result["avatar_url"] = avatar_url
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def get_remote_public_room_list(
@@ -482,7 +482,7 @@ class RoomListHandler(BaseHandler):
         third_party_instance_id=None,
     ):
         if not self.enable_room_list_search:
-            defer.returnValue({"chunk": [], "total_room_count_estimate": 0})
+            return {"chunk": [], "total_room_count_estimate": 0}
 
         if search_filter:
             # We currently don't support searching across federation, so we have
@@ -507,7 +507,7 @@ class RoomListHandler(BaseHandler):
                 ]
             }
 
-        defer.returnValue(res)
+        return res
 
     def _get_remote_list_cached(
         self,
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index e0196ef83e..249a6d9c5d 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -26,8 +26,7 @@ from unpaddedbase64 import decode_base64
 
 from twisted.internet import defer
 
-import synapse.server
-import synapse.types
+from synapse import types
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError
 from synapse.types import RoomID, UserID
@@ -191,7 +190,7 @@ class RoomMemberHandler(object):
         )
         if duplicate is not None:
             # Discard the new event since this membership change is a no-op.
-            defer.returnValue(duplicate)
+            return duplicate
 
         yield self.event_creation_handler.handle_new_client_event(
             requester, event, context, extra_users=[target], ratelimit=ratelimit
@@ -233,7 +232,7 @@ class RoomMemberHandler(object):
                 if prev_member_event.membership == Membership.JOIN:
                     yield self._user_left_room(target, room_id)
 
-        defer.returnValue(event)
+        return event
 
     @defer.inlineCallbacks
     def copy_room_tags_and_direct_to_room(self, old_room_id, new_room_id, user_id):
@@ -303,7 +302,7 @@ class RoomMemberHandler(object):
                 require_consent=require_consent,
             )
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _update_membership(
@@ -423,7 +422,7 @@ class RoomMemberHandler(object):
                 same_membership = old_membership == effective_membership_state
                 same_sender = requester.user.to_string() == old_state.sender
                 if same_sender and same_membership and same_content:
-                    defer.returnValue(old_state)
+                    return old_state
 
             if old_membership in ["ban", "leave"] and action == "kick":
                 raise AuthError(403, "The target user is not in the room")
@@ -473,7 +472,7 @@ class RoomMemberHandler(object):
                 ret = yield self._remote_join(
                     requester, remote_room_hosts, room_id, target, content
                 )
-                defer.returnValue(ret)
+                return ret
 
         elif effective_membership_state == Membership.LEAVE:
             if not is_host_in_room:
@@ -495,7 +494,7 @@ class RoomMemberHandler(object):
                     res = yield self._remote_reject_invite(
                         requester, remote_room_hosts, room_id, target
                     )
-                    defer.returnValue(res)
+                    return res
 
         res = yield self._local_membership_update(
             requester=requester,
@@ -508,7 +507,7 @@ class RoomMemberHandler(object):
             content=content,
             require_consent=require_consent,
         )
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def send_membership_event(
@@ -543,7 +542,7 @@ class RoomMemberHandler(object):
             ), "Sender (%s) must be same as requester (%s)" % (sender, requester.user)
             assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
         else:
-            requester = synapse.types.create_requester(target_user)
+            requester = types.create_requester(target_user)
 
         prev_event = yield self.event_creation_handler.deduplicate_state_event(
             event, context
@@ -596,11 +595,11 @@ class RoomMemberHandler(object):
         """
         guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None)
         if not guest_access_id:
-            defer.returnValue(False)
+            return False
 
         guest_access = yield self.store.get_event(guest_access_id)
 
-        defer.returnValue(
+        return (
             guest_access
             and guest_access.content
             and "guest_access" in guest_access.content
@@ -635,7 +634,7 @@ class RoomMemberHandler(object):
             servers.remove(room_alias.domain)
         servers.insert(0, room_alias.domain)
 
-        defer.returnValue((RoomID.from_string(room_id), servers))
+        return (RoomID.from_string(room_id), servers)
 
     @defer.inlineCallbacks
     def _get_inviter(self, user_id, room_id):
@@ -643,7 +642,7 @@ class RoomMemberHandler(object):
             user_id=user_id, room_id=room_id
         )
         if invite:
-            defer.returnValue(UserID.from_string(invite.sender))
+            return UserID.from_string(invite.sender)
 
     @defer.inlineCallbacks
     def do_3pid_invite(
@@ -708,11 +707,11 @@ class RoomMemberHandler(object):
                 if "signatures" not in data:
                     raise AuthError(401, "No signatures on 3pid binding")
                 yield self._verify_any_signature(data, id_server)
-                defer.returnValue(data["mxid"])
+                return data["mxid"]
 
         except IOError as e:
             logger.warn("Error from identity server lookup: %s" % (e,))
-            defer.returnValue(None)
+            return None
 
     @defer.inlineCallbacks
     def _verify_any_signature(self, data, server_hostname):
@@ -904,7 +903,7 @@ class RoomMemberHandler(object):
         if not public_keys:
             public_keys.append(fallback_public_key)
         display_name = data["display_name"]
-        defer.returnValue((token, public_keys, fallback_public_key, display_name))
+        return (token, public_keys, fallback_public_key, display_name)
 
     @defer.inlineCallbacks
     def _is_host_in_room(self, current_state_ids):
@@ -913,7 +912,7 @@ class RoomMemberHandler(object):
         create_event_id = current_state_ids.get(("m.room.create", ""))
         if len(current_state_ids) == 1 and create_event_id:
             # We can only get here if we're in the process of creating the room
-            defer.returnValue(True)
+            return True
 
         for etype, state_key in current_state_ids:
             if etype != EventTypes.Member or not self.hs.is_mine_id(state_key):
@@ -925,16 +924,16 @@ class RoomMemberHandler(object):
                 continue
 
             if event.membership == Membership.JOIN:
-                defer.returnValue(True)
+                return True
 
-        defer.returnValue(False)
+        return False
 
     @defer.inlineCallbacks
     def _is_server_notice_room(self, room_id):
         if self._server_notices_mxid is None:
-            defer.returnValue(False)
+            return False
         user_ids = yield self.store.get_users_in_room(room_id)
-        defer.returnValue(self._server_notices_mxid in user_ids)
+        return self._server_notices_mxid in user_ids
 
 
 class RoomMemberMasterHandler(RoomMemberHandler):
@@ -946,13 +945,53 @@ class RoomMemberMasterHandler(RoomMemberHandler):
         self.distributor.declare("user_left_room")
 
     @defer.inlineCallbacks
+    def _is_remote_room_too_complex(self, room_id, remote_room_hosts):
+        """
+        Check if complexity of a remote room is too great.
+
+        Args:
+            room_id (str)
+            remote_room_hosts (list[str])
+
+        Returns: bool of whether the complexity is too great, or None
+            if the complexity could not be fetched
+        """
+        max_complexity = self.hs.config.limit_remote_rooms.complexity
+        complexity = yield self.federation_handler.get_room_complexity(
+            remote_room_hosts, room_id
+        )
+
+        if complexity:
+            if complexity["v1"] > max_complexity:
+                return True
+            return False
+        return None
+
+    @defer.inlineCallbacks
+    def _is_local_room_too_complex(self, room_id):
+        """
+        Check if the complexity of a local room is too great.
+
+        Args:
+            room_id (str)
+
+        Returns: bool
+        """
+        max_complexity = self.hs.config.limit_remote_rooms.complexity
+        complexity = yield self.store.get_room_complexity(room_id)
+
+        if complexity["v1"] > max_complexity:
+            return True
+
+        return False
+
+    @defer.inlineCallbacks
     def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
         """Implements RoomMemberHandler._remote_join
         """
         # filter ourselves out of remote_room_hosts: do_invite_join ignores it
         # and if it is the only entry we'd like to return a 404 rather than a
         # 500.
-
         remote_room_hosts = [
             host for host in remote_room_hosts if host != self.hs.hostname
         ]
@@ -960,6 +999,18 @@ class RoomMemberMasterHandler(RoomMemberHandler):
         if len(remote_room_hosts) == 0:
             raise SynapseError(404, "No known servers")
 
+        if self.hs.config.limit_remote_rooms.enabled:
+            # Fetch the room complexity
+            too_complex = yield self._is_remote_room_too_complex(
+                room_id, remote_room_hosts
+            )
+            if too_complex is True:
+                raise SynapseError(
+                    code=400,
+                    msg=self.hs.config.limit_remote_rooms.complexity_error,
+                    errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
+                )
+
         # We don't do an auth check if we are doing an invite
         # join dance for now, since we're kinda implicitly checking
         # that we are allowed to join when we decide whether or not we
@@ -969,6 +1020,31 @@ class RoomMemberMasterHandler(RoomMemberHandler):
         )
         yield self._user_joined_room(user, room_id)
 
+        # Check the room we just joined wasn't too large, if we didn't fetch the
+        # complexity of it before.
+        if self.hs.config.limit_remote_rooms.enabled:
+            if too_complex is False:
+                # We checked, and we're under the limit.
+                return
+
+            # Check again, but with the local state events
+            too_complex = yield self._is_local_room_too_complex(room_id)
+
+            if too_complex is False:
+                # We're under the limit.
+                return
+
+            # The room is too large. Leave.
+            requester = types.create_requester(user, None, False, None)
+            yield self.update_membership(
+                requester=requester, target=user, room_id=room_id, action="leave"
+            )
+            raise SynapseError(
+                code=400,
+                msg=self.hs.config.limit_remote_rooms.complexity_error,
+                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
+            )
+
     @defer.inlineCallbacks
     def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
         """Implements RoomMemberHandler._remote_reject_invite
@@ -978,7 +1054,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
             ret = yield fed_handler.do_remotely_reject_invite(
                 remote_room_hosts, room_id, target.to_string()
             )
-            defer.returnValue(ret)
+            return ret
         except Exception as e:
             # if we were unable to reject the invite, just mark
             # it as rejected on our end and plough ahead.
@@ -989,7 +1065,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
             logger.warn("Failed to reject invite: %s", e)
 
             yield self.store.locally_reject_invite(target.to_string(), room_id)
-            defer.returnValue({})
+            return {}
 
     def _user_joined_room(self, target, room_id):
         """Implements RoomMemberHandler._user_joined_room
diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py
index fc873a3ba6..75e96ae1a2 100644
--- a/synapse/handlers/room_member_worker.py
+++ b/synapse/handlers/room_member_worker.py
@@ -53,7 +53,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
 
         yield self._user_joined_room(user, room_id)
 
-        defer.returnValue(ret)
+        return ret
 
     def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
         """Implements RoomMemberHandler._remote_reject_invite
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index ddc4430d03..cd5e90bacb 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -69,7 +69,7 @@ class SearchHandler(BaseHandler):
             # Scan through the old room for further predecessors
             room_id = predecessor["room_id"]
 
-        defer.returnValue(historical_room_ids)
+        return historical_room_ids
 
     @defer.inlineCallbacks
     def search(self, user, content, batch=None):
@@ -186,13 +186,11 @@ class SearchHandler(BaseHandler):
             room_ids.intersection_update({batch_group_key})
 
         if not room_ids:
-            defer.returnValue(
-                {
-                    "search_categories": {
-                        "room_events": {"results": [], "count": 0, "highlights": []}
-                    }
+            return {
+                "search_categories": {
+                    "room_events": {"results": [], "count": 0, "highlights": []}
                 }
-            )
+            }
 
         rank_map = {}  # event_id -> rank of event
         allowed_events = []
@@ -455,4 +453,4 @@ class SearchHandler(BaseHandler):
         if global_next_batch:
             rooms_cat_res["next_batch"] = global_next_batch
 
-        defer.returnValue({"search_categories": {"room_events": rooms_cat_res}})
+        return {"search_categories": {"room_events": rooms_cat_res}}
diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py
index 6b364befd5..f065970c40 100644
--- a/synapse/handlers/state_deltas.py
+++ b/synapse/handlers/state_deltas.py
@@ -48,7 +48,7 @@ class StateDeltasHandler(object):
 
         if not event and not prev_event:
             logger.debug("Neither event exists: %r %r", prev_event_id, event_id)
-            defer.returnValue(None)
+            return None
 
         prev_value = None
         value = None
@@ -62,8 +62,8 @@ class StateDeltasHandler(object):
         logger.debug("prev_value: %r -> value: %r", prev_value, value)
 
         if value == public_value and prev_value != public_value:
-            defer.returnValue(True)
+            return True
         elif value != public_value and prev_value == public_value:
-            defer.returnValue(False)
+            return False
         else:
-            defer.returnValue(None)
+            return None
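
The `_get_key_change` tail above encodes a tri-state result. A small restatement of that logic as a pure function, for clarity:

    def key_change(prev_value, value, public_value):
        """True if the field changed to `public_value`, False if it changed away
        from it, None if there was no transition either way (sketch of the above)."""
        if value == public_value and prev_value != public_value:
            return True
        if value != public_value and prev_value == public_value:
            return False
        return None
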
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 3590133a32..dee19cdcec 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -89,7 +89,7 @@ class StatsHandler(StateDeltasHandler):
 
         # If still None then the initial background update hasn't started yet
         if self.pos is None or None in self.pos.values():
-            defer.returnValue(None)
+            return None
 
         # Loop round handling deltas until we're up to date
         with Measure(self.clock, "stats_delta"):
@@ -380,6 +380,6 @@ class StatsHandler(StateDeltasHandler):
                 == "world_readable"
             )
         ):
-            defer.returnValue(True)
+            return True
         else:
-            defer.returnValue(False)
+            return False
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index cd1ac0a27a..98da2318a0 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -263,7 +263,7 @@ class SyncHandler(object):
             timeout,
             full_state,
         )
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def _wait_for_sync_for_user(self, sync_config, since_token, timeout, full_state):
@@ -303,7 +303,7 @@ class SyncHandler(object):
                 lazy_loaded = "false"
             non_empty_sync_counter.labels(sync_type, lazy_loaded).inc()
 
-        defer.returnValue(result)
+        return result
 
     def current_sync_for_user(self, sync_config, since_token=None, full_state=False):
         """Get the sync for client needed to match what the server has now.
@@ -317,7 +317,7 @@ class SyncHandler(object):
         user_id = user.to_string()
         rules = yield self.store.get_push_rules_for_user(user_id)
         rules = format_push_rules_for_user(user, rules)
-        defer.returnValue(rules)
+        return rules
 
     @defer.inlineCallbacks
     def ephemeral_by_room(self, sync_result_builder, now_token, since_token=None):
@@ -378,7 +378,7 @@ class SyncHandler(object):
                 event_copy = {k: v for (k, v) in iteritems(event) if k != "room_id"}
                 ephemeral_by_room.setdefault(room_id, []).append(event_copy)
 
-        defer.returnValue((now_token, ephemeral_by_room))
+        return (now_token, ephemeral_by_room)
 
     @defer.inlineCallbacks
     def _load_filtered_recents(
@@ -426,8 +426,8 @@ class SyncHandler(object):
                 recents = []
 
             if not limited or block_all_timeline:
-                defer.returnValue(
-                    TimelineBatch(events=recents, prev_batch=now_token, limited=False)
+                return TimelineBatch(
+                    events=recents, prev_batch=now_token, limited=False
                 )
 
             filtering_factor = 2
@@ -490,12 +490,10 @@ class SyncHandler(object):
 
             prev_batch_token = now_token.copy_and_replace("room_key", room_key)
 
-        defer.returnValue(
-            TimelineBatch(
-                events=recents,
-                prev_batch=prev_batch_token,
-                limited=limited or newly_joined_room,
-            )
+        return TimelineBatch(
+            events=recents,
+            prev_batch=prev_batch_token,
+            limited=limited or newly_joined_room,
         )
 
     @defer.inlineCallbacks
@@ -517,7 +515,7 @@ class SyncHandler(object):
         if event.is_state():
             state_ids = state_ids.copy()
             state_ids[(event.type, event.state_key)] = event.event_id
-        defer.returnValue(state_ids)
+        return state_ids
 
     @defer.inlineCallbacks
     def get_state_at(self, room_id, stream_position, state_filter=StateFilter.all()):
@@ -549,7 +547,7 @@ class SyncHandler(object):
         else:
             # no events in this room - so presumably no state
             state = {}
-        defer.returnValue(state)
+        return state
 
     @defer.inlineCallbacks
     def compute_summary(self, room_id, sync_config, batch, state, now_token):
@@ -579,7 +577,7 @@ class SyncHandler(object):
         )
 
         if not last_events:
-            defer.returnValue(None)
+            return None
             return
 
         last_event = last_events[-1]
@@ -611,14 +609,14 @@ class SyncHandler(object):
         if name_id:
             name = yield self.store.get_event(name_id, allow_none=True)
             if name and name.content.get("name"):
-                defer.returnValue(summary)
+                return summary
 
         if canonical_alias_id:
             canonical_alias = yield self.store.get_event(
                 canonical_alias_id, allow_none=True
             )
             if canonical_alias and canonical_alias.content.get("alias"):
-                defer.returnValue(summary)
+                return summary
 
         me = sync_config.user.to_string()
 
@@ -652,7 +650,7 @@ class SyncHandler(object):
             summary["m.heroes"] = sorted([user_id for user_id in gone_user_ids])[0:5]
 
         if not sync_config.filter_collection.lazy_load_members():
-            defer.returnValue(summary)
+            return summary
 
         # ensure we send membership events for heroes if needed
         cache_key = (sync_config.user.to_string(), sync_config.device_id)
@@ -686,7 +684,7 @@ class SyncHandler(object):
             cache.set(s.state_key, s.event_id)
             state[(EventTypes.Member, s.state_key)] = s
 
-        defer.returnValue(summary)
+        return summary
 
     def get_lazy_loaded_members_cache(self, cache_key):
         cache = self.lazy_loaded_members_cache.get(cache_key)
@@ -783,9 +781,17 @@ class SyncHandler(object):
                     lazy_load_members=lazy_load_members,
                 )
             elif batch.limited:
-                state_at_timeline_start = yield self.store.get_state_ids_for_event(
-                    batch.events[0].event_id, state_filter=state_filter
-                )
+                if batch:
+                    state_at_timeline_start = yield self.store.get_state_ids_for_event(
+                        batch.events[0].event_id, state_filter=state_filter
+                    )
+                else:
+                    # It's not clear how we get here, but empirically we do
+                    # (#5407). Logging has been added elsewhere to try and
+                    # figure out where this state comes from.
+                    state_at_timeline_start = yield self.get_state_at(
+                        room_id, stream_position=now_token, state_filter=state_filter
+                    )
 
                 # for now, we disable LL for gappy syncs - see
                 # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
@@ -805,9 +811,17 @@ class SyncHandler(object):
                     room_id, stream_position=since_token, state_filter=state_filter
                 )
 
-                current_state_ids = yield self.store.get_state_ids_for_event(
-                    batch.events[-1].event_id, state_filter=state_filter
-                )
+                if batch:
+                    current_state_ids = yield self.store.get_state_ids_for_event(
+                        batch.events[-1].event_id, state_filter=state_filter
+                    )
+                else:
+                    # It's not clear how we get here, but empirically we do
+                    # (#5407). Logging has been added elsewhere to try and
+                    # figure out where this state comes from.
+                    current_state_ids = yield self.get_state_at(
+                        room_id, stream_position=now_token, state_filter=state_filter
+                    )
 
                 state_ids = _calculate_state(
                     timeline_contains=timeline_state,
@@ -871,14 +885,12 @@ class SyncHandler(object):
         if state_ids:
             state = yield self.store.get_events(list(state_ids.values()))
 
-        defer.returnValue(
-            {
-                (e.type, e.state_key): e
-                for e in sync_config.filter_collection.filter_room_state(
-                    list(state.values())
-                )
-            }
-        )
+        return {
+            (e.type, e.state_key): e
+            for e in sync_config.filter_collection.filter_room_state(
+                list(state.values())
+            )
+        }
 
     @defer.inlineCallbacks
     def unread_notifs_for_room_id(self, room_id, sync_config):
@@ -894,11 +906,11 @@ class SyncHandler(object):
                 notifs = yield self.store.get_unread_event_push_actions_by_room_for_user(
                     room_id, sync_config.user.to_string(), last_unread_event_id
                 )
-                defer.returnValue(notifs)
+                return notifs
 
         # There is no new information in this period, so your notification
         # count is whatever it was last time.
-        defer.returnValue(None)
+        return None
 
     @defer.inlineCallbacks
     def generate_sync_result(self, sync_config, since_token=None, full_state=False):
@@ -989,19 +1001,17 @@ class SyncHandler(object):
                     "Sync result for newly joined room %s: %r", room_id, joined_room
                 )
 
-        defer.returnValue(
-            SyncResult(
-                presence=sync_result_builder.presence,
-                account_data=sync_result_builder.account_data,
-                joined=sync_result_builder.joined,
-                invited=sync_result_builder.invited,
-                archived=sync_result_builder.archived,
-                to_device=sync_result_builder.to_device,
-                device_lists=device_lists,
-                groups=sync_result_builder.groups,
-                device_one_time_keys_count=one_time_key_counts,
-                next_batch=sync_result_builder.now_token,
-            )
+        return SyncResult(
+            presence=sync_result_builder.presence,
+            account_data=sync_result_builder.account_data,
+            joined=sync_result_builder.joined,
+            invited=sync_result_builder.invited,
+            archived=sync_result_builder.archived,
+            to_device=sync_result_builder.to_device,
+            device_lists=device_lists,
+            groups=sync_result_builder.groups,
+            device_one_time_keys_count=one_time_key_counts,
+            next_batch=sync_result_builder.now_token,
         )
 
     @measure_func("_generate_sync_entry_for_groups")
@@ -1124,11 +1134,9 @@ class SyncHandler(object):
             # Remove any users that we still share a room with.
             newly_left_users -= users_who_share_room
 
-            defer.returnValue(
-                DeviceLists(changed=users_that_have_changed, left=newly_left_users)
-            )
+            return DeviceLists(changed=users_that_have_changed, left=newly_left_users)
         else:
-            defer.returnValue(DeviceLists(changed=[], left=[]))
+            return DeviceLists(changed=[], left=[])
 
     @defer.inlineCallbacks
     def _generate_sync_entry_for_to_device(self, sync_result_builder):
@@ -1225,7 +1233,7 @@ class SyncHandler(object):
 
         sync_result_builder.account_data = account_data_for_user
 
-        defer.returnValue(account_data_by_room)
+        return account_data_by_room
 
     @defer.inlineCallbacks
     def _generate_sync_entry_for_presence(
@@ -1325,7 +1333,7 @@ class SyncHandler(object):
                     )
                     if not tags_by_room:
                         logger.debug("no-oping sync")
-                        defer.returnValue(([], [], [], []))
+                        return ([], [], [], [])
 
         ignored_account_data = yield self.store.get_global_account_data_by_type_for_user(
             "m.ignored_user_list", user_id=user_id
@@ -1388,13 +1396,11 @@ class SyncHandler(object):
 
         newly_left_users -= newly_joined_or_invited_users
 
-        defer.returnValue(
-            (
-                newly_joined_rooms,
-                newly_joined_or_invited_users,
-                newly_left_rooms,
-                newly_left_users,
-            )
+        return (
+            newly_joined_rooms,
+            newly_joined_or_invited_users,
+            newly_left_rooms,
+            newly_left_users,
         )
 
     @defer.inlineCallbacks
@@ -1414,13 +1420,13 @@ class SyncHandler(object):
         )
 
         if rooms_changed:
-            defer.returnValue(True)
+            return True
 
         stream_id = RoomStreamToken.parse_stream_token(since_token.room_key).stream
         for room_id in sync_result_builder.joined_room_ids:
             if self.store.has_room_changed_since(room_id, stream_id):
-                defer.returnValue(True)
-        defer.returnValue(False)
+                return True
+        return False
 
     @defer.inlineCallbacks
     def _get_rooms_changed(self, sync_result_builder, ignored_users):
@@ -1637,7 +1643,7 @@ class SyncHandler(object):
                 )
             room_entries.append(entry)
 
-        defer.returnValue((room_entries, invited, newly_joined_rooms, newly_left_rooms))
+        return (room_entries, invited, newly_joined_rooms, newly_left_rooms)
 
     @defer.inlineCallbacks
     def _get_all_rooms(self, sync_result_builder, ignored_users):
@@ -1711,7 +1717,7 @@ class SyncHandler(object):
                     )
                 )
 
-        defer.returnValue((room_entries, invited, []))
+        return (room_entries, invited, [])
 
     @defer.inlineCallbacks
     def _generate_room_entry(
@@ -1765,6 +1771,21 @@ class SyncHandler(object):
             newly_joined_room=newly_joined,
         )
 
+        if not batch and batch.limited:
+            # This resulted in #5407, which is weird, so let's log! We do it
+            # here as we have the maximum amount of information.
+            user_id = sync_result_builder.sync_config.user.to_string()
+            logger.info(
+                "Issue #5407: Found limited batch with no events. user %s, room %s,"
+                " sync_config %s, newly_joined %s, events %s, batch %s.",
+                user_id,
+                room_id,
+                sync_config,
+                newly_joined,
+                events,
+                batch,
+            )
+
         if newly_joined:
             # debug for https://github.com/matrix-org/synapse/issues/4422
             issue4422_logger.debug(
@@ -1912,7 +1933,7 @@ class SyncHandler(object):
                 joined_room_ids.add(room_id)
 
         joined_room_ids = frozenset(joined_room_ids)
-        defer.returnValue(joined_room_ids)
+        return joined_room_ids
 
 
 def _action_has_highlight(actions):
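
Two of the sync hunks above add a fallback for the "limited batch with no events" case tracked in #5407: when `batch.events` is empty, state is taken from `get_state_at(room_id, stream_position=now_token, ...)` rather than from the first or last event of the batch, and a later hunk logs the situation with as much context as possible. A minimal sketch of the guard; the two callables are passed in rather than taken from the handler, so their exact signatures are assumed:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def state_ids_for_limited_batch(
        batch, room_id, now_token, state_filter, get_state_ids_for_event, get_state_at
    ):
        if batch:
            # Normal case: anchor on the first event in the timeline batch.
            state_ids = yield get_state_ids_for_event(
                batch.events[0].event_id, state_filter=state_filter
            )
        else:
            # Empty-but-limited batch (#5407): fall back to the room state at
            # the current stream position.
            state_ids = yield get_state_at(
                room_id, stream_position=now_token, state_filter=state_filter
            )
        return state_ids
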
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index c3e0c8fc7e..f882330293 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -83,7 +83,7 @@ class TypingHandler(object):
         self._room_typing = {}
 
     def _handle_timeouts(self):
-        logger.info("Checking for typing timeouts")
+        logger.debug("Checking for typing timeouts")
 
         now = self.clock.time_msec()
 
@@ -140,7 +140,7 @@ class TypingHandler(object):
 
         if was_present:
             # No point sending another notification
-            defer.returnValue(None)
+            return None
 
         self._push_update(member=member, typing=True)
 
@@ -173,7 +173,7 @@ class TypingHandler(object):
     def _stopped_typing(self, member):
         if member.user_id not in self._room_typing.get(member.room_id, set()):
             # No point
-            defer.returnValue(None)
+            return None
 
         self._member_typing_until.pop(member, None)
         self._member_last_federation_poke.pop(member, None)
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 5de9630950..e53669e40d 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -133,7 +133,7 @@ class UserDirectoryHandler(StateDeltasHandler):
 
         # If still None then the initial background update hasn't happened yet
         if self.pos is None:
-            defer.returnValue(None)
+            return None
 
         # Loop round handling deltas until we're up to date
         while True:
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 45d5010952..0ac20ebefc 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -294,7 +294,7 @@ class SimpleHttpClient(object):
             logger.info(
                 "Received response to %s %s: %s", method, redact_uri(uri), response.code
             )
-            defer.returnValue(response)
+            return response
         except Exception as e:
             incoming_responses_counter.labels(method, "ERR").inc()
             logger.info(
@@ -345,7 +345,7 @@ class SimpleHttpClient(object):
         body = yield make_deferred_yieldable(readBody(response))
 
         if 200 <= response.code < 300:
-            defer.returnValue(json.loads(body))
+            return json.loads(body)
         else:
             raise HttpResponseException(response.code, response.phrase, body)
 
@@ -385,7 +385,7 @@ class SimpleHttpClient(object):
         body = yield make_deferred_yieldable(readBody(response))
 
         if 200 <= response.code < 300:
-            defer.returnValue(json.loads(body))
+            return json.loads(body)
         else:
             raise HttpResponseException(response.code, response.phrase, body)
 
@@ -410,7 +410,7 @@ class SimpleHttpClient(object):
             ValueError: if the response was not JSON
         """
         body = yield self.get_raw(uri, args, headers=headers)
-        defer.returnValue(json.loads(body))
+        return json.loads(body)
 
     @defer.inlineCallbacks
     def put_json(self, uri, json_body, args={}, headers=None):
@@ -453,7 +453,7 @@ class SimpleHttpClient(object):
         body = yield make_deferred_yieldable(readBody(response))
 
         if 200 <= response.code < 300:
-            defer.returnValue(json.loads(body))
+            return json.loads(body)
         else:
             raise HttpResponseException(response.code, response.phrase, body)
 
@@ -488,7 +488,7 @@ class SimpleHttpClient(object):
         body = yield make_deferred_yieldable(readBody(response))
 
         if 200 <= response.code < 300:
-            defer.returnValue(body)
+            return body
         else:
             raise HttpResponseException(response.code, response.phrase, body)
 
@@ -545,13 +545,11 @@ class SimpleHttpClient(object):
         except Exception as e:
             raise_from(SynapseError(502, ("Failed to download remote body: %s" % e)), e)
 
-        defer.returnValue(
-            (
-                length,
-                resp_headers,
-                response.request.absoluteURI.decode("ascii"),
-                response.code,
-            )
+        return (
+            length,
+            resp_headers,
+            response.request.absoluteURI.decode("ascii"),
+            response.code,
         )
 
 
@@ -627,10 +625,10 @@ class CaptchaServerHttpClient(SimpleHttpClient):
 
         try:
             body = yield make_deferred_yieldable(readBody(response))
-            defer.returnValue(body)
+            return body
         except PartialDownloadError as e:
             # twisted dislikes google's response, no content length.
-            defer.returnValue(e.response)
+            return e.response
 
 
 def encode_urlencode_args(args):
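
The `SimpleHttpClient` hunks only convert the final returns, leaving the response handling itself untouched: read the body, parse it as JSON for a 2xx status, otherwise raise `HttpResponseException`. A compact sketch of that recurring pattern; the import paths follow those visible elsewhere in this diff:

    import json

    from twisted.internet import defer
    from twisted.web.client import readBody

    from synapse.api.errors import HttpResponseException
    from synapse.logging.context import make_deferred_yieldable

    @defer.inlineCallbacks
    def decode_json_response(response):
        """Return the parsed JSON body for 2xx responses, else raise (sketch)."""
        body = yield make_deferred_yieldable(readBody(response))
        if 200 <= response.code < 300:
            return json.loads(body)
        raise HttpResponseException(response.code, response.phrase, body)
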
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 054c321a20..71a15f434d 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -12,10 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import json
+
 import logging
-import random
-import time
 
 import attr
 from netaddr import IPAddress
@@ -24,31 +22,16 @@ from zope.interface import implementer
 from twisted.internet import defer
 from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
 from twisted.internet.interfaces import IStreamClientEndpoint
-from twisted.web.client import URI, Agent, HTTPConnectionPool, RedirectAgent, readBody
-from twisted.web.http import stringToDatetime
+from twisted.web.client import URI, Agent, HTTPConnectionPool
 from twisted.web.http_headers import Headers
 from twisted.web.iweb import IAgent
 
 from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
+from synapse.http.federation.well_known_resolver import WellKnownResolver
 from synapse.logging.context import make_deferred_yieldable
 from synapse.util import Clock
-from synapse.util.caches.ttlcache import TTLCache
-from synapse.util.metrics import Measure
-
-# period to cache .well-known results for by default
-WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600
-
-# jitter to add to the .well-known default cache ttl
-WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60
-
-# period to cache failure to fetch .well-known for
-WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600
-
-# cap for .well-known cache period
-WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600
 
 logger = logging.getLogger(__name__)
-well_known_cache = TTLCache("well-known")
 
 
 @implementer(IAgent)
@@ -64,10 +47,6 @@ class MatrixFederationAgent(object):
         tls_client_options_factory (ClientTLSOptionsFactory|None):
             factory to use for fetching client tls options, or none to disable TLS.
 
-        _well_known_tls_policy (IPolicyForHTTPS|None):
-            TLS policy to use for fetching .well-known files. None to use a default
-            (browser-like) implementation.
-
         _srv_resolver (SrvResolver|None):
             SRVResolver impl to use for looking up SRV records. None to use a default
             implementation.
@@ -81,9 +60,8 @@ class MatrixFederationAgent(object):
         self,
         reactor,
         tls_client_options_factory,
-        _well_known_tls_policy=None,
         _srv_resolver=None,
-        _well_known_cache=well_known_cache,
+        _well_known_cache=None,
     ):
         self._reactor = reactor
         self._clock = Clock(reactor)
@@ -98,21 +76,15 @@ class MatrixFederationAgent(object):
         self._pool.maxPersistentPerHost = 5
         self._pool.cachedConnectionTimeout = 2 * 60
 
-        agent_args = {}
-        if _well_known_tls_policy is not None:
-            # the param is called 'contextFactory', but actually passing a
-            # contextfactory is deprecated, and it expects an IPolicyForHTTPS.
-            agent_args["contextFactory"] = _well_known_tls_policy
-        _well_known_agent = RedirectAgent(
-            Agent(self._reactor, pool=self._pool, **agent_args)
+        self._well_known_resolver = WellKnownResolver(
+            self._reactor,
+            agent=Agent(
+                self._reactor,
+                pool=self._pool,
+                contextFactory=tls_client_options_factory,
+            ),
+            well_known_cache=_well_known_cache,
         )
-        self._well_known_agent = _well_known_agent
-
-        # our cache of .well-known lookup results, mapping from server name
-        # to delegated name. The values can be:
-        #   `bytes`:     a valid server-name
-        #   `None`:      there is no (valid) .well-known here
-        self._well_known_cache = _well_known_cache
 
     @defer.inlineCallbacks
     def request(self, method, uri, headers=None, bodyProducer=None):
@@ -177,7 +149,7 @@ class MatrixFederationAgent(object):
         res = yield make_deferred_yieldable(
             agent.request(method, uri, headers, bodyProducer)
         )
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def _route_matrix_uri(self, parsed_uri, lookup_well_known=True):
@@ -205,29 +177,28 @@ class MatrixFederationAgent(object):
             port = parsed_uri.port
             if port == -1:
                 port = 8448
-            defer.returnValue(
-                _RoutingResult(
-                    host_header=parsed_uri.netloc,
-                    tls_server_name=parsed_uri.host,
-                    target_host=parsed_uri.host,
-                    target_port=port,
-                )
+            return _RoutingResult(
+                host_header=parsed_uri.netloc,
+                tls_server_name=parsed_uri.host,
+                target_host=parsed_uri.host,
+                target_port=port,
             )
 
         if parsed_uri.port != -1:
             # there is an explicit port
-            defer.returnValue(
-                _RoutingResult(
-                    host_header=parsed_uri.netloc,
-                    tls_server_name=parsed_uri.host,
-                    target_host=parsed_uri.host,
-                    target_port=parsed_uri.port,
-                )
+            return _RoutingResult(
+                host_header=parsed_uri.netloc,
+                tls_server_name=parsed_uri.host,
+                target_host=parsed_uri.host,
+                target_port=parsed_uri.port,
             )
 
         if lookup_well_known:
             # try a .well-known lookup
-            well_known_server = yield self._get_well_known(parsed_uri.host)
+            well_known_result = yield self._well_known_resolver.get_well_known(
+                parsed_uri.host
+            )
+            well_known_server = well_known_result.delegated_server
 
             if well_known_server:
                 # if we found a .well-known, start again, but don't do another
@@ -259,7 +230,7 @@ class MatrixFederationAgent(object):
                 )
 
                 res = yield self._route_matrix_uri(new_uri, lookup_well_known=False)
-                defer.returnValue(res)
+                return res
 
         # try a SRV lookup
         service_name = b"_matrix._tcp.%s" % (parsed_uri.host,)
@@ -283,93 +254,12 @@ class MatrixFederationAgent(object):
                 parsed_uri.host.decode("ascii"),
             )
 
-        defer.returnValue(
-            _RoutingResult(
-                host_header=parsed_uri.netloc,
-                tls_server_name=parsed_uri.host,
-                target_host=target_host,
-                target_port=port,
-            )
-        )
-
-    @defer.inlineCallbacks
-    def _get_well_known(self, server_name):
-        """Attempt to fetch and parse a .well-known file for the given server
-
-        Args:
-            server_name (bytes): name of the server, from the requested url
-
-        Returns:
-            Deferred[bytes|None]: either the new server name, from the .well-known, or
-                None if there was no .well-known file.
-        """
-        try:
-            result = self._well_known_cache[server_name]
-        except KeyError:
-            # TODO: should we linearise so that we don't end up doing two .well-known
-            # requests for the same server in parallel?
-            with Measure(self._clock, "get_well_known"):
-                result, cache_period = yield self._do_get_well_known(server_name)
-
-            if cache_period > 0:
-                self._well_known_cache.set(server_name, result, cache_period)
-
-        defer.returnValue(result)
-
-    @defer.inlineCallbacks
-    def _do_get_well_known(self, server_name):
-        """Actually fetch and parse a .well-known, without checking the cache
-
-        Args:
-            server_name (bytes): name of the server, from the requested url
-
-        Returns:
-            Deferred[Tuple[bytes|None|object],int]:
-                result, cache period, where result is one of:
-                 - the new server name from the .well-known (as a `bytes`)
-                 - None if there was no .well-known file.
-                 - INVALID_WELL_KNOWN if the .well-known was invalid
-        """
-        uri = b"https://%s/.well-known/matrix/server" % (server_name,)
-        uri_str = uri.decode("ascii")
-        logger.info("Fetching %s", uri_str)
-        try:
-            response = yield make_deferred_yieldable(
-                self._well_known_agent.request(b"GET", uri)
-            )
-            body = yield make_deferred_yieldable(readBody(response))
-            if response.code != 200:
-                raise Exception("Non-200 response %s" % (response.code,))
-
-            parsed_body = json.loads(body.decode("utf-8"))
-            logger.info("Response from .well-known: %s", parsed_body)
-            if not isinstance(parsed_body, dict):
-                raise Exception("not a dict")
-            if "m.server" not in parsed_body:
-                raise Exception("Missing key 'm.server'")
-        except Exception as e:
-            logger.info("Error fetching %s: %s", uri_str, e)
-
-            # add some randomness to the TTL to avoid a stampeding herd every hour
-            # after startup
-            cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
-            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
-            defer.returnValue((None, cache_period))
-
-        result = parsed_body["m.server"].encode("ascii")
-
-        cache_period = _cache_period_from_headers(
-            response.headers, time_now=self._reactor.seconds
+        return _RoutingResult(
+            host_header=parsed_uri.netloc,
+            tls_server_name=parsed_uri.host,
+            target_host=target_host,
+            target_port=port,
         )
-        if cache_period is None:
-            cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
-            # add some randomness to the TTL to avoid a stampeding herd every 24 hours
-            # after startup
-            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
-        else:
-            cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
-
-        defer.returnValue((result, cache_period))
 
 
 @implementer(IStreamClientEndpoint)
@@ -386,44 +276,6 @@ class LoggingHostnameEndpoint(object):
         return self.ep.connect(protocol_factory)
 
 
-def _cache_period_from_headers(headers, time_now=time.time):
-    cache_controls = _parse_cache_control(headers)
-
-    if b"no-store" in cache_controls:
-        return 0
-
-    if b"max-age" in cache_controls:
-        try:
-            max_age = int(cache_controls[b"max-age"])
-            return max_age
-        except ValueError:
-            pass
-
-    expires = headers.getRawHeaders(b"expires")
-    if expires is not None:
-        try:
-            expires_date = stringToDatetime(expires[-1])
-            return expires_date - time_now()
-        except ValueError:
-            # RFC7234 says 'A cache recipient MUST interpret invalid date formats,
-            # especially the value "0", as representing a time in the past (i.e.,
-            # "already expired").
-            return 0
-
-    return None
-
-
-def _parse_cache_control(headers):
-    cache_controls = {}
-    for hdr in headers.getRawHeaders(b"cache-control", []):
-        for directive in hdr.split(b","):
-            splits = [x.strip() for x in directive.split(b"=", 1)]
-            k = splits[0].lower()
-            v = splits[1] if len(splits) > 1 else None
-            cache_controls[k] = v
-    return cache_controls
-
-
 @attr.s
 class _RoutingResult(object):
     """The result returned by `_route_matrix_uri`.
diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py
index ecc88f9b96..b32188766d 100644
--- a/synapse/http/federation/srv_resolver.py
+++ b/synapse/http/federation/srv_resolver.py
@@ -120,7 +120,7 @@ class SrvResolver(object):
         if cache_entry:
             if all(s.expires > now for s in cache_entry):
                 servers = list(cache_entry)
-                defer.returnValue(servers)
+                return servers
 
         try:
             answers, _, _ = yield make_deferred_yieldable(
@@ -129,7 +129,7 @@ class SrvResolver(object):
         except DNSNameError:
             # TODO: cache this. We can get the SOA out of the exception, and use
             # the negative-TTL value.
-            defer.returnValue([])
+            return []
         except DomainError as e:
             # We failed to resolve the name (other than a NameError)
             # Try something in the cache, else rereaise
@@ -138,7 +138,7 @@ class SrvResolver(object):
                 logger.warn(
                     "Failed to resolve %r, falling back to cache. %r", service_name, e
                 )
-                defer.returnValue(list(cache_entry))
+                return list(cache_entry)
             else:
                 raise e
 
@@ -169,4 +169,4 @@ class SrvResolver(object):
             )
 
         self._cache[service_name] = list(servers)
-        defer.returnValue(servers)
+        return servers
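
Taken together, the agent changes above leave _route_matrix_uri with a fixed priority order: an explicit port wins, then .well-known delegation (now handled by WellKnownResolver, defined in the new module below), then an SRV lookup, then the default port 8448. A simplified, synchronous sketch of that decision order; the resolver callables here are stand-ins, not the real WellKnownResolver or SrvResolver:

    def route(host, port, lookup_well_known, lookup_srv):
        if port is not None:
            # explicit port in the URI: connect directly, no delegation
            return (host, port)

        delegated = lookup_well_known(host)   # e.g. "synapse.example.org:443" or None
        if delegated is not None:
            d_host, _, d_port = delegated.partition(":")
            # start again with the delegated name, but skip a second .well-known lookup
            return route(d_host, int(d_port) if d_port else None,
                         lambda _host: None, lookup_srv)

        srv = lookup_srv("_matrix._tcp." + host)   # list of (target, port), possibly empty
        if srv:
            return srv[0]

        return (host, 8448)                        # final fallback

    print(route("example.org", None,
                lambda h: "synapse.example.org:443", lambda s: []))
    # -> ('synapse.example.org', 443)
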
diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py
new file mode 100644
index 0000000000..d2866ff67d
--- /dev/null
+++ b/synapse/http/federation/well_known_resolver.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import logging
+import random
+import time
+
+import attr
+
+from twisted.internet import defer
+from twisted.web.client import RedirectAgent, readBody
+from twisted.web.http import stringToDatetime
+
+from synapse.logging.context import make_deferred_yieldable
+from synapse.util import Clock
+from synapse.util.caches.ttlcache import TTLCache
+from synapse.util.metrics import Measure
+
+# period to cache .well-known results for by default
+WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600
+
+# jitter to add to the .well-known default cache ttl
+WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60
+
+# period to cache failure to fetch .well-known for
+WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600
+
+# cap for .well-known cache period
+WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600
+
+# lower bound for .well-known cache period
+WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60
+
+logger = logging.getLogger(__name__)
+
+
+_well_known_cache = TTLCache("well-known")
+
+
+@attr.s(slots=True, frozen=True)
+class WellKnownLookupResult(object):
+    delegated_server = attr.ib()
+
+
+class WellKnownResolver(object):
+    """Handles well-known lookups for matrix servers.
+    """
+
+    def __init__(self, reactor, agent, well_known_cache=None):
+        self._reactor = reactor
+        self._clock = Clock(reactor)
+
+        if well_known_cache is None:
+            well_known_cache = _well_known_cache
+
+        self._well_known_cache = well_known_cache
+        self._well_known_agent = RedirectAgent(agent)
+
+    @defer.inlineCallbacks
+    def get_well_known(self, server_name):
+        """Attempt to fetch and parse a .well-known file for the given server
+
+        Args:
+            server_name (bytes): name of the server, from the requested url
+
+        Returns:
+            Deferred[WellKnownLookupResult]: The result of the lookup
+        """
+        try:
+            result = self._well_known_cache[server_name]
+        except KeyError:
+            # TODO: should we linearise so that we don't end up doing two .well-known
+            # requests for the same server in parallel?
+            with Measure(self._clock, "get_well_known"):
+                result, cache_period = yield self._do_get_well_known(server_name)
+
+            if cache_period > 0:
+                self._well_known_cache.set(server_name, result, cache_period)
+
+        return WellKnownLookupResult(delegated_server=result)
+
+    @defer.inlineCallbacks
+    def _do_get_well_known(self, server_name):
+        """Actually fetch and parse a .well-known, without checking the cache
+
+        Args:
+            server_name (bytes): name of the server, from the requested url
+
+        Returns:
+            Deferred[Tuple[bytes|None|object, int]]:
+                result, cache period, where result is one of:
+                 - the new server name from the .well-known (as a `bytes`)
+                 - None if there was no .well-known file.
+                 - INVALID_WELL_KNOWN if the .well-known was invalid
+        """
+        uri = b"https://%s/.well-known/matrix/server" % (server_name,)
+        uri_str = uri.decode("ascii")
+        logger.info("Fetching %s", uri_str)
+        try:
+            response = yield make_deferred_yieldable(
+                self._well_known_agent.request(b"GET", uri)
+            )
+            body = yield make_deferred_yieldable(readBody(response))
+            if response.code != 200:
+                raise Exception("Non-200 response %s" % (response.code,))
+
+            parsed_body = json.loads(body.decode("utf-8"))
+            logger.info("Response from .well-known: %s", parsed_body)
+            if not isinstance(parsed_body, dict):
+                raise Exception("not a dict")
+            if "m.server" not in parsed_body:
+                raise Exception("Missing key 'm.server'")
+        except Exception as e:
+            logger.info("Error fetching %s: %s", uri_str, e)
+
+            # add some randomness to the TTL to avoid a stampeding herd every hour
+            # after startup
+            cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
+            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
+            return (None, cache_period)
+
+        result = parsed_body["m.server"].encode("ascii")
+
+        cache_period = _cache_period_from_headers(
+            response.headers, time_now=self._reactor.seconds
+        )
+        if cache_period is None:
+            cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
+            # add some randomness to the TTL to avoid a stampeding herd every 24 hours
+            # after startup
+            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
+        else:
+            cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
+            cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD)
+
+        return (result, cache_period)
+
+
+def _cache_period_from_headers(headers, time_now=time.time):
+    cache_controls = _parse_cache_control(headers)
+
+    if b"no-store" in cache_controls:
+        return 0
+
+    if b"max-age" in cache_controls:
+        try:
+            max_age = int(cache_controls[b"max-age"])
+            return max_age
+        except ValueError:
+            pass
+
+    expires = headers.getRawHeaders(b"expires")
+    if expires is not None:
+        try:
+            expires_date = stringToDatetime(expires[-1])
+            return expires_date - time_now()
+        except ValueError:
+            # RFC7234 says 'A cache recipient MUST interpret invalid date formats,
+            # especially the value "0", as representing a time in the past (i.e.,
+            # "already expired").
+            return 0
+
+    return None
+
+
+def _parse_cache_control(headers):
+    cache_controls = {}
+    for hdr in headers.getRawHeaders(b"cache-control", []):
+        for directive in hdr.split(b","):
+            splits = [x.strip() for x in directive.split(b"=", 1)]
+            k = splits[0].lower()
+            v = splits[1] if len(splits) > 1 else None
+            cache_controls[k] = v
+    return cache_controls
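
The cache lifetime for a .well-known result is derived from the response's Cache-Control/Expires headers by the two helpers at the bottom of the new module, then clamped by _do_get_well_known between the new 5-minute minimum and the 48-hour maximum (or defaulted to 24 hours plus jitter when no header applies). A small usage sketch, assuming this Synapse checkout is importable and Twisted is installed:

    from twisted.web.http_headers import Headers

    from synapse.http.federation.well_known_resolver import (
        _cache_period_from_headers,
        _parse_cache_control,
    )

    h = Headers({b"Cache-Control": [b"max-age=3600, public"]})
    print(_parse_cache_control(h))        # {b'max-age': b'3600', b'public': None}
    print(_cache_period_from_headers(h))  # 3600

    h = Headers({b"Cache-Control": [b"no-store"]})
    print(_cache_period_from_headers(h))  # 0; _do_get_well_known then clamps this up to the 5 minute minimum
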
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index e60334547e..d07d356464 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -158,7 +158,7 @@ def _handle_json_response(reactor, timeout_sec, request, response):
         response.code,
         response.phrase.decode("ascii", errors="replace"),
     )
-    defer.returnValue(body)
+    return body
 
 
 class MatrixFederationHttpClient(object):
@@ -256,7 +256,7 @@ class MatrixFederationHttpClient(object):
 
             response = yield self._send_request(request, **send_request_args)
 
-        defer.returnValue(response)
+        return response
 
     @defer.inlineCallbacks
     def _send_request(
@@ -520,7 +520,7 @@ class MatrixFederationHttpClient(object):
                         _flatten_response_never_received(e),
                     )
                     raise
-        defer.returnValue(response)
+        return response
 
     def build_auth_headers(
         self, destination, method, url_bytes, content=None, destination_is=None
@@ -644,7 +644,7 @@ class MatrixFederationHttpClient(object):
             self.reactor, self.default_timeout, request, response
         )
 
-        defer.returnValue(body)
+        return body
 
     @defer.inlineCallbacks
     def post_json(
@@ -713,7 +713,7 @@ class MatrixFederationHttpClient(object):
         body = yield _handle_json_response(
             self.reactor, _sec_timeout, request, response
         )
-        defer.returnValue(body)
+        return body
 
     @defer.inlineCallbacks
     def get_json(
@@ -778,7 +778,7 @@ class MatrixFederationHttpClient(object):
             self.reactor, self.default_timeout, request, response
         )
 
-        defer.returnValue(body)
+        return body
 
     @defer.inlineCallbacks
     def delete_json(
@@ -836,7 +836,7 @@ class MatrixFederationHttpClient(object):
         body = yield _handle_json_response(
             self.reactor, self.default_timeout, request, response
         )
-        defer.returnValue(body)
+        return body
 
     @defer.inlineCallbacks
     def get_file(
@@ -902,7 +902,7 @@ class MatrixFederationHttpClient(object):
             response.phrase.decode("ascii", errors="replace"),
             length,
         )
-        defer.returnValue((length, headers))
+        return (length, headers)
 
 
 class _ReadBodyToFileProtocol(protocol.Protocol):
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 72a3d67eb6..e6f351ba3b 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -245,7 +245,9 @@ class JsonResource(HttpServer, resource.Resource):
 
     isLeaf = True
 
-    _PathEntry = collections.namedtuple("_PathEntry", ["pattern", "callback"])
+    _PathEntry = collections.namedtuple(
+        "_PathEntry", ["pattern", "callback", "servlet_classname"]
+    )
 
     def __init__(self, hs, canonical_json=True):
         resource.Resource.__init__(self)
@@ -255,12 +257,28 @@ class JsonResource(HttpServer, resource.Resource):
         self.path_regexs = {}
         self.hs = hs
 
-    def register_paths(self, method, path_patterns, callback):
+    def register_paths(self, method, path_patterns, callback, servlet_classname):
+        """
+        Registers a request handler against a regular expression. Later request URLs are
+        checked against these regular expressions in order to identify an appropriate
+        handler for that request.
+
+        Args:
+            method (str): GET, POST etc
+
+            path_patterns (Iterable[str]): A list of regular expressions to which
+                the request URLs are compared.
+
+            callback (function): The handler for the request. Usually a Servlet
+
+            servlet_classname (str): The name of the handler to be used in prometheus
+                and opentracing logs.
+        """
         method = method.encode("utf-8")  # method is bytes on py3
         for path_pattern in path_patterns:
             logger.debug("Registering for %s %s", method, path_pattern.pattern)
             self.path_regexs.setdefault(method, []).append(
-                self._PathEntry(path_pattern, callback)
+                self._PathEntry(path_pattern, callback, servlet_classname)
             )
 
     def render(self, request):
@@ -275,13 +293,9 @@ class JsonResource(HttpServer, resource.Resource):
             This checks if anyone has registered a callback for that method and
             path.
         """
-        callback, group_dict = self._get_handler_for_request(request)
+        callback, servlet_classname, group_dict = self._get_handler_for_request(request)
 
-        servlet_instance = getattr(callback, "__self__", None)
-        if servlet_instance is not None:
-            servlet_classname = servlet_instance.__class__.__name__
-        else:
-            servlet_classname = "%r" % callback
+        # Make sure we have a name for this handler in prometheus.
         request.request_metrics.name = servlet_classname
 
         # Now trigger the callback. If it returns a response, we send it
@@ -311,7 +325,8 @@ class JsonResource(HttpServer, resource.Resource):
             request (twisted.web.http.Request):
 
         Returns:
-            Tuple[Callable, dict[unicode, unicode]]: callback method, and the
+            Tuple[Callable, str, dict[unicode, unicode]]: callback method, the
+                label to use for that method in prometheus metrics, and the
                 dict mapping keys to path components as specified in the
                 handler's path match regexp.
 
@@ -320,7 +335,7 @@ class JsonResource(HttpServer, resource.Resource):
                 None, or a tuple of (http code, response body).
         """
         if request.method == b"OPTIONS":
-            return _options_handler, {}
+            return _options_handler, "options_request_handler", {}
 
         # Loop through all the registered callbacks to check if the method
         # and path regex match
@@ -328,10 +343,10 @@ class JsonResource(HttpServer, resource.Resource):
             m = path_entry.pattern.match(request.path.decode("ascii"))
             if m:
                 # We found a match!
-                return path_entry.callback, m.groupdict()
+                return path_entry.callback, path_entry.servlet_classname, m.groupdict()
 
         # Huh. No one wanted to handle that? Fiiiiiine. Send 400.
-        return _unrecognised_request_handler, {}
+        return _unrecognised_request_handler, "unrecognised_request_handler", {}
 
     def _send_response(
         self, request, code, response_json_object, response_code_message=None
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 889038ff25..fd07bf7b8e 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -166,7 +166,12 @@ def parse_string_from_args(
         value = args[name][0]
 
         if encoding:
-            value = value.decode(encoding)
+            try:
+                value = value.decode(encoding)
+            except ValueError:
+                raise SynapseError(
+                    400, "Query parameter %r must be %s" % (name, encoding)
+                )
 
         if allowed_values is not None and value not in allowed_values:
             message = "Query parameter %r must be one of [%s]" % (
@@ -290,11 +295,13 @@ class RestServlet(object):
 
             for method in ("GET", "PUT", "POST", "OPTIONS", "DELETE"):
                 if hasattr(self, "on_%s" % (method,)):
+                    servlet_classname = self.__class__.__name__
                     method_handler = getattr(self, "on_%s" % (method,))
                     http_server.register_paths(
                         method,
                         patterns,
-                        trace_servlet(self.__class__.__name__, method_handler),
+                        trace_servlet(servlet_classname, method_handler),
+                        servlet_classname,
                     )
 
         else:
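
The two changes above (JsonResource.register_paths growing a servlet_classname argument, and RestServlet.register passing its own class name) mean the prometheus/opentracing label for a request is recorded at registration time instead of being guessed from the callback object at request time. A standalone sketch of the resulting dispatch-table shape; the names are illustrative rather than the Synapse API:

    import collections
    import re

    _PathEntry = collections.namedtuple(
        "_PathEntry", ["pattern", "callback", "servlet_classname"]
    )

    path_regexs = {}

    def register_paths(method, path_patterns, callback, servlet_classname):
        for pattern in path_patterns:
            path_regexs.setdefault(method, []).append(
                _PathEntry(re.compile(pattern), callback, servlet_classname)
            )

    def get_handler_for(method, path):
        for entry in path_regexs.get(method, []):
            m = entry.pattern.match(path)
            if m:
                return entry.callback, entry.servlet_classname, m.groupdict()
        return None, "unrecognised_request_handler", {}

    register_paths("GET", [r"^/_matrix/client/r0/sync$"], lambda: (200, {}), "SyncRestServlet")
    print(get_handler_for("GET", "/_matrix/client/r0/sync")[1])  # SyncRestServlet
    print(get_handler_for("GET", "/nonsense")[1])                # unrecognised_request_handler
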
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 04393697c0..d2c209c471 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -11,7 +11,7 @@
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
-# limitations under the License.import opentracing
+# limitations under the License.
 
 
 # NOTE
@@ -89,7 +89,7 @@ the function becomes the operation name for the span.
        # We start
        yield we_wait
        # we finish
-       defer.returnValue(something_usual_and_useful)
+       return something_usual_and_useful
 
 Operation names can be explicitly set for functions by using
 ``trace_using_operation_name`` and
@@ -113,7 +113,7 @@ Operation names can be explicitly set for functions by using
        # We start
        yield we_wait
        # we finish
-       defer.returnValue(something_usual_and_useful)
+       return something_usual_and_useful
 
 Contexts and carriers
 ---------------------
@@ -150,10 +150,13 @@ Gotchas
 """
 
 import contextlib
+import inspect
 import logging
 import re
 from functools import wraps
 
+from canonicaljson import json
+
 from twisted.internet import defer
 
 from synapse.config import ConfigError
@@ -173,36 +176,12 @@ except ImportError:
 logger = logging.getLogger(__name__)
 
 
-class _DumTagNames(object):
-    """wrapper of opentracings tags. We need to have them if we
-    want to reference them without opentracing around. Clearly they
-    should never actually show up in a trace. `set_tags` overwrites
-    these with the correct ones."""
+# Block everything by default
+# A regex which matches the server_names to expose traces for.
+# None means 'block everything'.
+_homeserver_whitelist = None
 
-    INVALID_TAG = "invalid-tag"
-    COMPONENT = INVALID_TAG
-    DATABASE_INSTANCE = INVALID_TAG
-    DATABASE_STATEMENT = INVALID_TAG
-    DATABASE_TYPE = INVALID_TAG
-    DATABASE_USER = INVALID_TAG
-    ERROR = INVALID_TAG
-    HTTP_METHOD = INVALID_TAG
-    HTTP_STATUS_CODE = INVALID_TAG
-    HTTP_URL = INVALID_TAG
-    MESSAGE_BUS_DESTINATION = INVALID_TAG
-    PEER_ADDRESS = INVALID_TAG
-    PEER_HOSTNAME = INVALID_TAG
-    PEER_HOST_IPV4 = INVALID_TAG
-    PEER_HOST_IPV6 = INVALID_TAG
-    PEER_PORT = INVALID_TAG
-    PEER_SERVICE = INVALID_TAG
-    SAMPLING_PRIORITY = INVALID_TAG
-    SERVICE = INVALID_TAG
-    SPAN_KIND = INVALID_TAG
-    SPAN_KIND_CONSUMER = INVALID_TAG
-    SPAN_KIND_PRODUCER = INVALID_TAG
-    SPAN_KIND_RPC_CLIENT = INVALID_TAG
-    SPAN_KIND_RPC_SERVER = INVALID_TAG
+# Util methods
 
 
 def only_if_tracing(func):
@@ -219,11 +198,13 @@ def only_if_tracing(func):
     return _only_if_tracing_inner
 
 
-# A regex which matches the server_names to expose traces for.
-# None means 'block everything'.
-_homeserver_whitelist = None
+@contextlib.contextmanager
+def _noop_context_manager(*args, **kwargs):
+    """Does exactly what it says on the tin"""
+    yield
+
 
-tags = _DumTagNames
+# Setup
 
 
 def init_tracer(config):
@@ -247,26 +228,55 @@ def init_tracer(config):
     # Include the worker name
     name = config.worker_name if config.worker_name else "master"
 
+    # Pull out the jaeger config if it was given. Otherwise set it to something sensible.
+    # See https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/config.py
+
     set_homeserver_whitelist(config.opentracer_whitelist)
-    jaeger_config = JaegerConfig(
-        config={"sampler": {"type": "const", "param": 1}, "logging": True},
+
+    JaegerConfig(
+        config=config.jaeger_config,
         service_name="{} {}".format(config.server_name, name),
         scope_manager=LogContextScopeManager(config),
-    )
-    jaeger_config.initialize_tracer()
+    ).initialize_tracer()
 
     # Set up tags to be opentracing's tags
     global tags
     tags = opentracing.tags
 
 
-@contextlib.contextmanager
-def _noop_context_manager(*args, **kwargs):
-    """Does absolutely nothing really well. Can be entered and exited arbitrarily.
-    Good substitute for an opentracing scope."""
-    yield
+# Whitelisting
+
+
+@only_if_tracing
+def set_homeserver_whitelist(homeserver_whitelist):
+    """Sets the homeserver whitelist
+
+    Args:
+        homeserver_whitelist (Iterable[str]): regex of whitelisted homeservers
+    """
+    global _homeserver_whitelist
+    if homeserver_whitelist:
+        # Makes a single regex which accepts all passed in regexes in the list
+        _homeserver_whitelist = re.compile(
+            "({})".format(")|(".join(homeserver_whitelist))
+        )
+
+
+@only_if_tracing
+def whitelisted_homeserver(destination):
+    """Checks if a destination matches the whitelist
+
+    Args:
+        destination (str)
+    """
+    # reads the module-level _homeserver_whitelist set by set_homeserver_whitelist
+    if _homeserver_whitelist:
+        return _homeserver_whitelist.match(destination)
+    return False
 
 
+# Start spans and scopes
+
 # Could use kwargs but I want these to be explicit
 def start_active_span(
     operation_name,
@@ -285,8 +295,10 @@ def start_active_span(
     Returns:
         scope (Scope) or noop_context_manager
     """
+
     if opentracing is None:
         return _noop_context_manager()
+
     else:
         # We need to enter the scope here for the logcontext to become active
         return opentracing.tracer.start_active_span(
@@ -300,63 +312,13 @@ def start_active_span(
         )
 
 
-@only_if_tracing
-def close_active_span():
-    """Closes the active span. This will close it's logcontext if the context
-    was made for the span"""
-    opentracing.tracer.scope_manager.active.__exit__(None, None, None)
-
-
-@only_if_tracing
-def set_tag(key, value):
-    """Set's a tag on the active span"""
-    opentracing.tracer.active_span.set_tag(key, value)
-
-
-@only_if_tracing
-def log_kv(key_values, timestamp=None):
-    """Log to the active span"""
-    opentracing.tracer.active_span.log_kv(key_values, timestamp)
-
-
-# Note: we don't have a get baggage items because we're trying to hide all
-# scope and span state from synapse. I think this method may also be useless
-# as a result
-@only_if_tracing
-def set_baggage_item(key, value):
-    """Attach baggage to the active span"""
-    opentracing.tracer.active_span.set_baggage_item(key, value)
-
-
-@only_if_tracing
-def set_operation_name(operation_name):
-    """Sets the operation name of the active span"""
-    opentracing.tracer.active_span.set_operation_name(operation_name)
-
-
-@only_if_tracing
-def set_homeserver_whitelist(homeserver_whitelist):
-    """Sets the whitelist
-
-    Args:
-        homeserver_whitelist (iterable of strings): regex of whitelisted homeservers
-    """
-    global _homeserver_whitelist
-    if homeserver_whitelist:
-        # Makes a single regex which accepts all passed in regexes in the list
-        _homeserver_whitelist = re.compile(
-            "({})".format(")|(".join(homeserver_whitelist))
-        )
-
-
-@only_if_tracing
-def whitelisted_homeserver(destination):
-    """Checks if a destination matches the whitelist
-    Args:
-        destination (String)"""
-    if _homeserver_whitelist:
-        return _homeserver_whitelist.match(destination)
-    return False
+def start_active_span_follows_from(operation_name, contexts):
+    if opentracing is None:
+        return _noop_context_manager()
+    else:
+        references = [opentracing.follows_from(context) for context in contexts]
+        scope = start_active_span(operation_name, references=references)
+        return scope
 
 
 def start_active_span_from_context(
@@ -372,12 +334,16 @@ def start_active_span_from_context(
     Extracts a span context from Twisted Headers.
     args:
         headers (twisted.web.http_headers.Headers)
+
+        For the other args see opentracing.tracer
+
     returns:
         span_context (opentracing.span.SpanContext)
     """
     # Twisted encodes the values as lists whereas opentracing doesn't.
     # So, we take the first item in the list.
     # Also, twisted uses byte arrays while opentracing expects strings.
+
     if opentracing is None:
         return _noop_context_manager()
 
@@ -395,17 +361,90 @@ def start_active_span_from_context(
     )
 
 
+def start_active_span_from_edu(
+    edu_content,
+    operation_name,
+    references=[],
+    tags=None,
+    start_time=None,
+    ignore_active_span=False,
+    finish_on_close=True,
+):
+    """
+    Extracts a span context from an edu and uses it to start a new active span
+
+    Args:
+        edu_content (dict): an edu_content with a `context` field whose value is
+        canonical json for a dict which contains opentracing information.
+
+        For the other args see opentracing.tracer
+    """
+
+    if opentracing is None:
+        return _noop_context_manager()
+
+    carrier = json.loads(edu_content.get("context", "{}")).get("opentracing", {})
+    context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
+    _references = [
+        opentracing.child_of(span_context_from_string(x))
+        for x in carrier.get("references", [])
+    ]
+
+    # For some reason jaeger decided not to support the visualization of multiple parent
+    # spans or explicitly show references. I include the span context as a tag here as
+    # an aid to people debugging but it's really not an ideal solution.
+
+    references += _references
+
+    scope = opentracing.tracer.start_active_span(
+        operation_name,
+        child_of=context,
+        references=references,
+        tags=tags,
+        start_time=start_time,
+        ignore_active_span=ignore_active_span,
+        finish_on_close=finish_on_close,
+    )
+
+    scope.span.set_tag("references", carrier.get("references", []))
+    return scope
+
+
+# Opentracing setters for tags, logs, etc
+
+
+@only_if_tracing
+def set_tag(key, value):
+    """Sets a tag on the active span"""
+    opentracing.tracer.active_span.set_tag(key, value)
+
+
+@only_if_tracing
+def log_kv(key_values, timestamp=None):
+    """Log to the active span"""
+    opentracing.tracer.active_span.log_kv(key_values, timestamp)
+
+
+@only_if_tracing
+def set_operation_name(operation_name):
+    """Sets the operation name of the active span"""
+    opentracing.tracer.active_span.set_operation_name(operation_name)
+
+
+# Injection and extraction
+
+
 @only_if_tracing
 def inject_active_span_twisted_headers(headers, destination):
     """
-    Injects a span context into twisted headers inplace
+    Injects a span context into twisted headers in-place
 
     Args:
         headers (twisted.web.http_headers.Headers)
         span (opentracing.Span)
 
     Returns:
-        Inplace modification of headers
+        In-place modification of headers
 
     Note:
         The headers set by the tracer are custom to the tracer implementation which
@@ -437,7 +476,7 @@ def inject_active_span_byte_dict(headers, destination):
         span (opentracing.Span)
 
     Returns:
-        Inplace modification of headers
+        In-place modification of headers
 
     Note:
         The headers set by the tracer are custom to the tracer implementation which
@@ -458,9 +497,190 @@ def inject_active_span_byte_dict(headers, destination):
         headers[key.encode()] = [value.encode()]
 
 
+@only_if_tracing
+def inject_active_span_text_map(carrier, destination=None):
+    """
+    Injects a span context into a dict
+
+    Args:
+        carrier (dict)
+        destination (str): the name of the remote server. The span context
+        will only be injected if the destination matches the homeserver_whitelist
+        or destination is None.
+
+    Returns:
+        In-place modification of carrier
+
+    Note:
+        The headers set by the tracer are custom to the tracer implementation which
+        should be unique enough that they don't interfere with any headers set by
+        synapse or twisted. If we're still using jaeger these headers would be those
+        here:
+        https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
+    """
+
+    if destination and not whitelisted_homeserver(destination):
+        return
+
+    opentracing.tracer.inject(
+        opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+    )
+
+
+def active_span_context_as_string():
+    """
+    Returns:
+        The active span context encoded as a string.
+    """
+    carrier = {}
+    if opentracing:
+        opentracing.tracer.inject(
+            opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+        )
+    return json.dumps(carrier)
+
+
+@only_if_tracing
+def span_context_from_string(carrier):
+    """
+    Returns:
+        The span context decoded from the given string.
+    """
+    carrier = json.loads(carrier)
+    return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
+
+
+@only_if_tracing
+def extract_text_map(carrier):
+    """
+    Wrapper method for opentracing's tracer.extract for TEXT_MAP.
+    Args:
+        carrier (dict): a dict possibly containing a span context.
+
+    Returns:
+        The span context extracted from the carrier.
+    """
+    return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
+
+
+# Tracing decorators
+
+
+def trace(func):
+    """
+    Decorator to trace a function.
+    Sets the operation name to the function's name.
+    """
+    if opentracing is None:
+        return func
+
+    @wraps(func)
+    def _trace_inner(self, *args, **kwargs):
+        if opentracing is None:
+            return func(self, *args, **kwargs)
+
+        scope = start_active_span(func.__name__)
+        scope.__enter__()
+
+        try:
+            result = func(self, *args, **kwargs)
+            if isinstance(result, defer.Deferred):
+
+                def call_back(result):
+                    scope.__exit__(None, None, None)
+                    return result
+
+                def err_back(result):
+                    scope.span.set_tag(tags.ERROR, True)
+                    scope.__exit__(None, None, None)
+                    return result
+
+                result.addCallbacks(call_back, err_back)
+
+            else:
+                scope.__exit__(None, None, None)
+
+            return result
+
+        except Exception as e:
+            scope.__exit__(type(e), None, e.__traceback__)
+            raise
+
+    return _trace_inner
+
+
+def trace_using_operation_name(operation_name):
+    """Decorator to trace a function. Explicitly sets the operation_name."""
+
+    def trace(func):
+        """
+        Decorator to trace a function.
+        Sets the operation name to the given operation_name.
+        """
+        if opentracing is None:
+            return func
+
+        @wraps(func)
+        def _trace_inner(self, *args, **kwargs):
+            if opentracing is None:
+                return func(self, *args, **kwargs)
+
+            scope = start_active_span(operation_name)
+            scope.__enter__()
+
+            try:
+                result = func(self, *args, **kwargs)
+                if isinstance(result, defer.Deferred):
+
+                    def call_back(result):
+                        scope.__exit__(None, None, None)
+                        return result
+
+                    def err_back(result):
+                        scope.span.set_tag(tags.ERROR, True)
+                        scope.__exit__(None, None, None)
+                        return result
+
+                    result.addCallbacks(call_back, err_back)
+                else:
+                    scope.__exit__(None, None, None)
+
+                return result
+
+            except Exception as e:
+                scope.__exit__(type(e), None, e.__traceback__)
+                raise
+
+        return _trace_inner
+
+    return trace
+
+
+def tag_args(func):
+    """
+    Tags all of the args to the active span.
+    """
+
+    if not opentracing:
+        return func
+
+    @wraps(func)
+    def _tag_args_inner(self, *args, **kwargs):
+        argspec = inspect.getargspec(func)
+        for i, arg in enumerate(argspec.args[1:]):
+            set_tag("ARG_" + arg, args[i])
+        set_tag("args", args[len(argspec.args) :])
+        set_tag("kwargs", kwargs)
+        return func(self, *args, **kwargs)
+
+    return _tag_args_inner
+
+
 def trace_servlet(servlet_name, func):
     """Decorator which traces a serlet. It starts a span with some servlet specific
     tags such as the servlet_name and request information"""
+    if not opentracing:
+        return func
 
     @wraps(func)
     @defer.inlineCallbacks
@@ -477,6 +697,44 @@ def trace_servlet(servlet_name, func):
             },
         ):
             result = yield defer.maybeDeferred(func, request, *args, **kwargs)
-        defer.returnValue(result)
+            return result
 
     return _trace_servlet_inner
+
+
+# Helper class
+
+
+class _DummyTagNames(object):
+    """wrapper of opentracings tags. We need to have them if we
+    want to reference them without opentracing around. Clearly they
+    should never actually show up in a trace. `set_tags` overwrites
+    these with the correct ones."""
+
+    INVALID_TAG = "invalid-tag"
+    COMPONENT = INVALID_TAG
+    DATABASE_INSTANCE = INVALID_TAG
+    DATABASE_STATEMENT = INVALID_TAG
+    DATABASE_TYPE = INVALID_TAG
+    DATABASE_USER = INVALID_TAG
+    ERROR = INVALID_TAG
+    HTTP_METHOD = INVALID_TAG
+    HTTP_STATUS_CODE = INVALID_TAG
+    HTTP_URL = INVALID_TAG
+    MESSAGE_BUS_DESTINATION = INVALID_TAG
+    PEER_ADDRESS = INVALID_TAG
+    PEER_HOSTNAME = INVALID_TAG
+    PEER_HOST_IPV4 = INVALID_TAG
+    PEER_HOST_IPV6 = INVALID_TAG
+    PEER_PORT = INVALID_TAG
+    PEER_SERVICE = INVALID_TAG
+    SAMPLING_PRIORITY = INVALID_TAG
+    SERVICE = INVALID_TAG
+    SPAN_KIND = INVALID_TAG
+    SPAN_KIND_CONSUMER = INVALID_TAG
+    SPAN_KIND_PRODUCER = INVALID_TAG
+    SPAN_KIND_RPC_CLIENT = INVALID_TAG
+    SPAN_KIND_RPC_SERVER = INVALID_TAG
+
+
+tags = _DummyTagNames
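
The new trace and trace_using_operation_name decorators above share one non-obvious detail: if the wrapped function returns a Deferred, the scope must stay open until that Deferred fires, so it is closed from callbacks rather than immediately. A standalone sketch of that pattern, assuming only Twisted is installed; FakeScope is a stand-in for an opentracing scope:

    from twisted.internet import defer

    class FakeScope(object):
        def __init__(self, name):
            self.name = name

        def __enter__(self):
            print("open", self.name)
            return self

        def __exit__(self, exc_type, exc_value, tb):
            print("close", self.name, "error" if exc_type or exc_value else "ok")

    def traced(func):
        def wrapper(*args, **kwargs):
            scope = FakeScope(func.__name__)
            scope.__enter__()
            try:
                result = func(*args, **kwargs)
            except Exception as e:
                scope.__exit__(type(e), e, e.__traceback__)
                raise

            if isinstance(result, defer.Deferred):
                # keep the scope open until the Deferred resolves
                def on_ok(r):
                    scope.__exit__(None, None, None)
                    return r

                def on_err(failure):
                    scope.__exit__(type(failure.value), failure.value, None)
                    return failure

                result.addCallbacks(on_ok, on_err)
            else:
                scope.__exit__(None, None, None)
            return result

        return wrapper

    @traced
    def deferred_work():
        return defer.succeed(42)

    deferred_work()  # prints "open deferred_work" then "close deferred_work ok"
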
diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py
index 8c661302c9..4eed4f2338 100644
--- a/synapse/logging/scopecontextmanager.py
+++ b/synapse/logging/scopecontextmanager.py
@@ -131,7 +131,7 @@ class _LogContextScope(Scope):
 
     def close(self):
         if self.manager.active is not self:
-            logger.error("Tried to close a none active scope!")
+            logger.error("Tried to close a non-active scope!")
             return
 
         if self._finish_on_close:
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 7bb020cb45..41147d4292 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -101,7 +101,7 @@ class ModuleApi(object):
         )
         user_id = yield self.register_user(localpart, displayname, emails)
         _, access_token = yield self.register_device(user_id)
-        defer.returnValue((user_id, access_token))
+        return (user_id, access_token)
 
     def register_user(self, localpart, displayname=None, emails=[]):
         """Registers a new user with given localpart and optional displayname, emails.
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 918ef64897..bd80c801b6 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -365,7 +365,7 @@ class Notifier(object):
             current_token = user_stream.current_token
             result = yield callback(prev_token, current_token)
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def get_events_for(
@@ -400,7 +400,7 @@ class Notifier(object):
         @defer.inlineCallbacks
         def check_for_updates(before_token, after_token):
             if not after_token.is_after(before_token):
-                defer.returnValue(EventStreamResult([], (from_token, from_token)))
+                return EventStreamResult([], (from_token, from_token))
 
             events = []
             end_token = from_token
@@ -440,7 +440,7 @@ class Notifier(object):
                 events.extend(new_events)
                 end_token = end_token.copy_and_replace(keyname, new_key)
 
-            defer.returnValue(EventStreamResult(events, (from_token, end_token)))
+            return EventStreamResult(events, (from_token, end_token))
 
         user_id_for_stream = user.to_string()
         if is_peeking:
@@ -465,18 +465,18 @@ class Notifier(object):
             from_token=from_token,
         )
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _get_room_ids(self, user, explicit_room_id):
         joined_room_ids = yield self.store.get_rooms_for_user(user.to_string())
         if explicit_room_id:
             if explicit_room_id in joined_room_ids:
-                defer.returnValue(([explicit_room_id], True))
+                return ([explicit_room_id], True)
             if (yield self._is_world_readable(explicit_room_id)):
-                defer.returnValue(([explicit_room_id], False))
+                return ([explicit_room_id], False)
             raise AuthError(403, "Non-joined access not allowed")
-        defer.returnValue((joined_room_ids, True))
+        return (joined_room_ids, True)
 
     @defer.inlineCallbacks
     def _is_world_readable(self, room_id):
@@ -484,9 +484,9 @@ class Notifier(object):
             room_id, EventTypes.RoomHistoryVisibility, ""
         )
         if state and "history_visibility" in state.content:
-            defer.returnValue(state.content["history_visibility"] == "world_readable")
+            return state.content["history_visibility"] == "world_readable"
         else:
-            defer.returnValue(False)
+            return False
 
     @log_function
     def remove_expired_streams(self):
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index 134bf805eb..286374d0b5 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -245,7 +245,13 @@ BASE_APPEND_OVERRIDE_RULES = [
                 "key": "type",
                 "pattern": "m.room.tombstone",
                 "_id": "_tombstone",
-            }
+            },
+            {
+                "kind": "event_match",
+                "key": "state_key",
+                "pattern": "",
+                "_id": "_tombstone_statekey",
+            },
         ],
         "actions": ["notify", {"set_tweak": "highlight", "value": True}],
     },
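
The extra condition above restricts the tombstone override rule to the actual room-tombstone state event: m.room.tombstone is only meaningful as a state event with an empty state_key, and without the check any ordinary event crafted with that type would also have triggered a highlight. A simplified stand-in for the pair of event_match conditions:

    def matches_tombstone_rule(event):
        # simplified version of the two event_match conditions above
        return (
            event.get("type") == "m.room.tombstone"
            and event.get("state_key") == ""
        )

    print(matches_tombstone_rule({"type": "m.room.tombstone", "state_key": ""}))  # True
    print(matches_tombstone_rule({"type": "m.room.tombstone"}))                   # False
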
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index c8a5b381da..c831975635 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -95,7 +95,7 @@ class BulkPushRuleEvaluator(object):
                         invited
                     )
 
-        defer.returnValue(rules_by_user)
+        return rules_by_user
 
     @cached()
     def _get_rules_for_room(self, room_id):
@@ -134,7 +134,7 @@ class BulkPushRuleEvaluator(object):
 
         pl_event = auth_events.get(POWER_KEY)
 
-        defer.returnValue((pl_event.content if pl_event else {}, sender_level))
+        return (pl_event.content if pl_event else {}, sender_level)
 
     @defer.inlineCallbacks
     def action_for_event_by_user(self, event, context):
@@ -283,13 +283,13 @@ class RulesForRoom(object):
         if state_group and self.state_group == state_group:
             logger.debug("Using cached rules for %r", self.room_id)
             self.room_push_rule_cache_metrics.inc_hits()
-            defer.returnValue(self.rules_by_user)
+            return self.rules_by_user
 
         with (yield self.linearizer.queue(())):
             if state_group and self.state_group == state_group:
                 logger.debug("Using cached rules for %r", self.room_id)
                 self.room_push_rule_cache_metrics.inc_hits()
-                defer.returnValue(self.rules_by_user)
+                return self.rules_by_user
 
             self.room_push_rule_cache_metrics.inc_misses()
 
@@ -366,7 +366,7 @@ class RulesForRoom(object):
             logger.debug(
                 "Returning push rules for %r %r", self.room_id, ret_rules_by_user.keys()
             )
-        defer.returnValue(ret_rules_by_user)
+        return ret_rules_by_user
 
     @defer.inlineCallbacks
     def _update_rules_with_member_event_ids(
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index 424ffa8b68..42e5b0c0a5 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -234,13 +234,19 @@ class EmailPusher(object):
             return
 
         self.last_stream_ordering = last_stream_ordering
-        yield self.store.update_pusher_last_stream_ordering_and_success(
-            self.app_id,
-            self.email,
-            self.user_id,
-            last_stream_ordering,
-            self.clock.time_msec(),
+        pusher_still_exists = (
+            yield self.store.update_pusher_last_stream_ordering_and_success(
+                self.app_id,
+                self.email,
+                self.user_id,
+                last_stream_ordering,
+                self.clock.time_msec(),
+            )
         )
+        if not pusher_still_exists:
+            # The pusher has been deleted while we were processing, so
+            # let's just stop and return.
+            self.on_stop()
 
     def seconds_until(self, ts_msec):
         secs = (ts_msec - self.clock.time_msec()) / 1000
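
The guard above (repeated for the HTTP pusher below) relies on update_pusher_last_stream_ordering_and_success now reporting whether the pusher row still existed when the progress update was written; if the pusher was deleted while we were processing, the loop stops rather than continuing to run for a pusher that no longer exists. A minimal sketch of the pattern with a stubbed store; everything other than the storage method name is hypothetical:

    class StubStore(object):
        def __init__(self, existing_pushkeys):
            self._pushkeys = set(existing_pushkeys)

        def update_pusher_last_stream_ordering_and_success(self, pushkey, ordering, ts):
            # stand-in: report whether the pusher row still exists
            return pushkey in self._pushkeys

    def save_progress(store, pushkey, ordering, on_stop):
        still_exists = store.update_pusher_last_stream_ordering_and_success(
            pushkey, ordering, 0
        )
        if not still_exists:
            # the pusher was deleted while we were processing: stop the loop
            on_stop()
            return False
        return True

    store = StubStore({"key1"})
    print(save_progress(store, "key1", 10, on_stop=lambda: None))               # True
    print(save_progress(store, "key2", 10, on_stop=lambda: print("stopping")))  # stopping, then False
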
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 4e7b6a5531..bd5d53af91 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -199,13 +199,21 @@ class HttpPusher(object):
                 http_push_processed_counter.inc()
                 self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
                 self.last_stream_ordering = push_action["stream_ordering"]
-                yield self.store.update_pusher_last_stream_ordering_and_success(
-                    self.app_id,
-                    self.pushkey,
-                    self.user_id,
-                    self.last_stream_ordering,
-                    self.clock.time_msec(),
+                pusher_still_exists = (
+                    yield self.store.update_pusher_last_stream_ordering_and_success(
+                        self.app_id,
+                        self.pushkey,
+                        self.user_id,
+                        self.last_stream_ordering,
+                        self.clock.time_msec(),
+                    )
                 )
+                if not pusher_still_exists:
+                    # The pusher has been deleted while we were processing, so
+                    # let's just stop and return.
+                    self.on_stop()
+                    return
+
                 if self.failing_since:
                     self.failing_since = None
                     yield self.store.update_pusher_failing_since(
@@ -234,12 +242,17 @@ class HttpPusher(object):
                     )
                     self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
                     self.last_stream_ordering = push_action["stream_ordering"]
-                    yield self.store.update_pusher_last_stream_ordering(
+                    pusher_still_exists = yield self.store.update_pusher_last_stream_ordering(
                         self.app_id,
                         self.pushkey,
                         self.user_id,
                         self.last_stream_ordering,
                     )
+                    if not pusher_still_exists:
+                        # The pusher has been deleted while we were processing, so
+                        # let's just stop and return.
+                        self.on_stop()
+                        return
 
                     self.failing_since = None
                     yield self.store.update_pusher_failing_since(
@@ -258,17 +271,17 @@ class HttpPusher(object):
     @defer.inlineCallbacks
     def _process_one(self, push_action):
         if "notify" not in push_action["actions"]:
-            defer.returnValue(True)
+            return True
 
         tweaks = push_rule_evaluator.tweaks_for_actions(push_action["actions"])
         badge = yield push_tools.get_badge_count(self.hs.get_datastore(), self.user_id)
 
         event = yield self.store.get_event(push_action["event_id"], allow_none=True)
         if event is None:
-            defer.returnValue(True)  # It's been redacted
+            return True  # It's been redacted
         rejected = yield self.dispatch_push(event, tweaks, badge)
         if rejected is False:
-            defer.returnValue(False)
+            return False
 
         if isinstance(rejected, list) or isinstance(rejected, tuple):
             for pk in rejected:
@@ -282,7 +295,7 @@ class HttpPusher(object):
                 else:
                     logger.info("Pushkey %s was rejected: removing", pk)
                     yield self.hs.remove_pusher(self.app_id, pk, self.user_id)
-        defer.returnValue(True)
+        return True
 
     @defer.inlineCallbacks
     def _build_notification_dict(self, event, tweaks, badge):
@@ -302,7 +315,7 @@ class HttpPusher(object):
                     ],
                 }
             }
-            defer.returnValue(d)
+            return d
 
         ctx = yield push_tools.get_context_for_event(
             self.store, self.state_handler, event, self.user_id
@@ -345,13 +358,13 @@ class HttpPusher(object):
         if "name" in ctx and len(ctx["name"]) > 0:
             d["notification"]["room_name"] = ctx["name"]
 
-        defer.returnValue(d)
+        return d
 
     @defer.inlineCallbacks
     def dispatch_push(self, event, tweaks, badge):
         notification_dict = yield self._build_notification_dict(event, tweaks, badge)
         if not notification_dict:
-            defer.returnValue([])
+            return []
         try:
             resp = yield self.http_client.post_json_get_json(
                 self.url, notification_dict
@@ -364,11 +377,11 @@ class HttpPusher(object):
                 type(e),
                 e,
             )
-            defer.returnValue(False)
+            return False
         rejected = []
         if "rejected" in resp:
             rejected = resp["rejected"]
-        defer.returnValue(rejected)
+        return rejected
 
     @defer.inlineCallbacks
     def _send_badge(self, badge):
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 521c6e2cd7..4245ce26f3 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -316,7 +316,7 @@ class Mailer(object):
                 if not merge:
                     room_vars["notifs"].append(notifvars)
 
-        defer.returnValue(room_vars)
+        return room_vars
 
     @defer.inlineCallbacks
     def get_notif_vars(self, notif, user_id, notif_event, room_state_ids):
@@ -343,7 +343,7 @@ class Mailer(object):
             if messagevars is not None:
                 ret["messages"].append(messagevars)
 
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def get_message_vars(self, notif, event, room_state_ids):
@@ -379,7 +379,7 @@ class Mailer(object):
         if "body" in event.content:
             ret["body_text_plain"] = event.content["body"]
 
-        defer.returnValue(ret)
+        return ret
 
     def add_text_message_vars(self, messagevars, event):
         msgformat = event.content.get("format")
@@ -428,19 +428,16 @@ class Mailer(object):
                 inviter_name = name_from_member_event(inviter_member_event)
 
                 if room_name is None:
-                    defer.returnValue(
-                        INVITE_FROM_PERSON
-                        % {"person": inviter_name, "app": self.app_name}
-                    )
+                    return INVITE_FROM_PERSON % {
+                        "person": inviter_name,
+                        "app": self.app_name,
+                    }
                 else:
-                    defer.returnValue(
-                        INVITE_FROM_PERSON_TO_ROOM
-                        % {
-                            "person": inviter_name,
-                            "room": room_name,
-                            "app": self.app_name,
-                        }
-                    )
+                    return INVITE_FROM_PERSON_TO_ROOM % {
+                        "person": inviter_name,
+                        "room": room_name,
+                        "app": self.app_name,
+                    }
 
             sender_name = None
             if len(notifs_by_room[room_id]) == 1:
@@ -454,26 +451,21 @@ class Mailer(object):
                     sender_name = name_from_member_event(state_event)
 
                 if sender_name is not None and room_name is not None:
-                    defer.returnValue(
-                        MESSAGE_FROM_PERSON_IN_ROOM
-                        % {
-                            "person": sender_name,
-                            "room": room_name,
-                            "app": self.app_name,
-                        }
-                    )
+                    return MESSAGE_FROM_PERSON_IN_ROOM % {
+                        "person": sender_name,
+                        "room": room_name,
+                        "app": self.app_name,
+                    }
                 elif sender_name is not None:
-                    defer.returnValue(
-                        MESSAGE_FROM_PERSON
-                        % {"person": sender_name, "app": self.app_name}
-                    )
+                    return MESSAGE_FROM_PERSON % {
+                        "person": sender_name,
+                        "app": self.app_name,
+                    }
             else:
                 # There's more than one notification for this room, so just
                 # say there are several
                 if room_name is not None:
-                    defer.returnValue(
-                        MESSAGES_IN_ROOM % {"room": room_name, "app": self.app_name}
-                    )
+                    return MESSAGES_IN_ROOM % {"room": room_name, "app": self.app_name}
                 else:
                     # If the room doesn't have a name, say who the messages
                     # are from explicitly to avoid, "messages in the Bob room"
@@ -493,24 +485,19 @@ class Mailer(object):
                         ]
                     )
 
-                    defer.returnValue(
-                        MESSAGES_FROM_PERSON
-                        % {
-                            "person": descriptor_from_member_events(
-                                member_events.values()
-                            ),
-                            "app": self.app_name,
-                        }
-                    )
+                    return MESSAGES_FROM_PERSON % {
+                        "person": descriptor_from_member_events(member_events.values()),
+                        "app": self.app_name,
+                    }
         else:
             # Stuff's happened in multiple different rooms
 
             # ...but we still refer to the 'reason' room which triggered the mail
             if reason["room_name"] is not None:
-                defer.returnValue(
-                    MESSAGES_IN_ROOM_AND_OTHERS
-                    % {"room": reason["room_name"], "app": self.app_name}
-                )
+                return MESSAGES_IN_ROOM_AND_OTHERS % {
+                    "room": reason["room_name"],
+                    "app": self.app_name,
+                }
             else:
                 # If the reason room doesn't have a name, say who the messages
                 # are from explicitly to avoid, "messages in the Bob room"
@@ -527,13 +514,10 @@ class Mailer(object):
                     [room_state_ids[room_id][("m.room.member", s)] for s in sender_ids]
                 )
 
-                defer.returnValue(
-                    MESSAGES_FROM_PERSON_AND_OTHERS
-                    % {
-                        "person": descriptor_from_member_events(member_events.values()),
-                        "app": self.app_name,
-                    }
-                )
+                return MESSAGES_FROM_PERSON_AND_OTHERS % {
+                    "person": descriptor_from_member_events(member_events.values()),
+                    "app": self.app_name,
+                }
 
     def make_room_link(self, room_id):
         if self.hs.config.email_riot_base_url:
diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py
index 06056fbf4f..16a7e8e31d 100644
--- a/synapse/push/presentable_names.py
+++ b/synapse/push/presentable_names.py
@@ -55,7 +55,7 @@ def calculate_room_name(
             room_state_ids[("m.room.name", "")], allow_none=True
         )
         if m_room_name and m_room_name.content and m_room_name.content["name"]:
-            defer.returnValue(m_room_name.content["name"])
+            return m_room_name.content["name"]
 
     # does it have a canonical alias?
     if ("m.room.canonical_alias", "") in room_state_ids:
@@ -68,7 +68,7 @@ def calculate_room_name(
             and canon_alias.content["alias"]
             and _looks_like_an_alias(canon_alias.content["alias"])
         ):
-            defer.returnValue(canon_alias.content["alias"])
+            return canon_alias.content["alias"]
 
     # at this point we're going to need to search the state by all state keys
     # for an event type, so rearrange the data structure
@@ -82,10 +82,10 @@ def calculate_room_name(
             if alias_event and alias_event.content.get("aliases"):
                 the_aliases = alias_event.content["aliases"]
                 if len(the_aliases) > 0 and _looks_like_an_alias(the_aliases[0]):
-                    defer.returnValue(the_aliases[0])
+                    return the_aliases[0]
 
     if not fallback_to_members:
-        defer.returnValue(None)
+        return None
 
     my_member_event = None
     if ("m.room.member", user_id) in room_state_ids:
@@ -104,14 +104,13 @@ def calculate_room_name(
             )
             if inviter_member_event:
                 if fallback_to_single_member:
-                    defer.returnValue(
-                        "Invite from %s"
-                        % (name_from_member_event(inviter_member_event),)
+                    return "Invite from %s" % (
+                        name_from_member_event(inviter_member_event),
                     )
                 else:
                     return
         else:
-            defer.returnValue("Room Invite")
+            return "Room Invite"
 
     # we're going to have to generate a name based on who's in the room,
     # so find out who is in the room that isn't the user.
@@ -154,17 +153,17 @@ def calculate_room_name(
                         # return "Inviting %s" % (
                         #     descriptor_from_member_events(third_party_invites)
                         # )
-                        defer.returnValue("Inviting email address")
+                        return "Inviting email address"
                     else:
-                        defer.returnValue(ALL_ALONE)
+                        return ALL_ALONE
             else:
-                defer.returnValue(name_from_member_event(all_members[0]))
+                return name_from_member_event(all_members[0])
         else:
-            defer.returnValue(ALL_ALONE)
+            return ALL_ALONE
     elif len(other_members) == 1 and not fallback_to_single_member:
         return
     else:
-        defer.returnValue(descriptor_from_member_events(other_members))
+        return descriptor_from_member_events(other_members)
 
 
 def descriptor_from_member_events(member_events):
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index e37269cdb9..a54051a726 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -39,7 +39,7 @@ def get_badge_count(store, user_id):
             # return one badge count per conversation, as count per
             # message is so noisy as to be almost useless
             badge += 1 if notifs["notify_count"] else 0
-    defer.returnValue(badge)
+    return badge
 
 
 @defer.inlineCallbacks
@@ -61,4 +61,4 @@ def get_context_for_event(store, state_handler, ev, user_id):
     sender_state_event = yield store.get_event(sender_state_event_id)
     ctx["sender_display_name"] = name_from_member_event(sender_state_event)
 
-    defer.returnValue(ctx)
+    return ctx
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index df6f670740..08e840fdc2 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -123,7 +123,7 @@ class PusherPool:
         )
         pusher = yield self.start_pusher_by_id(app_id, pushkey, user_id)
 
-        defer.returnValue(pusher)
+        return pusher
 
     @defer.inlineCallbacks
     def remove_pushers_by_app_id_and_pushkey_not_user(
@@ -224,7 +224,7 @@ class PusherPool:
         if pusher_dict:
             pusher = yield self._start_pusher(pusher_dict)
 
-        defer.returnValue(pusher)
+        return pusher
 
     @defer.inlineCallbacks
     def _start_pushers(self):
@@ -293,7 +293,7 @@ class PusherPool:
 
         p.on_started(have_notifs)
 
-        defer.returnValue(p)
+        return p
 
     @defer.inlineCallbacks
     def remove_pusher(self, app_id, pushkey, user_id):
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index c6465c0386..195a7a70c8 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -72,6 +72,7 @@ REQUIREMENTS = [
     "netaddr>=0.7.18",
     "Jinja2>=2.9",
     "bleach>=1.4.3",
+    "sdnotify>=0.3",
 ]
 
 CONDITIONAL_REQUIREMENTS = {
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index fe482e279f..2e0594e581 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -185,7 +185,7 @@ class ReplicationEndpoint(object):
             except RequestSendFailed as e:
                 raise_from(SynapseError(502, "Failed to talk to master"), e)
 
-            defer.returnValue(result)
+            return result
 
         return send_request
 
@@ -205,7 +205,7 @@ class ReplicationEndpoint(object):
         args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args)
         pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args))
 
-        http_server.register_paths(method, [pattern], handler)
+        http_server.register_paths(method, [pattern], handler, self.__class__.__name__)
 
     def _cached_handler(self, request, txn_id, **kwargs):
         """Called on new incoming requests when caching is enabled. Checks
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index 61eafbe708..fed4f08820 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -80,7 +80,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
 
         payload = {"events": event_payloads, "backfilled": backfilled}
 
-        defer.returnValue(payload)
+        return payload
 
     @defer.inlineCallbacks
     def _handle_request(self, request):
@@ -113,7 +113,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
             event_and_contexts, backfilled
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
@@ -156,7 +156,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
 
         result = yield self.registry.on_edu(edu_type, origin, edu_content)
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class ReplicationGetQueryRestServlet(ReplicationEndpoint):
@@ -204,7 +204,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
 
         result = yield self.registry.on_query(query_type, args)
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
@@ -238,7 +238,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
     def _handle_request(self, request, room_id):
         yield self.store.clean_room_for_join(room_id)
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py
index 7c1197e5dd..f17d3a2da4 100644
--- a/synapse/replication/http/login.py
+++ b/synapse/replication/http/login.py
@@ -64,7 +64,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
             user_id, device_id, initial_display_name, is_guest
         )
 
-        defer.returnValue((200, {"device_id": device_id, "access_token": access_token}))
+        return (200, {"device_id": device_id, "access_token": access_token})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py
index 2d9cbbaefc..4217335d88 100644
--- a/synapse/replication/http/membership.py
+++ b/synapse/replication/http/membership.py
@@ -83,7 +83,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
             remote_room_hosts, room_id, user_id, event_content
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
@@ -153,7 +153,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
             yield self.store.locally_reject_invite(user_id, room_id)
             ret = {}
 
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py
index 2bf2173895..3341320a87 100644
--- a/synapse/replication/http/register.py
+++ b/synapse/replication/http/register.py
@@ -90,7 +90,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
             address=content["address"],
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
@@ -143,7 +143,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
             bind_msisdn=bind_msisdn,
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index 034763fe99..eff7bd7305 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -85,7 +85,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
             "extra_users": [u.to_string() for u in extra_users],
         }
 
-        defer.returnValue(payload)
+        return payload
 
     @defer.inlineCallbacks
     def _handle_request(self, request, event_id):
@@ -117,7 +117,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
             requester, event, context, ratelimit=ratelimit, extra_users=extra_users
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 7ef67a5a73..c10b85d2ff 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -158,7 +158,7 @@ class Stream(object):
         updates, current_token = yield self.get_updates_since(self.last_token)
         self.last_token = current_token
 
-        defer.returnValue((updates, current_token))
+        return (updates, current_token)
 
     @defer.inlineCallbacks
     def get_updates_since(self, from_token):
@@ -172,14 +172,14 @@ class Stream(object):
                 sent over the replication stream.
         """
         if from_token in ("NOW", "now"):
-            defer.returnValue(([], self.upto_token))
+            return ([], self.upto_token)
 
         current_token = self.upto_token
 
         from_token = int(from_token)
 
         if from_token == current_token:
-            defer.returnValue(([], current_token))
+            return ([], current_token)
 
         if self._LIMITED:
             rows = yield self.update_function(
@@ -198,7 +198,7 @@ class Stream(object):
         if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND:
             raise Exception("stream %s has fallen behind" % (self.NAME))
 
-        defer.returnValue((updates, current_token))
+        return (updates, current_token)
 
     def current_token(self):
         """Gets the current token of the underlying streams. Should be provided
@@ -297,7 +297,7 @@ class PushRulesStream(Stream):
     @defer.inlineCallbacks
     def update_function(self, from_token, to_token, limit):
         rows = yield self.store.get_all_push_rule_updates(from_token, to_token, limit)
-        defer.returnValue([(row[0], row[2]) for row in rows])
+        return [(row[0], row[2]) for row in rows]
 
 
 class PushersStream(Stream):
@@ -424,7 +424,7 @@ class AccountDataStream(Stream):
             for stream_id, user_id, account_data_type, content in global_results
         )
 
-        defer.returnValue(results)
+        return results
 
 
 class GroupServerStream(Stream):
diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py
index 3d0694bb11..d97669c886 100644
--- a/synapse/replication/tcp/streams/events.py
+++ b/synapse/replication/tcp/streams/events.py
@@ -134,7 +134,7 @@ class EventsStream(Stream):
 
         all_updates = heapq.merge(event_updates, state_updates)
 
-        defer.returnValue(all_updates)
+        return all_updates
 
     @classmethod
     def parse_row(cls, row):
diff --git a/synapse/res/templates/account_renewed.html b/synapse/res/templates/account_renewed.html
new file mode 100644
index 0000000000..894da030af
--- /dev/null
+++ b/synapse/res/templates/account_renewed.html
@@ -0,0 +1 @@
+<html><body>Your account has been successfully renewed.</body></html>
diff --git a/synapse/res/templates/invalid_token.html b/synapse/res/templates/invalid_token.html
new file mode 100644
index 0000000000..6bd2b98364
--- /dev/null
+++ b/synapse/res/templates/invalid_token.html
@@ -0,0 +1 @@
+<html><body>Invalid renewal token.</body></html>
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 6888ae5590..5720cab425 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -27,7 +27,7 @@ from twisted.internet import defer
 
 import synapse
 from synapse.api.constants import Membership, UserTypes
-from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
+from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.server import JsonResource
 from synapse.http.servlet import (
     RestServlet,
@@ -36,7 +36,12 @@ from synapse.http.servlet import (
     parse_json_object_from_request,
     parse_string,
 )
-from synapse.rest.admin._base import assert_requester_is_admin, assert_user_is_admin
+from synapse.rest.admin._base import (
+    assert_requester_is_admin,
+    assert_user_is_admin,
+    historical_admin_path_patterns,
+)
+from synapse.rest.admin.media import register_servlets_for_media_repo
 from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
 from synapse.types import UserID, create_requester
 from synapse.util.versionstring import get_version_string
@@ -44,28 +49,6 @@ from synapse.util.versionstring import get_version_string
 logger = logging.getLogger(__name__)
 
 
-def historical_admin_path_patterns(path_regex):
-    """Returns the list of patterns for an admin endpoint, including historical ones
-
-    This is a backwards-compatibility hack. Previously, the Admin API was exposed at
-    various paths under /_matrix/client. This function returns a list of patterns
-    matching those paths (as well as the new one), so that existing scripts which rely
-    on the endpoints being available there are not broken.
-
-    Note that this should only be used for existing endpoints: new ones should just
-    register for the /_synapse/admin path.
-    """
-    return list(
-        re.compile(prefix + path_regex)
-        for prefix in (
-            "^/_synapse/admin/v1",
-            "^/_matrix/client/api/v1/admin",
-            "^/_matrix/client/unstable/admin",
-            "^/_matrix/client/r0/admin",
-        )
-    )
-
-
 class UsersRestServlet(RestServlet):
     PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)")
 
@@ -84,7 +67,7 @@ class UsersRestServlet(RestServlet):
 
         ret = yield self.handlers.admin_handler.get_users()
 
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 class VersionServlet(RestServlet):
@@ -227,7 +210,7 @@ class UserRegisterServlet(RestServlet):
         )
 
         result = yield register._create_registration_details(user_id, body)
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class WhoisRestServlet(RestServlet):
@@ -252,26 +235,7 @@ class WhoisRestServlet(RestServlet):
 
         ret = yield self.handlers.admin_handler.get_whois(target_user)
 
-        defer.returnValue((200, ret))
-
-
-class PurgeMediaCacheRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns("/purge_media_cache")
-
-    def __init__(self, hs):
-        self.media_repository = hs.get_media_repository()
-        self.auth = hs.get_auth()
-
-    @defer.inlineCallbacks
-    def on_POST(self, request):
-        yield assert_requester_is_admin(self.auth, request)
-
-        before_ts = parse_integer(request, "before_ts", required=True)
-        logger.info("before_ts: %r", before_ts)
-
-        ret = yield self.media_repository.delete_old_remote_media(before_ts)
-
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 class PurgeHistoryRestServlet(RestServlet):
@@ -356,7 +320,7 @@ class PurgeHistoryRestServlet(RestServlet):
             room_id, token, delete_local_events=delete_local_events
         )
 
-        defer.returnValue((200, {"purge_id": purge_id}))
+        return (200, {"purge_id": purge_id})
 
 
 class PurgeHistoryStatusRestServlet(RestServlet):
@@ -381,7 +345,7 @@ class PurgeHistoryStatusRestServlet(RestServlet):
         if purge_status is None:
             raise NotFoundError("purge id '%s' not found" % purge_id)
 
-        defer.returnValue((200, purge_status.asdict()))
+        return (200, purge_status.asdict())
 
 
 class DeactivateAccountRestServlet(RestServlet):
@@ -413,7 +377,7 @@ class DeactivateAccountRestServlet(RestServlet):
         else:
             id_server_unbind_result = "no-support"
 
-        defer.returnValue((200, {"id_server_unbind_result": id_server_unbind_result}))
+        return (200, {"id_server_unbind_result": id_server_unbind_result})
 
 
 class ShutdownRoomRestServlet(RestServlet):
@@ -531,62 +495,16 @@ class ShutdownRoomRestServlet(RestServlet):
             room_id, new_room_id, requester_user_id
         )
 
-        defer.returnValue(
-            (
-                200,
-                {
-                    "kicked_users": kicked_users,
-                    "failed_to_kick_users": failed_to_kick_users,
-                    "local_aliases": aliases_for_room,
-                    "new_room_id": new_room_id,
-                },
-            )
-        )
-
-
-class QuarantineMediaInRoom(RestServlet):
-    """Quarantines all media in a room so that no one can download it via
-    this server.
-    """
-
-    PATTERNS = historical_admin_path_patterns("/quarantine_media/(?P<room_id>[^/]+)")
-
-    def __init__(self, hs):
-        self.store = hs.get_datastore()
-        self.auth = hs.get_auth()
-
-    @defer.inlineCallbacks
-    def on_POST(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request)
-        yield assert_user_is_admin(self.auth, requester.user)
-
-        num_quarantined = yield self.store.quarantine_media_ids_in_room(
-            room_id, requester.user.to_string()
+        return (
+            200,
+            {
+                "kicked_users": kicked_users,
+                "failed_to_kick_users": failed_to_kick_users,
+                "local_aliases": aliases_for_room,
+                "new_room_id": new_room_id,
+            },
         )
 
-        defer.returnValue((200, {"num_quarantined": num_quarantined}))
-
-
-class ListMediaInRoom(RestServlet):
-    """Lists all of the media in a given room.
-    """
-
-    PATTERNS = historical_admin_path_patterns("/room/(?P<room_id>[^/]+)/media")
-
-    def __init__(self, hs):
-        self.store = hs.get_datastore()
-
-    @defer.inlineCallbacks
-    def on_GET(self, request, room_id):
-        requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
-
-        local_mxcs, remote_mxcs = yield self.store.get_media_mxcs_in_room(room_id)
-
-        defer.returnValue((200, {"local": local_mxcs, "remote": remote_mxcs}))
-
 
 class ResetPasswordRestServlet(RestServlet):
     """Post request to allow an administrator reset password for a user.
@@ -629,7 +547,7 @@ class ResetPasswordRestServlet(RestServlet):
         yield self._set_password_handler.set_password(
             target_user_id, new_password, requester
         )
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class GetUsersPaginatedRestServlet(RestServlet):
@@ -671,7 +589,7 @@ class GetUsersPaginatedRestServlet(RestServlet):
         logger.info("limit: %s, start: %s", limit, start)
 
         ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit)
-        defer.returnValue((200, ret))
+        return (200, ret)
 
     @defer.inlineCallbacks
     def on_POST(self, request, target_user_id):
@@ -699,7 +617,7 @@ class GetUsersPaginatedRestServlet(RestServlet):
         logger.info("limit: %s, start: %s", limit, start)
 
         ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit)
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 class SearchUsersRestServlet(RestServlet):
@@ -742,7 +660,7 @@ class SearchUsersRestServlet(RestServlet):
         logger.info("term: %s ", term)
 
         ret = yield self.handlers.admin_handler.search_users(term)
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 class DeleteGroupAdminRestServlet(RestServlet):
@@ -765,7 +683,7 @@ class DeleteGroupAdminRestServlet(RestServlet):
             raise SynapseError(400, "Can only delete local groups")
 
         yield self.group_server.delete_group(group_id, requester.user.to_string())
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class AccountValidityRenewServlet(RestServlet):
@@ -796,7 +714,7 @@ class AccountValidityRenewServlet(RestServlet):
         )
 
         res = {"expiration_ts": expiration_ts}
-        defer.returnValue((200, res))
+        return (200, res)
 
 
 ########################################################################################
@@ -827,7 +745,6 @@ def register_servlets(hs, http_server):
 def register_servlets_for_client_rest_resource(hs, http_server):
     """Register only the servlets which need to be exposed on /_matrix/client/xxx"""
     WhoisRestServlet(hs).register(http_server)
-    PurgeMediaCacheRestServlet(hs).register(http_server)
     PurgeHistoryStatusRestServlet(hs).register(http_server)
     DeactivateAccountRestServlet(hs).register(http_server)
     PurgeHistoryRestServlet(hs).register(http_server)
@@ -836,10 +753,13 @@ def register_servlets_for_client_rest_resource(hs, http_server):
     GetUsersPaginatedRestServlet(hs).register(http_server)
     SearchUsersRestServlet(hs).register(http_server)
     ShutdownRoomRestServlet(hs).register(http_server)
-    QuarantineMediaInRoom(hs).register(http_server)
-    ListMediaInRoom(hs).register(http_server)
     UserRegisterServlet(hs).register(http_server)
     DeleteGroupAdminRestServlet(hs).register(http_server)
     AccountValidityRenewServlet(hs).register(http_server)
+
+    # Load the media repo ones if we're using them.
+    if hs.config.can_load_media_repo:
+        register_servlets_for_media_repo(hs, http_server)
+
     # don't add more things here: new servlets should only be exposed on
     # /_synapse/admin so should not go here. Instead register them in AdminRestResource.
diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py
index 881d67b89c..5a9b08d3ef 100644
--- a/synapse/rest/admin/_base.py
+++ b/synapse/rest/admin/_base.py
@@ -12,11 +12,36 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+import re
+
 from twisted.internet import defer
 
 from synapse.api.errors import AuthError
 
 
+def historical_admin_path_patterns(path_regex):
+    """Returns the list of patterns for an admin endpoint, including historical ones
+
+    This is a backwards-compatibility hack. Previously, the Admin API was exposed at
+    various paths under /_matrix/client. This function returns a list of patterns
+    matching those paths (as well as the new one), so that existing scripts which rely
+    on the endpoints being available there are not broken.
+
+    Note that this should only be used for existing endpoints: new ones should just
+    register for the /_synapse/admin path.
+    """
+    return list(
+        re.compile(prefix + path_regex)
+        for prefix in (
+            "^/_synapse/admin/v1",
+            "^/_matrix/client/api/v1/admin",
+            "^/_matrix/client/unstable/admin",
+            "^/_matrix/client/r0/admin",
+        )
+    )
+
+
 @defer.inlineCallbacks
 def assert_requester_is_admin(auth, request):
     """Verify that the requester is an admin user
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
new file mode 100644
index 0000000000..824df919f2
--- /dev/null
+++ b/synapse/rest/admin/media.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018-2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.errors import AuthError
+from synapse.http.servlet import RestServlet, parse_integer
+from synapse.rest.admin._base import (
+    assert_requester_is_admin,
+    assert_user_is_admin,
+    historical_admin_path_patterns,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class QuarantineMediaInRoom(RestServlet):
+    """Quarantines all media in a room so that no one can download it via
+    this server.
+    """
+
+    PATTERNS = historical_admin_path_patterns("/quarantine_media/(?P<room_id>[^/]+)")
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, room_id):
+        requester = yield self.auth.get_user_by_req(request)
+        yield assert_user_is_admin(self.auth, requester.user)
+
+        num_quarantined = yield self.store.quarantine_media_ids_in_room(
+            room_id, requester.user.to_string()
+        )
+
+        return (200, {"num_quarantined": num_quarantined})
+
+
+class ListMediaInRoom(RestServlet):
+    """Lists all of the media in a given room.
+    """
+
+    PATTERNS = historical_admin_path_patterns("/room/(?P<room_id>[^/]+)/media")
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, room_id):
+        requester = yield self.auth.get_user_by_req(request)
+        is_admin = yield self.auth.is_server_admin(requester.user)
+        if not is_admin:
+            raise AuthError(403, "You are not a server admin")
+
+        local_mxcs, remote_mxcs = yield self.store.get_media_mxcs_in_room(room_id)
+
+        return (200, {"local": local_mxcs, "remote": remote_mxcs})
+
+
+class PurgeMediaCacheRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/purge_media_cache")
+
+    def __init__(self, hs):
+        self.media_repository = hs.get_media_repository()
+        self.auth = hs.get_auth()
+
+    @defer.inlineCallbacks
+    def on_POST(self, request):
+        yield assert_requester_is_admin(self.auth, request)
+
+        before_ts = parse_integer(request, "before_ts", required=True)
+        logger.info("before_ts: %r", before_ts)
+
+        ret = yield self.media_repository.delete_old_remote_media(before_ts)
+
+        return (200, ret)
+
+
+def register_servlets_for_media_repo(hs, http_server):
+    """
+    Register the media repo specific admin APIs (purge, quarantine, listing).
+    """
+    PurgeMediaCacheRestServlet(hs).register(http_server)
+    QuarantineMediaInRoom(hs).register(http_server)
+    ListMediaInRoom(hs).register(http_server)
diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py
index ee66838a0d..656526fea5 100644
--- a/synapse/rest/admin/server_notice_servlet.py
+++ b/synapse/rest/admin/server_notice_servlet.py
@@ -59,9 +59,14 @@ class SendServerNoticeServlet(RestServlet):
 
     def register(self, json_resource):
         PATTERN = "^/_synapse/admin/v1/send_server_notice"
-        json_resource.register_paths("POST", (re.compile(PATTERN + "$"),), self.on_POST)
         json_resource.register_paths(
-            "PUT", (re.compile(PATTERN + "/(?P<txn_id>[^/]*)$"),), self.on_PUT
+            "POST", (re.compile(PATTERN + "$"),), self.on_POST, self.__class__.__name__
+        )
+        json_resource.register_paths(
+            "PUT",
+            (re.compile(PATTERN + "/(?P<txn_id>[^/]*)$"),),
+            self.on_PUT,
+            self.__class__.__name__,
         )
 
     @defer.inlineCallbacks
@@ -87,7 +92,7 @@ class SendServerNoticeServlet(RestServlet):
             event_content=body["content"],
         )
 
-        defer.returnValue((200, {"event_id": event.event_id}))
+        return (200, {"event_id": event.event_id})
 
     def on_PUT(self, request, txn_id):
         return self.txns.fetch_or_execute_request(
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py
index 57542c2b4b..4284738021 100644
--- a/synapse/rest/client/v1/directory.py
+++ b/synapse/rest/client/v1/directory.py
@@ -54,7 +54,7 @@ class ClientDirectoryServer(RestServlet):
         dir_handler = self.handlers.directory_handler
         res = yield dir_handler.get_association(room_alias)
 
-        defer.returnValue((200, res))
+        return (200, res)
 
     @defer.inlineCallbacks
     def on_PUT(self, request, room_alias):
@@ -87,7 +87,7 @@ class ClientDirectoryServer(RestServlet):
             requester, room_alias, room_id, servers
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     @defer.inlineCallbacks
     def on_DELETE(self, request, room_alias):
@@ -102,7 +102,7 @@ class ClientDirectoryServer(RestServlet):
                 service.url,
                 room_alias.to_string(),
             )
-            defer.returnValue((200, {}))
+            return (200, {})
         except InvalidClientCredentialsError:
             # fallback to default user behaviour if they aren't an AS
             pass
@@ -118,7 +118,7 @@ class ClientDirectoryServer(RestServlet):
             "User %s deleted alias %s", user.to_string(), room_alias.to_string()
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class ClientDirectoryListServer(RestServlet):
@@ -136,9 +136,7 @@ class ClientDirectoryListServer(RestServlet):
         if room is None:
             raise NotFoundError("Unknown room")
 
-        defer.returnValue(
-            (200, {"visibility": "public" if room["is_public"] else "private"})
-        )
+        return (200, {"visibility": "public" if room["is_public"] else "private"})
 
     @defer.inlineCallbacks
     def on_PUT(self, request, room_id):
@@ -151,7 +149,7 @@ class ClientDirectoryListServer(RestServlet):
             requester, room_id, visibility
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     @defer.inlineCallbacks
     def on_DELETE(self, request, room_id):
@@ -161,7 +159,7 @@ class ClientDirectoryListServer(RestServlet):
             requester, room_id, "private"
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class ClientAppserviceDirectoryListServer(RestServlet):
@@ -195,4 +193,4 @@ class ClientAppserviceDirectoryListServer(RestServlet):
             requester.app_service.id, network_id, room_id, visibility
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index d6de2b7360..53ebed2203 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -67,7 +67,7 @@ class EventStreamRestServlet(RestServlet):
             is_guest=is_guest,
         )
 
-        defer.returnValue((200, chunk))
+        return (200, chunk)
 
     def on_OPTIONS(self, request):
         return (200, {})
@@ -91,9 +91,9 @@ class EventRestServlet(RestServlet):
         time_now = self.clock.time_msec()
         if event:
             event = yield self._event_serializer.serialize_event(event, time_now)
-            defer.returnValue((200, event))
+            return (200, event)
         else:
-            defer.returnValue((404, "Event not found."))
+            return (404, "Event not found.")
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py
index 0fe5f2d79b..70b8478e90 100644
--- a/synapse/rest/client/v1/initial_sync.py
+++ b/synapse/rest/client/v1/initial_sync.py
@@ -42,7 +42,7 @@ class InitialSyncRestServlet(RestServlet):
             include_archived=include_archived,
         )
 
-        defer.returnValue((200, content))
+        return (200, content)
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 0d05945f0a..5762b9fd06 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -152,7 +152,7 @@ class LoginRestServlet(RestServlet):
         well_known_data = self._well_known_builder.get_well_known()
         if well_known_data:
             result["well_known"] = well_known_data
-        defer.returnValue((200, result))
+        return (200, result)
 
     @defer.inlineCallbacks
     def _do_other_login(self, login_submission):
@@ -212,7 +212,7 @@ class LoginRestServlet(RestServlet):
                 result = yield self._register_device_with_callback(
                     canonical_user_id, login_submission, callback_3pid
                 )
-                defer.returnValue(result)
+                return result
 
             # No password providers were able to handle this 3pid
             # Check local store
@@ -241,7 +241,7 @@ class LoginRestServlet(RestServlet):
         result = yield self._register_device_with_callback(
             canonical_user_id, login_submission, callback
         )
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _register_device_with_callback(self, user_id, login_submission, callback=None):
@@ -273,7 +273,7 @@ class LoginRestServlet(RestServlet):
         if callback is not None:
             yield callback(result)
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def do_token_login(self, login_submission):
@@ -284,7 +284,7 @@ class LoginRestServlet(RestServlet):
         )
 
         result = yield self._register_device_with_callback(user_id, login_submission)
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def do_jwt_login(self, login_submission):
@@ -321,7 +321,7 @@ class LoginRestServlet(RestServlet):
         result = yield self._register_device_with_callback(
             registered_user_id, login_submission
         )
-        defer.returnValue(result)
+        return result
 
 
 class BaseSSORedirectServlet(RestServlet):
@@ -395,7 +395,7 @@ class CasTicketServlet(RestServlet):
             # even if that's being used old-http style to signal end-of-data
             body = pde.response
         result = yield self.handle_cas_response(request, body, client_redirect_url)
-        defer.returnValue(result)
+        return result
 
     def handle_cas_response(self, request, cas_response_body, client_redirect_url):
         user, attributes = self.parse_cas_response(cas_response_body)
diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py
index cd711be519..2769f3a189 100644
--- a/synapse/rest/client/v1/logout.py
+++ b/synapse/rest/client/v1/logout.py
@@ -49,7 +49,7 @@ class LogoutRestServlet(RestServlet):
                 requester.user.to_string(), requester.device_id
             )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class LogoutAllRestServlet(RestServlet):
@@ -75,7 +75,7 @@ class LogoutAllRestServlet(RestServlet):
         # .. and then delete any access tokens which weren't associated with
         # devices.
         yield self._auth_handler.delete_access_tokens_for_user(user_id)
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py
index 3e87f0fdb3..1eb1068c98 100644
--- a/synapse/rest/client/v1/presence.py
+++ b/synapse/rest/client/v1/presence.py
@@ -56,7 +56,7 @@ class PresenceStatusRestServlet(RestServlet):
         state = yield self.presence_handler.get_state(target_user=user)
         state = format_user_presence_state(state, self.clock.time_msec())
 
-        defer.returnValue((200, state))
+        return (200, state)
 
     @defer.inlineCallbacks
     def on_PUT(self, request, user_id):
@@ -88,7 +88,7 @@ class PresenceStatusRestServlet(RestServlet):
         if self.hs.config.use_presence:
             yield self.presence_handler.set_state(user, state)
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     def on_OPTIONS(self, request):
         return (200, {})
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index 4d8ab1f47e..2657ae45bb 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -48,7 +48,7 @@ class ProfileDisplaynameRestServlet(RestServlet):
         if displayname is not None:
             ret["displayname"] = displayname
 
-        defer.returnValue((200, ret))
+        return (200, ret)
 
     @defer.inlineCallbacks
     def on_PUT(self, request, user_id):
@@ -61,11 +61,11 @@ class ProfileDisplaynameRestServlet(RestServlet):
         try:
             new_name = content["displayname"]
         except Exception:
-            defer.returnValue((400, "Unable to parse name"))
+            return (400, "Unable to parse name")
 
         yield self.profile_handler.set_displayname(user, requester, new_name, is_admin)
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     def on_OPTIONS(self, request, user_id):
         return (200, {})
@@ -98,7 +98,7 @@ class ProfileAvatarURLRestServlet(RestServlet):
         if avatar_url is not None:
             ret["avatar_url"] = avatar_url
 
-        defer.returnValue((200, ret))
+        return (200, ret)
 
     @defer.inlineCallbacks
     def on_PUT(self, request, user_id):
@@ -110,11 +110,11 @@ class ProfileAvatarURLRestServlet(RestServlet):
         try:
             new_name = content["avatar_url"]
         except Exception:
-            defer.returnValue((400, "Unable to parse name"))
+            return (400, "Unable to parse name")
 
         yield self.profile_handler.set_avatar_url(user, requester, new_name, is_admin)
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     def on_OPTIONS(self, request, user_id):
         return (200, {})
@@ -150,7 +150,7 @@ class ProfileRestServlet(RestServlet):
         if avatar_url is not None:
             ret["avatar_url"] = avatar_url
 
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index e635efb420..c3ae8b98a8 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -69,7 +69,7 @@ class PushRuleRestServlet(RestServlet):
         if "attr" in spec:
             yield self.set_rule_attr(user_id, spec, content)
             self.notify_user(user_id)
-            defer.returnValue((200, {}))
+            return (200, {})
 
         if spec["rule_id"].startswith("."):
             # Rule ids starting with '.' are reserved for server default rules.
@@ -106,7 +106,7 @@ class PushRuleRestServlet(RestServlet):
         except RuleNotFoundException as e:
             raise SynapseError(400, str(e))
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     @defer.inlineCallbacks
     def on_DELETE(self, request, path):
@@ -123,7 +123,7 @@ class PushRuleRestServlet(RestServlet):
         try:
             yield self.store.delete_push_rule(user_id, namespaced_rule_id)
             self.notify_user(user_id)
-            defer.returnValue((200, {}))
+            return (200, {})
         except StoreError as e:
             if e.code == 404:
                 raise NotFoundError()
@@ -151,10 +151,10 @@ class PushRuleRestServlet(RestServlet):
             )
 
         if path[0] == "":
-            defer.returnValue((200, rules))
+            return (200, rules)
         elif path[0] == "global":
             result = _filter_ruleset_with_path(rules["global"], path[1:])
-            defer.returnValue((200, result))
+            return (200, result)
         else:
             raise UnrecognizedRequestError()
 
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index e9246018df..ebc3dec516 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -62,7 +62,7 @@ class PushersRestServlet(RestServlet):
                 if k not in allowed_keys:
                     del p[k]
 
-        defer.returnValue((200, {"pushers": pushers}))
+        return (200, {"pushers": pushers})
 
     def on_OPTIONS(self, _):
         return 200, {}
@@ -94,7 +94,7 @@ class PushersSetRestServlet(RestServlet):
             yield self.pusher_pool.remove_pusher(
                 content["app_id"], content["pushkey"], user_id=user.to_string()
             )
-            defer.returnValue((200, {}))
+            return (200, {})
 
         assert_params_in_dict(
             content,
@@ -143,7 +143,7 @@ class PushersSetRestServlet(RestServlet):
 
         self.notifier.on_new_replication_data()
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     def on_OPTIONS(self, _):
         return 200, {}
@@ -190,7 +190,7 @@ class PushersRemoveRestServlet(RestServlet):
         )
         request.write(PushersRemoveRestServlet.SUCCESS_HTML)
         finish_request(request)
-        defer.returnValue(None)
+        return None
 
     def on_OPTIONS(self, _):
         return 200, {}
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 7709c2d705..4b2344e696 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -67,11 +67,17 @@ class RoomCreateRestServlet(TransactionRestServlet):
         register_txn_path(self, PATTERNS, http_server)
         # define CORS for all of /rooms in RoomCreateRestServlet for simplicity
         http_server.register_paths(
-            "OPTIONS", client_patterns("/rooms(?:/.*)?$", v1=True), self.on_OPTIONS
+            "OPTIONS",
+            client_patterns("/rooms(?:/.*)?$", v1=True),
+            self.on_OPTIONS,
+            self.__class__.__name__,
         )
         # define CORS for /createRoom[/txnid]
         http_server.register_paths(
-            "OPTIONS", client_patterns("/createRoom(?:/.*)?$", v1=True), self.on_OPTIONS
+            "OPTIONS",
+            client_patterns("/createRoom(?:/.*)?$", v1=True),
+            self.on_OPTIONS,
+            self.__class__.__name__,
         )
 
     def on_PUT(self, request, txn_id):
@@ -85,7 +91,7 @@ class RoomCreateRestServlet(TransactionRestServlet):
             requester, self.get_room_config(request)
         )
 
-        defer.returnValue((200, info))
+        return (200, info)
 
     def get_room_config(self, request):
         user_supplied_config = parse_json_object_from_request(request)
@@ -116,16 +122,28 @@ class RoomStateEventRestServlet(TransactionRestServlet):
         )
 
         http_server.register_paths(
-            "GET", client_patterns(state_key, v1=True), self.on_GET
+            "GET",
+            client_patterns(state_key, v1=True),
+            self.on_GET,
+            self.__class__.__name__,
         )
         http_server.register_paths(
-            "PUT", client_patterns(state_key, v1=True), self.on_PUT
+            "PUT",
+            client_patterns(state_key, v1=True),
+            self.on_PUT,
+            self.__class__.__name__,
         )
         http_server.register_paths(
-            "GET", client_patterns(no_state_key, v1=True), self.on_GET_no_state_key
+            "GET",
+            client_patterns(no_state_key, v1=True),
+            self.on_GET_no_state_key,
+            self.__class__.__name__,
         )
         http_server.register_paths(
-            "PUT", client_patterns(no_state_key, v1=True), self.on_PUT_no_state_key
+            "PUT",
+            client_patterns(no_state_key, v1=True),
+            self.on_PUT_no_state_key,
+            self.__class__.__name__,
         )
 
     def on_GET_no_state_key(self, request, room_id, event_type):
@@ -155,9 +173,9 @@ class RoomStateEventRestServlet(TransactionRestServlet):
 
         if format == "event":
             event = format_event_for_client_v2(data.get_dict())
-            defer.returnValue((200, event))
+            return (200, event)
         elif format == "content":
-            defer.returnValue((200, data.get_dict()["content"]))
+            return (200, data.get_dict()["content"])
 
     @defer.inlineCallbacks
     def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
@@ -192,7 +210,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
         ret = {}
         if event:
             ret = {"event_id": event.event_id}
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 # TODO: Needs unit testing for generic events + feedback
@@ -226,7 +244,7 @@ class RoomSendEventRestServlet(TransactionRestServlet):
             requester, event_dict, txn_id=txn_id
         )
 
-        defer.returnValue((200, {"event_id": event.event_id}))
+        return (200, {"event_id": event.event_id})
 
     def on_GET(self, request, room_id, event_type, txn_id):
         return (200, "Not implemented")
@@ -289,7 +307,7 @@ class JoinRoomAliasServlet(TransactionRestServlet):
             third_party_signed=content.get("third_party_signed", None),
         )
 
-        defer.returnValue((200, {"room_id": room_id}))
+        return (200, {"room_id": room_id})
 
     def on_PUT(self, request, room_identifier, txn_id):
         return self.txns.fetch_or_execute_request(
@@ -342,7 +360,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
                 limit=limit, since_token=since_token
             )
 
-        defer.returnValue((200, data))
+        return (200, data)
 
     @defer.inlineCallbacks
     def on_POST(self, request):
@@ -387,7 +405,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
                 network_tuple=network_tuple,
             )
 
-        defer.returnValue((200, data))
+        return (200, data)
 
 
 # TODO: Needs unit testing
@@ -438,7 +456,7 @@ class RoomMemberListRestServlet(RestServlet):
                 continue
             chunk.append(event)
 
-        defer.returnValue((200, {"chunk": chunk}))
+        return (200, {"chunk": chunk})
 
 
 # deprecated in favour of /members?membership=join?
@@ -459,7 +477,7 @@ class JoinedRoomMemberListRestServlet(RestServlet):
             requester, room_id
         )
 
-        defer.returnValue((200, {"joined": users_with_profile}))
+        return (200, {"joined": users_with_profile})
 
 
 # TODO: Needs better unit testing
@@ -492,7 +510,7 @@ class RoomMessageListRestServlet(RestServlet):
             event_filter=event_filter,
         )
 
-        defer.returnValue((200, msgs))
+        return (200, msgs)
 
 
 # TODO: Needs unit testing
@@ -513,7 +531,7 @@ class RoomStateRestServlet(RestServlet):
             user_id=requester.user.to_string(),
             is_guest=requester.is_guest,
         )
-        defer.returnValue((200, events))
+        return (200, events)
 
 
 # TODO: Needs unit testing
@@ -532,7 +550,7 @@ class RoomInitialSyncRestServlet(RestServlet):
         content = yield self.initial_sync_handler.room_initial_sync(
             room_id=room_id, requester=requester, pagin_config=pagination_config
         )
-        defer.returnValue((200, content))
+        return (200, content)
 
 
 class RoomEventServlet(RestServlet):
@@ -550,14 +568,22 @@ class RoomEventServlet(RestServlet):
     @defer.inlineCallbacks
     def on_GET(self, request, room_id, event_id):
         requester = yield self.auth.get_user_by_req(request, allow_guest=True)
-        event = yield self.event_handler.get_event(requester.user, room_id, event_id)
+        try:
+            event = yield self.event_handler.get_event(
+                requester.user, room_id, event_id
+            )
+        except AuthError:
+            # This endpoint is supposed to return a 404 when the requester does
+            # not have permission to access the event
+            # https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-rooms-roomid-event-eventid
+            raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
 
         time_now = self.clock.time_msec()
         if event:
             event = yield self._event_serializer.serialize_event(event, time_now)
-            defer.returnValue((200, event))
-        else:
-            defer.returnValue((404, "Event not found."))
+            return (200, event)
+
+        raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
 
 
 class RoomEventContextServlet(RestServlet):
@@ -607,7 +633,7 @@ class RoomEventContextServlet(RestServlet):
             results["state"], time_now
         )
 
-        defer.returnValue((200, results))
+        return (200, results)
 
 
 class RoomForgetRestServlet(TransactionRestServlet):
@@ -626,7 +652,7 @@ class RoomForgetRestServlet(TransactionRestServlet):
 
         yield self.room_member_handler.forget(user=requester.user, room_id=room_id)
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     def on_PUT(self, request, room_id, txn_id):
         return self.txns.fetch_or_execute_request(
@@ -676,7 +702,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
                 requester,
                 txn_id,
             )
-            defer.returnValue((200, {}))
+            return (200, {})
             return
 
         target = requester.user
@@ -703,7 +729,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
         if membership_action == "join":
             return_value["room_id"] = room_id
 
-        defer.returnValue((200, return_value))
+        return (200, return_value)
 
     def _has_3pid_invite_keys(self, content):
         for key in {"id_server", "medium", "address"}:
@@ -745,7 +771,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
             txn_id=txn_id,
         )
 
-        defer.returnValue((200, {"event_id": event.event_id}))
+        return (200, {"event_id": event.event_id})
 
     def on_PUT(self, request, room_id, event_id, txn_id):
         return self.txns.fetch_or_execute_request(
@@ -790,7 +816,7 @@ class RoomTypingRestServlet(RestServlet):
                 target_user=target_user, auth_user=requester.user, room_id=room_id
             )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class SearchRestServlet(RestServlet):
@@ -812,7 +838,7 @@ class SearchRestServlet(RestServlet):
             requester.user, content, batch
         )
 
-        defer.returnValue((200, results))
+        return (200, results)
 
 
 class JoinedRoomsRestServlet(RestServlet):
@@ -828,7 +854,7 @@ class JoinedRoomsRestServlet(RestServlet):
         requester = yield self.auth.get_user_by_req(request, allow_guest=True)
 
         room_ids = yield self.store.get_rooms_for_user(requester.user.to_string())
-        defer.returnValue((200, {"joined_rooms": list(room_ids)}))
+        return (200, {"joined_rooms": list(room_ids)})
 
 
 def register_txn_path(servlet, regex_string, http_server, with_get=False):
@@ -845,18 +871,23 @@ def register_txn_path(servlet, regex_string, http_server, with_get=False):
         with_get: True to also register respective GET paths for the PUTs.
     """
     http_server.register_paths(
-        "POST", client_patterns(regex_string + "$", v1=True), servlet.on_POST
+        "POST",
+        client_patterns(regex_string + "$", v1=True),
+        servlet.on_POST,
+        servlet.__class__.__name__,
     )
     http_server.register_paths(
         "PUT",
         client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
         servlet.on_PUT,
+        servlet.__class__.__name__,
     )
     if with_get:
         http_server.register_paths(
             "GET",
             client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
             servlet.on_GET,
+            servlet.__class__.__name__,
         )
 
 
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py
index 41b3171ac8..497cddf8b8 100644
--- a/synapse/rest/client/v1/voip.py
+++ b/synapse/rest/client/v1/voip.py
@@ -60,18 +60,16 @@ class VoipRestServlet(RestServlet):
             password = turnPassword
 
         else:
-            defer.returnValue((200, {}))
-
-        defer.returnValue(
-            (
-                200,
-                {
-                    "username": username,
-                    "password": password,
-                    "ttl": userLifetime / 1000,
-                    "uris": turnUris,
-                },
-            )
+            return (200, {})
+
+        return (
+            200,
+            {
+                "username": username,
+                "password": password,
+                "ttl": userLifetime / 1000,
+                "uris": turnUris,
+            },
         )
 
     def on_OPTIONS(self, request):
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index f143d8b85c..7ac456812a 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -117,7 +117,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
             # Wrap the session id in a JSON object
             ret = {"sid": sid}
 
-        defer.returnValue((200, ret))
+        return (200, ret)
 
     @defer.inlineCallbacks
     def send_password_reset(self, email, client_secret, send_attempt, next_link=None):
@@ -149,7 +149,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
             # Check that the send_attempt is higher than previous attempts
             if send_attempt <= last_send_attempt:
                 # If not, just return a success without sending an email
-                defer.returnValue(session_id)
+                return session_id
         else:
             # A non-validated session does not exist yet.
             # Generate a session id
@@ -185,7 +185,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
             token_expires,
         )
 
-        defer.returnValue(session_id)
+        return session_id
 
 
 class MsisdnPasswordRequestTokenRestServlet(RestServlet):
@@ -221,7 +221,7 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):
             raise SynapseError(400, "MSISDN not found", Codes.THREEPID_NOT_FOUND)
 
         ret = yield self.identity_handler.requestMsisdnToken(**body)
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 class PasswordResetSubmitTokenServlet(RestServlet):
@@ -279,7 +279,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
                     request.setResponseCode(302)
                     request.setHeader("Location", next_link)
                     finish_request(request)
-                    defer.returnValue(None)
+                    return None
 
             # Otherwise show the success template
             html = self.config.email_password_reset_success_html_content
@@ -295,7 +295,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
 
         request.write(html.encode("utf-8"))
         finish_request(request)
-        defer.returnValue(None)
+        return None
 
     def load_jinja2_template(self, template_dir, template_filename, template_vars):
         """Loads a jinja2 template with variables to insert
@@ -330,7 +330,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
         )
         response_code = 200 if valid else 400
 
-        defer.returnValue((response_code, {"success": valid}))
+        return (response_code, {"success": valid})
 
 
 class PasswordRestServlet(RestServlet):
@@ -399,7 +399,7 @@ class PasswordRestServlet(RestServlet):
 
         yield self._set_password_handler.set_password(user_id, new_password, requester)
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     def on_OPTIONS(self, _):
         return 200, {}
@@ -434,7 +434,7 @@ class DeactivateAccountRestServlet(RestServlet):
             yield self._deactivate_account_handler.deactivate_account(
                 requester.user.to_string(), erase
             )
-            defer.returnValue((200, {}))
+            return (200, {})
 
         yield self.auth_handler.validate_user_via_ui_auth(
             requester, body, self.hs.get_ip_from_request(request)
@@ -447,7 +447,7 @@ class DeactivateAccountRestServlet(RestServlet):
         else:
             id_server_unbind_result = "no-support"
 
-        defer.returnValue((200, {"id_server_unbind_result": id_server_unbind_result}))
+        return (200, {"id_server_unbind_result": id_server_unbind_result})
 
 
 class EmailThreepidRequestTokenRestServlet(RestServlet):
@@ -481,7 +481,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
             raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
 
         ret = yield self.identity_handler.requestEmailToken(**body)
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 class MsisdnThreepidRequestTokenRestServlet(RestServlet):
@@ -516,7 +516,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
             raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE)
 
         ret = yield self.identity_handler.requestMsisdnToken(**body)
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 class ThreepidRestServlet(RestServlet):
@@ -536,7 +536,7 @@ class ThreepidRestServlet(RestServlet):
 
         threepids = yield self.datastore.user_get_threepids(requester.user.to_string())
 
-        defer.returnValue((200, {"threepids": threepids}))
+        return (200, {"threepids": threepids})
 
     @defer.inlineCallbacks
     def on_POST(self, request):
@@ -568,7 +568,7 @@ class ThreepidRestServlet(RestServlet):
             logger.debug("Binding threepid %s to %s", threepid, user_id)
             yield self.identity_handler.bind_threepid(threePidCreds, user_id)
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class ThreepidDeleteRestServlet(RestServlet):
@@ -603,7 +603,7 @@ class ThreepidDeleteRestServlet(RestServlet):
         else:
             id_server_unbind_result = "no-support"
 
-        defer.returnValue((200, {"id_server_unbind_result": id_server_unbind_result}))
+        return (200, {"id_server_unbind_result": id_server_unbind_result})
 
 
 class WhoamiRestServlet(RestServlet):
@@ -617,7 +617,7 @@ class WhoamiRestServlet(RestServlet):
     def on_GET(self, request):
         requester = yield self.auth.get_user_by_req(request)
 
-        defer.returnValue((200, {"user_id": requester.user.to_string()}))
+        return (200, {"user_id": requester.user.to_string()})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py
index f155c26259..98f2f6f4b5 100644
--- a/synapse/rest/client/v2_alpha/account_data.py
+++ b/synapse/rest/client/v2_alpha/account_data.py
@@ -55,7 +55,7 @@ class AccountDataServlet(RestServlet):
 
         self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     @defer.inlineCallbacks
     def on_GET(self, request, user_id, account_data_type):
@@ -70,7 +70,7 @@ class AccountDataServlet(RestServlet):
         if event is None:
             raise NotFoundError("Account data not found")
 
-        defer.returnValue((200, event))
+        return (200, event)
 
 
 class RoomAccountDataServlet(RestServlet):
@@ -112,7 +112,7 @@ class RoomAccountDataServlet(RestServlet):
 
         self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     @defer.inlineCallbacks
     def on_GET(self, request, user_id, room_id, account_data_type):
@@ -127,7 +127,7 @@ class RoomAccountDataServlet(RestServlet):
         if event is None:
             raise NotFoundError("Room account data not found")
 
-        defer.returnValue((200, event))
+        return (200, event)
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py
index d29c10b83d..33f6a23028 100644
--- a/synapse/rest/client/v2_alpha/account_validity.py
+++ b/synapse/rest/client/v2_alpha/account_validity.py
@@ -42,6 +42,8 @@ class AccountValidityRenewServlet(RestServlet):
         self.hs = hs
         self.account_activity_handler = hs.get_account_validity_handler()
         self.auth = hs.get_auth()
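+        # HTML bodies returned depending on whether the renewal token was valid.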
+        self.success_html = hs.config.account_validity.account_renewed_html_content
+        self.failure_html = hs.config.account_validity.invalid_token_html_content
 
     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -49,14 +51,21 @@ class AccountValidityRenewServlet(RestServlet):
             raise SynapseError(400, "Missing renewal token")
         renewal_token = request.args[b"token"][0]
 
-        yield self.account_activity_handler.renew_account(renewal_token.decode("utf8"))
+        token_valid = yield self.account_activity_handler.renew_account(
+            renewal_token.decode("utf8")
+        )
+
+        if token_valid:
+            status_code = 200
+            response = self.success_html
+        else:
+            status_code = 404
+            response = self.failure_html
 
-        request.setResponseCode(200)
+        request.setResponseCode(status_code)
         request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
-        request.setHeader(
-            b"Content-Length", b"%d" % (len(AccountValidityRenewServlet.SUCCESS_HTML),)
-        )
-        request.write(AccountValidityRenewServlet.SUCCESS_HTML)
+        request.setHeader(b"Content-Length", b"%d" % (len(response),))
+        request.write(response.encode("utf8"))
         finish_request(request)
         defer.returnValue(None)
 
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index bebc2951e7..f21aff39e5 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -207,7 +207,7 @@ class AuthRestServlet(RestServlet):
             request.write(html_bytes)
             finish_request(request)
 
-            defer.returnValue(None)
+            return None
         elif stagetype == LoginType.TERMS:
             if ("session" not in request.args or len(request.args["session"])) == 0:
                 raise SynapseError(400, "No session supplied")
@@ -239,7 +239,7 @@ class AuthRestServlet(RestServlet):
 
             request.write(html_bytes)
             finish_request(request)
-            defer.returnValue(None)
+            return None
         else:
             raise SynapseError(404, "Unknown auth stage type")
 
diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py
index fc7e2f4dd5..a4fa45fe11 100644
--- a/synapse/rest/client/v2_alpha/capabilities.py
+++ b/synapse/rest/client/v2_alpha/capabilities.py
@@ -58,7 +58,7 @@ class CapabilitiesRestServlet(RestServlet):
                 "m.change_password": {"enabled": change_password},
             }
         }
-        defer.returnValue((200, response))
+        return (200, response)
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py
index d279229d74..9adf76cc0c 100644
--- a/synapse/rest/client/v2_alpha/devices.py
+++ b/synapse/rest/client/v2_alpha/devices.py
@@ -48,7 +48,7 @@ class DevicesRestServlet(RestServlet):
         devices = yield self.device_handler.get_devices_by_user(
             requester.user.to_string()
         )
-        defer.returnValue((200, {"devices": devices}))
+        return (200, {"devices": devices})
 
 
 class DeleteDevicesRestServlet(RestServlet):
@@ -91,7 +91,7 @@ class DeleteDevicesRestServlet(RestServlet):
         yield self.device_handler.delete_devices(
             requester.user.to_string(), body["devices"]
         )
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class DeviceRestServlet(RestServlet):
@@ -114,7 +114,7 @@ class DeviceRestServlet(RestServlet):
         device = yield self.device_handler.get_device(
             requester.user.to_string(), device_id
         )
-        defer.returnValue((200, device))
+        return (200, device)
 
     @interactive_auth_handler
     @defer.inlineCallbacks
@@ -137,7 +137,7 @@ class DeviceRestServlet(RestServlet):
         )
 
         yield self.device_handler.delete_device(requester.user.to_string(), device_id)
-        defer.returnValue((200, {}))
+        return (200, {})
 
     @defer.inlineCallbacks
     def on_PUT(self, request, device_id):
@@ -147,7 +147,7 @@ class DeviceRestServlet(RestServlet):
         yield self.device_handler.update_device(
             requester.user.to_string(), device_id, body
         )
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py
index 3f0adf4a21..22be0ee3c5 100644
--- a/synapse/rest/client/v2_alpha/filter.py
+++ b/synapse/rest/client/v2_alpha/filter.py
@@ -56,7 +56,7 @@ class GetFilterRestServlet(RestServlet):
                 user_localpart=target_user.localpart, filter_id=filter_id
             )
 
-            defer.returnValue((200, filter.get_filter_json()))
+            return (200, filter.get_filter_json())
         except (KeyError, StoreError):
             raise SynapseError(400, "No such filter", errcode=Codes.NOT_FOUND)
 
@@ -89,7 +89,7 @@ class CreateFilterRestServlet(RestServlet):
             user_localpart=target_user.localpart, user_filter=content
         )
 
-        defer.returnValue((200, {"filter_id": str(filter_id)}))
+        return (200, {"filter_id": str(filter_id)})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py
index a312dd2593..e629c4256d 100644
--- a/synapse/rest/client/v2_alpha/groups.py
+++ b/synapse/rest/client/v2_alpha/groups.py
@@ -47,7 +47,7 @@ class GroupServlet(RestServlet):
             group_id, requester_user_id
         )
 
-        defer.returnValue((200, group_description))
+        return (200, group_description)
 
     @defer.inlineCallbacks
     def on_POST(self, request, group_id):
@@ -59,7 +59,7 @@ class GroupServlet(RestServlet):
             group_id, requester_user_id, content
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class GroupSummaryServlet(RestServlet):
@@ -83,7 +83,7 @@ class GroupSummaryServlet(RestServlet):
             group_id, requester_user_id
         )
 
-        defer.returnValue((200, get_group_summary))
+        return (200, get_group_summary)
 
 
 class GroupSummaryRoomsCatServlet(RestServlet):
@@ -120,7 +120,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
             content=content,
         )
 
-        defer.returnValue((200, resp))
+        return (200, resp)
 
     @defer.inlineCallbacks
     def on_DELETE(self, request, group_id, category_id, room_id):
@@ -131,7 +131,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
             group_id, requester_user_id, room_id=room_id, category_id=category_id
         )
 
-        defer.returnValue((200, resp))
+        return (200, resp)
 
 
 class GroupCategoryServlet(RestServlet):
@@ -157,7 +157,7 @@ class GroupCategoryServlet(RestServlet):
             group_id, requester_user_id, category_id=category_id
         )
 
-        defer.returnValue((200, category))
+        return (200, category)
 
     @defer.inlineCallbacks
     def on_PUT(self, request, group_id, category_id):
@@ -169,7 +169,7 @@ class GroupCategoryServlet(RestServlet):
             group_id, requester_user_id, category_id=category_id, content=content
         )
 
-        defer.returnValue((200, resp))
+        return (200, resp)
 
     @defer.inlineCallbacks
     def on_DELETE(self, request, group_id, category_id):
@@ -180,7 +180,7 @@ class GroupCategoryServlet(RestServlet):
             group_id, requester_user_id, category_id=category_id
         )
 
-        defer.returnValue((200, resp))
+        return (200, resp)
 
 
 class GroupCategoriesServlet(RestServlet):
@@ -204,7 +204,7 @@ class GroupCategoriesServlet(RestServlet):
             group_id, requester_user_id
         )
 
-        defer.returnValue((200, category))
+        return (200, category)
 
 
 class GroupRoleServlet(RestServlet):
@@ -228,7 +228,7 @@ class GroupRoleServlet(RestServlet):
             group_id, requester_user_id, role_id=role_id
         )
 
-        defer.returnValue((200, category))
+        return (200, category)
 
     @defer.inlineCallbacks
     def on_PUT(self, request, group_id, role_id):
@@ -240,7 +240,7 @@ class GroupRoleServlet(RestServlet):
             group_id, requester_user_id, role_id=role_id, content=content
         )
 
-        defer.returnValue((200, resp))
+        return (200, resp)
 
     @defer.inlineCallbacks
     def on_DELETE(self, request, group_id, role_id):
@@ -251,7 +251,7 @@ class GroupRoleServlet(RestServlet):
             group_id, requester_user_id, role_id=role_id
         )
 
-        defer.returnValue((200, resp))
+        return (200, resp)
 
 
 class GroupRolesServlet(RestServlet):
@@ -275,7 +275,7 @@ class GroupRolesServlet(RestServlet):
             group_id, requester_user_id
         )
 
-        defer.returnValue((200, category))
+        return (200, category)
 
 
 class GroupSummaryUsersRoleServlet(RestServlet):
@@ -312,7 +312,7 @@ class GroupSummaryUsersRoleServlet(RestServlet):
             content=content,
         )
 
-        defer.returnValue((200, resp))
+        return (200, resp)
 
     @defer.inlineCallbacks
     def on_DELETE(self, request, group_id, role_id, user_id):
@@ -323,7 +323,7 @@ class GroupSummaryUsersRoleServlet(RestServlet):
             group_id, requester_user_id, user_id=user_id, role_id=role_id
         )
 
-        defer.returnValue((200, resp))
+        return (200, resp)
 
 
 class GroupRoomServlet(RestServlet):
@@ -347,7 +347,7 @@ class GroupRoomServlet(RestServlet):
             group_id, requester_user_id
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupUsersServlet(RestServlet):
@@ -371,7 +371,7 @@ class GroupUsersServlet(RestServlet):
             group_id, requester_user_id
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupInvitedUsersServlet(RestServlet):
@@ -395,7 +395,7 @@ class GroupInvitedUsersServlet(RestServlet):
             group_id, requester_user_id
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupSettingJoinPolicyServlet(RestServlet):
@@ -420,7 +420,7 @@ class GroupSettingJoinPolicyServlet(RestServlet):
             group_id, requester_user_id, content
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupCreateServlet(RestServlet):
@@ -450,7 +450,7 @@ class GroupCreateServlet(RestServlet):
             group_id, requester_user_id, content
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupAdminRoomsServlet(RestServlet):
@@ -477,7 +477,7 @@ class GroupAdminRoomsServlet(RestServlet):
             group_id, requester_user_id, room_id, content
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
     @defer.inlineCallbacks
     def on_DELETE(self, request, group_id, room_id):
@@ -488,7 +488,7 @@ class GroupAdminRoomsServlet(RestServlet):
             group_id, requester_user_id, room_id
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupAdminRoomsConfigServlet(RestServlet):
@@ -516,7 +516,7 @@ class GroupAdminRoomsConfigServlet(RestServlet):
             group_id, requester_user_id, room_id, config_key, content
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupAdminUsersInviteServlet(RestServlet):
@@ -546,7 +546,7 @@ class GroupAdminUsersInviteServlet(RestServlet):
             group_id, user_id, requester_user_id, config
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupAdminUsersKickServlet(RestServlet):
@@ -573,7 +573,7 @@ class GroupAdminUsersKickServlet(RestServlet):
             group_id, user_id, requester_user_id, content
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupSelfLeaveServlet(RestServlet):
@@ -598,7 +598,7 @@ class GroupSelfLeaveServlet(RestServlet):
             group_id, requester_user_id, requester_user_id, content
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupSelfJoinServlet(RestServlet):
@@ -623,7 +623,7 @@ class GroupSelfJoinServlet(RestServlet):
             group_id, requester_user_id, content
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupSelfAcceptInviteServlet(RestServlet):
@@ -648,7 +648,7 @@ class GroupSelfAcceptInviteServlet(RestServlet):
             group_id, requester_user_id, content
         )
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupSelfUpdatePublicityServlet(RestServlet):
@@ -672,7 +672,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet):
         publicise = content["publicise"]
         yield self.store.update_group_publicity(group_id, requester_user_id, publicise)
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class PublicisedGroupsForUserServlet(RestServlet):
@@ -694,7 +694,7 @@ class PublicisedGroupsForUserServlet(RestServlet):
 
         result = yield self.groups_handler.get_publicised_groups_for_user(user_id)
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class PublicisedGroupsForUsersServlet(RestServlet):
@@ -719,7 +719,7 @@ class PublicisedGroupsForUsersServlet(RestServlet):
 
         result = yield self.groups_handler.bulk_get_publicised_groups(user_ids)
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class GroupsForUserServlet(RestServlet):
@@ -741,7 +741,7 @@ class GroupsForUserServlet(RestServlet):
 
         result = yield self.groups_handler.get_joined_groups(requester_user_id)
 
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py
index 45c9928b65..6008adec7c 100644
--- a/synapse/rest/client/v2_alpha/keys.py
+++ b/synapse/rest/client/v2_alpha/keys.py
@@ -95,7 +95,7 @@ class KeyUploadServlet(RestServlet):
         result = yield self.e2e_keys_handler.upload_keys_for_user(
             user_id, device_id, body
         )
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class KeyQueryServlet(RestServlet):
@@ -149,7 +149,7 @@ class KeyQueryServlet(RestServlet):
         timeout = parse_integer(request, "timeout", 10 * 1000)
         body = parse_json_object_from_request(request)
         result = yield self.e2e_keys_handler.query_devices(body, timeout)
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 class KeyChangesServlet(RestServlet):
@@ -189,7 +189,7 @@ class KeyChangesServlet(RestServlet):
 
         results = yield self.device_handler.get_user_ids_changed(user_id, from_token)
 
-        defer.returnValue((200, results))
+        return (200, results)
 
 
 class OneTimeKeyServlet(RestServlet):
@@ -224,7 +224,7 @@ class OneTimeKeyServlet(RestServlet):
         timeout = parse_integer(request, "timeout", 10 * 1000)
         body = parse_json_object_from_request(request)
         result = yield self.e2e_keys_handler.claim_one_time_keys(body, timeout)
-        defer.returnValue((200, result))
+        return (200, result)
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py
index 728a52328f..d034863a3c 100644
--- a/synapse/rest/client/v2_alpha/notifications.py
+++ b/synapse/rest/client/v2_alpha/notifications.py
@@ -88,9 +88,7 @@ class NotificationsServlet(RestServlet):
             returned_push_actions.append(returned_pa)
             next_token = str(pa["stream_ordering"])
 
-        defer.returnValue(
-            (200, {"notifications": returned_push_actions, "next_token": next_token})
-        )
+        return (200, {"notifications": returned_push_actions, "next_token": next_token})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py
index b1b5385b09..b4925c0f59 100644
--- a/synapse/rest/client/v2_alpha/openid.py
+++ b/synapse/rest/client/v2_alpha/openid.py
@@ -83,16 +83,14 @@ class IdTokenServlet(RestServlet):
 
         yield self.store.insert_open_id_token(token, ts_valid_until_ms, user_id)
 
-        defer.returnValue(
-            (
-                200,
-                {
-                    "access_token": token,
-                    "token_type": "Bearer",
-                    "matrix_server_name": self.server_name,
-                    "expires_in": self.EXPIRES_MS / 1000,
-                },
-            )
+        return (
+            200,
+            {
+                "access_token": token,
+                "token_type": "Bearer",
+                "matrix_server_name": self.server_name,
+                "expires_in": self.EXPIRES_MS / 1000,
+            },
         )
 
 
diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py
index e75664279b..d93d6a9f24 100644
--- a/synapse/rest/client/v2_alpha/read_marker.py
+++ b/synapse/rest/client/v2_alpha/read_marker.py
@@ -59,7 +59,7 @@ class ReadMarkerRestServlet(RestServlet):
                 event_id=read_marker_event_id,
             )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py
index 488905626a..98a97b7059 100644
--- a/synapse/rest/client/v2_alpha/receipts.py
+++ b/synapse/rest/client/v2_alpha/receipts.py
@@ -52,7 +52,7 @@ class ReceiptRestServlet(RestServlet):
             room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index f327999e59..05ea1459e3 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -95,7 +95,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
             raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
 
         ret = yield self.identity_handler.requestEmailToken(**body)
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 class MsisdnRegisterRequestTokenRestServlet(RestServlet):
@@ -138,7 +138,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
             )
 
         ret = yield self.identity_handler.requestMsisdnToken(**body)
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 class UsernameAvailabilityRestServlet(RestServlet):
@@ -178,7 +178,7 @@ class UsernameAvailabilityRestServlet(RestServlet):
 
             yield self.registration_handler.check_username(username)
 
-            defer.returnValue((200, {"available": True}))
+            return (200, {"available": True})
 
 
 class RegisterRestServlet(RestServlet):
@@ -230,7 +230,7 @@ class RegisterRestServlet(RestServlet):
 
         if kind == b"guest":
             ret = yield self._do_guest_registration(body, address=client_addr)
-            defer.returnValue(ret)
+            return ret
             return
         elif kind != b"user":
             raise UnrecognizedRequestError(
@@ -282,7 +282,7 @@ class RegisterRestServlet(RestServlet):
                 result = yield self._do_appservice_registration(
                     desired_username, access_token, body
                 )
-            defer.returnValue((200, result))  # we throw for non 200 responses
+            return (200, result)  # we throw for non 200 responses
             return
 
         # for either shared secret or regular registration, downcase the
@@ -301,7 +301,7 @@ class RegisterRestServlet(RestServlet):
             result = yield self._do_shared_secret_registration(
                 desired_username, desired_password, body
             )
-            defer.returnValue((200, result))  # we throw for non 200 responses
+            return (200, result)  # we throw for non 200 responses
             return
 
         # == Normal User Registration == (everyone else)
@@ -500,7 +500,7 @@ class RegisterRestServlet(RestServlet):
                 bind_msisdn=params.get("bind_msisdn"),
             )
 
-        defer.returnValue((200, return_dict))
+        return (200, return_dict)
 
     def on_OPTIONS(self, _):
         return 200, {}
@@ -510,7 +510,7 @@ class RegisterRestServlet(RestServlet):
         user_id = yield self.registration_handler.appservice_register(
             username, as_token
         )
-        defer.returnValue((yield self._create_registration_details(user_id, body)))
+        return (yield self._create_registration_details(user_id, body))
 
     @defer.inlineCallbacks
     def _do_shared_secret_registration(self, username, password, body):
@@ -546,7 +546,7 @@ class RegisterRestServlet(RestServlet):
         )
 
         result = yield self._create_registration_details(user_id, body)
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _create_registration_details(self, user_id, params):
@@ -570,7 +570,7 @@ class RegisterRestServlet(RestServlet):
             )
 
             result.update({"access_token": access_token, "device_id": device_id})
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _do_guest_registration(self, params, address=None):
@@ -588,16 +588,14 @@ class RegisterRestServlet(RestServlet):
             user_id, device_id, initial_display_name, is_guest=True
         )
 
-        defer.returnValue(
-            (
-                200,
-                {
-                    "user_id": user_id,
-                    "device_id": device_id,
-                    "access_token": access_token,
-                    "home_server": self.hs.hostname,
-                },
-            )
+        return (
+            200,
+            {
+                "user_id": user_id,
+                "device_id": device_id,
+                "access_token": access_token,
+                "home_server": self.hs.hostname,
+            },
         )
 
 
diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py
index 6e52f6d284..1538b247e5 100644
--- a/synapse/rest/client/v2_alpha/relations.py
+++ b/synapse/rest/client/v2_alpha/relations.py
@@ -72,11 +72,13 @@ class RelationSendServlet(RestServlet):
             "POST",
             client_patterns(self.PATTERN + "$", releases=()),
             self.on_PUT_or_POST,
+            self.__class__.__name__,
         )
         http_server.register_paths(
             "PUT",
             client_patterns(self.PATTERN + "/(?P<txn_id>[^/]*)$", releases=()),
             self.on_PUT,
+            self.__class__.__name__,
         )
 
     def on_PUT(self, request, *args, **kwargs):
@@ -116,7 +118,7 @@ class RelationSendServlet(RestServlet):
             requester, event_dict=event_dict, txn_id=txn_id
         )
 
-        defer.returnValue((200, {"event_id": event.event_id}))
+        return (200, {"event_id": event.event_id})
 
 
 class RelationPaginationServlet(RestServlet):
@@ -196,7 +198,7 @@ class RelationPaginationServlet(RestServlet):
         return_value["chunk"] = events
         return_value["original_event"] = original_event
 
-        defer.returnValue((200, return_value))
+        return (200, return_value)
 
 
 class RelationAggregationPaginationServlet(RestServlet):
@@ -268,7 +270,7 @@ class RelationAggregationPaginationServlet(RestServlet):
                 to_token=to_token,
             )
 
-        defer.returnValue((200, pagination_chunk.to_dict()))
+        return (200, pagination_chunk.to_dict())
 
 
 class RelationAggregationGroupPaginationServlet(RestServlet):
@@ -354,7 +356,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
         return_value = result.to_dict()
         return_value["chunk"] = events
 
-        defer.returnValue((200, return_value))
+        return (200, return_value)
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py
index e7578af804..3fdd4584a3 100644
--- a/synapse/rest/client/v2_alpha/report_event.py
+++ b/synapse/rest/client/v2_alpha/report_event.py
@@ -72,7 +72,7 @@ class ReportEventRestServlet(RestServlet):
             received_ts=self.clock.time_msec(),
         )
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index 8d1b810565..10dec96208 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -135,7 +135,7 @@ class RoomKeysServlet(RestServlet):
             body = {"rooms": {room_id: body}}
 
         yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body)
-        defer.returnValue((200, {}))
+        return (200, {})
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id, session_id):
@@ -218,7 +218,7 @@ class RoomKeysServlet(RestServlet):
             else:
                 room_keys = room_keys["rooms"][room_id]
 
-        defer.returnValue((200, room_keys))
+        return (200, room_keys)
 
     @defer.inlineCallbacks
     def on_DELETE(self, request, room_id, session_id):
@@ -242,7 +242,7 @@ class RoomKeysServlet(RestServlet):
         yield self.e2e_room_keys_handler.delete_room_keys(
             user_id, version, room_id, session_id
         )
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 class RoomKeysNewVersionServlet(RestServlet):
@@ -293,7 +293,7 @@ class RoomKeysNewVersionServlet(RestServlet):
         info = parse_json_object_from_request(request)
 
         new_version = yield self.e2e_room_keys_handler.create_version(user_id, info)
-        defer.returnValue((200, {"version": new_version}))
+        return (200, {"version": new_version})
 
     # we deliberately don't have a PUT /version, as these things really should
     # be immutable to avoid people footgunning
@@ -338,7 +338,7 @@ class RoomKeysVersionServlet(RestServlet):
         except SynapseError as e:
             if e.code == 404:
                 raise SynapseError(404, "No backup found", Codes.NOT_FOUND)
-        defer.returnValue((200, info))
+        return (200, info)
 
     @defer.inlineCallbacks
     def on_DELETE(self, request, version):
@@ -358,7 +358,7 @@ class RoomKeysVersionServlet(RestServlet):
         user_id = requester.user.to_string()
 
         yield self.e2e_room_keys_handler.delete_version(user_id, version)
-        defer.returnValue((200, {}))
+        return (200, {})
 
     @defer.inlineCallbacks
     def on_PUT(self, request, version):
@@ -392,7 +392,7 @@ class RoomKeysVersionServlet(RestServlet):
             )
 
         yield self.e2e_room_keys_handler.update_version(user_id, version, info)
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
index d7f7faa029..14ba61a63e 100644
--- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -80,7 +80,7 @@ class RoomUpgradeRestServlet(RestServlet):
 
         ret = {"replacement_room": new_room_id}
 
-        defer.returnValue((200, ret))
+        return (200, ret)
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py
index 78075b8fc0..2613648d82 100644
--- a/synapse/rest/client/v2_alpha/sendtodevice.py
+++ b/synapse/rest/client/v2_alpha/sendtodevice.py
@@ -60,7 +60,7 @@ class SendToDeviceRestServlet(servlet.RestServlet):
         )
 
         response = (200, {})
-        defer.returnValue(response)
+        return response
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index 02d56dee6c..7b32dd2212 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -174,7 +174,7 @@ class SyncRestServlet(RestServlet):
             time_now, sync_result, requester.access_token_id, filter
         )
 
-        defer.returnValue((200, response_content))
+        return (200, response_content)
 
     @defer.inlineCallbacks
     def encode_response(self, time_now, sync_result, access_token_id, filter):
@@ -205,27 +205,23 @@ class SyncRestServlet(RestServlet):
             event_formatter,
         )
 
-        defer.returnValue(
-            {
-                "account_data": {"events": sync_result.account_data},
-                "to_device": {"events": sync_result.to_device},
-                "device_lists": {
-                    "changed": list(sync_result.device_lists.changed),
-                    "left": list(sync_result.device_lists.left),
-                },
-                "presence": SyncRestServlet.encode_presence(
-                    sync_result.presence, time_now
-                ),
-                "rooms": {"join": joined, "invite": invited, "leave": archived},
-                "groups": {
-                    "join": sync_result.groups.join,
-                    "invite": sync_result.groups.invite,
-                    "leave": sync_result.groups.leave,
-                },
-                "device_one_time_keys_count": sync_result.device_one_time_keys_count,
-                "next_batch": sync_result.next_batch.to_string(),
-            }
-        )
+        return {
+            "account_data": {"events": sync_result.account_data},
+            "to_device": {"events": sync_result.to_device},
+            "device_lists": {
+                "changed": list(sync_result.device_lists.changed),
+                "left": list(sync_result.device_lists.left),
+            },
+            "presence": SyncRestServlet.encode_presence(sync_result.presence, time_now),
+            "rooms": {"join": joined, "invite": invited, "leave": archived},
+            "groups": {
+                "join": sync_result.groups.join,
+                "invite": sync_result.groups.invite,
+                "leave": sync_result.groups.leave,
+            },
+            "device_one_time_keys_count": sync_result.device_one_time_keys_count,
+            "next_batch": sync_result.next_batch.to_string(),
+        }
 
     @staticmethod
     def encode_presence(events, time_now):
@@ -273,7 +269,7 @@ class SyncRestServlet(RestServlet):
                 event_formatter=event_formatter,
             )
 
-        defer.returnValue(joined)
+        return joined
 
     @defer.inlineCallbacks
     def encode_invited(self, rooms, time_now, token_id, event_formatter):
@@ -309,7 +305,7 @@ class SyncRestServlet(RestServlet):
             invited_state.append(invite)
             invited[room.room_id] = {"invite_state": {"events": invited_state}}
 
-        defer.returnValue(invited)
+        return invited
 
     @defer.inlineCallbacks
     def encode_archived(self, rooms, time_now, token_id, event_fields, event_formatter):
@@ -342,7 +338,7 @@ class SyncRestServlet(RestServlet):
                 event_formatter=event_formatter,
             )
 
-        defer.returnValue(joined)
+        return joined
 
     @defer.inlineCallbacks
     def encode_room(
@@ -414,7 +410,7 @@ class SyncRestServlet(RestServlet):
             result["unread_notifications"] = room.unread_notifications
             result["summary"] = room.summary
 
-        defer.returnValue(result)
+        return result
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py
index 07b6ede603..d173544355 100644
--- a/synapse/rest/client/v2_alpha/tags.py
+++ b/synapse/rest/client/v2_alpha/tags.py
@@ -45,7 +45,7 @@ class TagListServlet(RestServlet):
 
         tags = yield self.store.get_tags_for_room(user_id, room_id)
 
-        defer.returnValue((200, {"tags": tags}))
+        return (200, {"tags": tags})
 
 
 class TagServlet(RestServlet):
@@ -76,7 +76,7 @@ class TagServlet(RestServlet):
 
         self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
     @defer.inlineCallbacks
     def on_DELETE(self, request, user_id, room_id, tag):
@@ -88,7 +88,7 @@ class TagServlet(RestServlet):
 
         self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
 
-        defer.returnValue((200, {}))
+        return (200, {})
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py
index 1e66662a05..158e686b01 100644
--- a/synapse/rest/client/v2_alpha/thirdparty.py
+++ b/synapse/rest/client/v2_alpha/thirdparty.py
@@ -40,7 +40,7 @@ class ThirdPartyProtocolsServlet(RestServlet):
         yield self.auth.get_user_by_req(request, allow_guest=True)
 
         protocols = yield self.appservice_handler.get_3pe_protocols()
-        defer.returnValue((200, protocols))
+        return (200, protocols)
 
 
 class ThirdPartyProtocolServlet(RestServlet):
@@ -60,9 +60,9 @@ class ThirdPartyProtocolServlet(RestServlet):
             only_protocol=protocol
         )
         if protocol in protocols:
-            defer.returnValue((200, protocols[protocol]))
+            return (200, protocols[protocol])
         else:
-            defer.returnValue((404, {"error": "Unknown protocol"}))
+            return (404, {"error": "Unknown protocol"})
 
 
 class ThirdPartyUserServlet(RestServlet):
@@ -85,7 +85,7 @@ class ThirdPartyUserServlet(RestServlet):
             ThirdPartyEntityKind.USER, protocol, fields
         )
 
-        defer.returnValue((200, results))
+        return (200, results)
 
 
 class ThirdPartyLocationServlet(RestServlet):
@@ -108,7 +108,7 @@ class ThirdPartyLocationServlet(RestServlet):
             ThirdPartyEntityKind.LOCATION, protocol, fields
         )
 
-        defer.returnValue((200, results))
+        return (200, results)
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py
index e19fb6d583..7ab2b80e46 100644
--- a/synapse/rest/client/v2_alpha/user_directory.py
+++ b/synapse/rest/client/v2_alpha/user_directory.py
@@ -60,7 +60,7 @@ class UserDirectorySearchRestServlet(RestServlet):
         user_id = requester.user.to_string()
 
         if not self.hs.config.user_directory_search_enabled:
-            defer.returnValue((200, {"limited": False, "results": []}))
+            return (200, {"limited": False, "results": []})
 
         body = parse_json_object_from_request(request)
 
@@ -76,7 +76,7 @@ class UserDirectorySearchRestServlet(RestServlet):
             user_id, search_term, limit
         )
 
-        defer.returnValue((200, results))
+        return (200, results)
 
 
 def register_servlets(hs, http_server):
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 65afffbb42..cf5759e9a6 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -33,6 +33,7 @@ from synapse.api.errors import (
     RequestSendFailed,
     SynapseError,
 )
+from synapse.config._base import ConfigError
 from synapse.logging.context import defer_to_thread
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util.async_helpers import Linearizer
@@ -171,7 +172,7 @@ class MediaRepository(object):
 
         yield self._generate_thumbnails(None, media_id, media_id, media_type)
 
-        defer.returnValue("mxc://%s/%s" % (self.server_name, media_id))
+        return "mxc://%s/%s" % (self.server_name, media_id)
 
     @defer.inlineCallbacks
     def get_local_media(self, request, media_id, name):
@@ -282,7 +283,7 @@ class MediaRepository(object):
             with responder:
                 pass
 
-        defer.returnValue(media_info)
+        return media_info
 
     @defer.inlineCallbacks
     def _get_remote_media_impl(self, server_name, media_id):
@@ -317,14 +318,14 @@ class MediaRepository(object):
 
             responder = yield self.media_storage.fetch_media(file_info)
             if responder:
-                defer.returnValue((responder, media_info))
+                return (responder, media_info)
 
         # Failed to find the file anywhere, let's download it.
 
         media_info = yield self._download_remote_file(server_name, media_id, file_id)
 
         responder = yield self.media_storage.fetch_media(file_info)
-        defer.returnValue((responder, media_info))
+        return (responder, media_info)
 
     @defer.inlineCallbacks
     def _download_remote_file(self, server_name, media_id, file_id):
@@ -421,7 +422,7 @@ class MediaRepository(object):
 
         yield self._generate_thumbnails(server_name, media_id, file_id, media_type)
 
-        defer.returnValue(media_info)
+        return media_info
 
     def _get_thumbnail_requirements(self, media_type):
         return self.thumbnail_requirements.get(media_type, ())
@@ -500,7 +501,7 @@ class MediaRepository(object):
                 media_id, t_width, t_height, t_type, t_method, t_len
             )
 
-            defer.returnValue(output_path)
+            return output_path
 
     @defer.inlineCallbacks
     def generate_remote_exact_thumbnail(
@@ -554,7 +555,7 @@ class MediaRepository(object):
                 t_len,
             )
 
-            defer.returnValue(output_path)
+            return output_path
 
     @defer.inlineCallbacks
     def _generate_thumbnails(
@@ -667,7 +668,7 @@ class MediaRepository(object):
                     media_id, t_width, t_height, t_type, t_method, t_len
                 )
 
-        defer.returnValue({"width": m_width, "height": m_height})
+        return {"width": m_width, "height": m_height}
 
     @defer.inlineCallbacks
     def delete_old_remote_media(self, before_ts):
@@ -704,7 +705,7 @@ class MediaRepository(object):
                 yield self.store.delete_remote_media(origin, media_id)
                 deleted += 1
 
-        defer.returnValue({"deleted": deleted})
+        return {"deleted": deleted}
 
 
 class MediaRepositoryResource(Resource):
@@ -753,8 +754,11 @@ class MediaRepositoryResource(Resource):
     """
 
     def __init__(self, hs):
-        Resource.__init__(self)
+        # If we're not configured to use it, raise if we somehow got here.
+        if not hs.config.can_load_media_repo:
+            raise ConfigError("Synapse is not configured to use a media repo.")
 
+        super().__init__()
         media_repo = hs.get_media_repository()
 
         self.putChild(b"upload", UploadResource(hs, media_repo))
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index 25e5ac2848..3b87717a5a 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -69,7 +69,7 @@ class MediaStorage(object):
             )
             yield finish_cb()
 
-        defer.returnValue(fname)
+        return fname
 
     @contextlib.contextmanager
     def store_into_file(self, file_info):
@@ -143,14 +143,14 @@ class MediaStorage(object):
         path = self._file_info_to_path(file_info)
         local_path = os.path.join(self.local_media_directory, path)
         if os.path.exists(local_path):
-            defer.returnValue(FileResponder(open(local_path, "rb")))
+            return FileResponder(open(local_path, "rb"))
 
         for provider in self.storage_providers:
             res = yield provider.fetch(path, file_info)
             if res:
-                defer.returnValue(res)
+                return res
 
-        defer.returnValue(None)
+        return None
 
     @defer.inlineCallbacks
     def ensure_media_is_in_local_cache(self, file_info):
@@ -166,7 +166,7 @@ class MediaStorage(object):
         path = self._file_info_to_path(file_info)
         local_path = os.path.join(self.local_media_directory, path)
         if os.path.exists(local_path):
-            defer.returnValue(local_path)
+            return local_path
 
         dirname = os.path.dirname(local_path)
         if not os.path.exists(dirname):
@@ -181,7 +181,7 @@ class MediaStorage(object):
                     )
                     yield res.write_to_consumer(consumer)
                     yield consumer.wait()
-                defer.returnValue(local_path)
+                return local_path
 
         raise Exception("file could not be found")
 
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 5871737bfd..bd40891a7f 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -182,7 +182,7 @@ class PreviewUrlResource(DirectServeResource):
             og = cache_result["og"]
             if isinstance(og, six.text_type):
                 og = og.encode("utf8")
-            defer.returnValue(og)
+            return og
             return
 
         media_info = yield self._download_url(url, user)
@@ -284,7 +284,7 @@ class PreviewUrlResource(DirectServeResource):
             media_info["created_ts"],
         )
 
-        defer.returnValue(jsonog)
+        return jsonog
 
     @defer.inlineCallbacks
     def _download_url(self, url, user):
@@ -354,22 +354,20 @@ class PreviewUrlResource(DirectServeResource):
             # therefore not expire it.
             raise
 
-        defer.returnValue(
-            {
-                "media_type": media_type,
-                "media_length": length,
-                "download_name": download_name,
-                "created_ts": time_now_ms,
-                "filesystem_id": file_id,
-                "filename": fname,
-                "uri": uri,
-                "response_code": code,
-                # FIXME: we should calculate a proper expiration based on the
-                # Cache-Control and Expire headers.  But for now, assume 1 hour.
-                "expires": 60 * 60 * 1000,
-                "etag": headers["ETag"][0] if "ETag" in headers else None,
-            }
-        )
+        return {
+            "media_type": media_type,
+            "media_length": length,
+            "download_name": download_name,
+            "created_ts": time_now_ms,
+            "filesystem_id": file_id,
+            "filename": fname,
+            "uri": uri,
+            "response_code": code,
+            # FIXME: we should calculate a proper expiration based on the
+            # Cache-Control and Expire headers.  But for now, assume 1 hour.
+            "expires": 60 * 60 * 1000,
+            "etag": headers["ETag"][0] if "ETag" in headers else None,
+        }
 
     def _start_expire_url_cache_data(self):
         return run_as_background_process(
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index f183743f31..729c097e6d 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -193,4 +193,4 @@ class ResourceLimitsServerNotices(object):
                 if event_id in referenced_events:
                     referenced_events.remove(event.event_id)
 
-        defer.returnValue((currently_blocked, referenced_events))
+        return (currently_blocked, referenced_events)
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index 71e7e75320..2dac90578c 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -86,7 +86,7 @@ class ServerNoticesManager(object):
         res = yield self._event_creation_handler.create_and_send_nonmember_event(
             requester, event_dict, ratelimit=False
         )
-        defer.returnValue(res)
+        return res
 
     @cachedInlineCallbacks()
     def get_notice_room_for_user(self, user_id):
@@ -120,7 +120,7 @@ class ServerNoticesManager(object):
                 # we found a room which our user shares with the system notice
                 # user
                 logger.info("Using room %s", room.room_id)
-                defer.returnValue(room.room_id)
+                return room.room_id
 
         # apparently no existing notice room: create a new one
         logger.info("Creating server notices room for %s", user_id)
@@ -158,4 +158,4 @@ class ServerNoticesManager(object):
         self._notifier.on_new_event("account_data_key", max_id, users=[user_id])
 
         logger.info("Created server notices room %s for %s", room_id, user_id)
-        defer.returnValue(room_id)
+        return room_id
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 9f708fa205..a0d34f16ea 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -135,7 +135,7 @@ class StateHandler(object):
             event = None
             if event_id:
                 event = yield self.store.get_event(event_id, allow_none=True)
-            defer.returnValue(event)
+            return event
-            return
 
         state_map = yield self.store.get_events(
@@ -145,7 +145,7 @@ class StateHandler(object):
             key: state_map[e_id] for key, e_id in iteritems(state) if e_id in state_map
         }
 
-        defer.returnValue(state)
+        return state
 
     @defer.inlineCallbacks
     def get_current_state_ids(self, room_id, latest_event_ids=None):
@@ -169,7 +169,7 @@ class StateHandler(object):
         ret = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
         state = ret.state
 
-        defer.returnValue(state)
+        return state
 
     @defer.inlineCallbacks
     def get_current_users_in_room(self, room_id, latest_event_ids=None):
@@ -189,7 +189,7 @@ class StateHandler(object):
         logger.debug("calling resolve_state_groups from get_current_users_in_room")
         entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
         joined_users = yield self.store.get_joined_users_from_state(room_id, entry)
-        defer.returnValue(joined_users)
+        return joined_users
 
     @defer.inlineCallbacks
     def get_current_hosts_in_room(self, room_id, latest_event_ids=None):
@@ -198,7 +198,7 @@ class StateHandler(object):
         logger.debug("calling resolve_state_groups from get_current_hosts_in_room")
         entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
         joined_hosts = yield self.store.get_joined_hosts(room_id, entry)
-        defer.returnValue(joined_hosts)
+        return joined_hosts
 
     @defer.inlineCallbacks
     def compute_event_context(self, event, old_state=None):
@@ -241,7 +241,7 @@ class StateHandler(object):
                 prev_state_ids=prev_state_ids,
             )
 
-            defer.returnValue(context)
+            return context
 
         if old_state:
             # We already have the state, so we don't need to calculate it.
@@ -275,7 +275,7 @@ class StateHandler(object):
                 prev_state_ids=prev_state_ids,
             )
 
-            defer.returnValue(context)
+            return context
 
         logger.debug("calling resolve_state_groups from compute_event_context")
 
@@ -343,7 +343,7 @@ class StateHandler(object):
             delta_ids=delta_ids,
         )
 
-        defer.returnValue(context)
+        return context
 
     @defer.inlineCallbacks
     def resolve_state_groups_for_events(self, room_id, event_ids):
@@ -368,19 +368,17 @@ class StateHandler(object):
         state_groups_ids = yield self.store.get_state_groups_ids(room_id, event_ids)
 
         if len(state_groups_ids) == 0:
-            defer.returnValue(_StateCacheEntry(state={}, state_group=None))
+            return _StateCacheEntry(state={}, state_group=None)
         elif len(state_groups_ids) == 1:
             name, state_list = list(state_groups_ids.items()).pop()
 
             prev_group, delta_ids = yield self.store.get_state_group_delta(name)
 
-            defer.returnValue(
-                _StateCacheEntry(
-                    state=state_list,
-                    state_group=name,
-                    prev_group=prev_group,
-                    delta_ids=delta_ids,
-                )
+            return _StateCacheEntry(
+                state=state_list,
+                state_group=name,
+                prev_group=prev_group,
+                delta_ids=delta_ids,
             )
 
         room_version = yield self.store.get_room_version(room_id)
@@ -392,7 +390,7 @@ class StateHandler(object):
             None,
             state_res_store=StateResolutionStore(self.store),
         )
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def resolve_events(self, room_version, state_sets, event):
@@ -415,7 +413,7 @@ class StateHandler(object):
 
         new_state = {key: state_map[ev_id] for key, ev_id in iteritems(new_state)}
 
-        defer.returnValue(new_state)
+        return new_state
 
 
 class StateResolutionHandler(object):
@@ -479,7 +477,7 @@ class StateResolutionHandler(object):
             if self._state_cache is not None:
                 cache = self._state_cache.get(group_names, None)
                 if cache:
-                    defer.returnValue(cache)
+                    return cache
 
             logger.info(
                 "Resolving state for %s with %d groups", room_id, len(state_groups_ids)
@@ -525,7 +523,7 @@ class StateResolutionHandler(object):
             if self._state_cache is not None:
                 self._state_cache[group_names] = cache
 
-            defer.returnValue(cache)
+            return cache
 
 
 def _make_state_cache_entry(new_state, state_groups_ids):
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index 88acd4817e..a2f92d9ff9 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -55,7 +55,7 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory):
             a map from (type, state_key) to event_id.
     """
     if len(state_sets) == 1:
-        defer.returnValue(state_sets[0])
+        return state_sets[0]
 
     unconflicted_state, conflicted_state = _seperate(state_sets)
 
@@ -97,10 +97,8 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory):
     state_map_new = yield state_map_factory(new_needed_events)
     state_map.update(state_map_new)
 
-    defer.returnValue(
-        _resolve_with_state(
-            unconflicted_state, conflicted_state, auth_events, state_map
-        )
+    return _resolve_with_state(
+        unconflicted_state, conflicted_state, auth_events, state_map
     )
 
 
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index db969e8997..b327c86f40 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -63,7 +63,7 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
     unconflicted_state, conflicted_state = _seperate(state_sets)
 
     if not conflicted_state:
-        defer.returnValue(unconflicted_state)
+        return unconflicted_state
 
     logger.debug("%d conflicted state entries", len(conflicted_state))
     logger.debug("Calculating auth chain difference")
@@ -137,7 +137,7 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
 
     logger.debug("done")
 
-    defer.returnValue(resolved_state)
+    return resolved_state
 
 
 @defer.inlineCallbacks
@@ -168,18 +168,18 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store):
             aev = yield _get_event(aid, event_map, state_res_store)
             if (aev.type, aev.state_key) == (EventTypes.Create, ""):
                 if aev.content.get("creator") == event.sender:
-                    defer.returnValue(100)
+                    return 100
                 break
-        defer.returnValue(0)
+        return 0
 
     level = pl.content.get("users", {}).get(event.sender)
     if level is None:
         level = pl.content.get("users_default", 0)
 
     if level is None:
-        defer.returnValue(0)
+        return 0
     else:
-        defer.returnValue(int(level))
+        return int(level)
 
 
 @defer.inlineCallbacks
@@ -224,7 +224,7 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store):
     intersection = set(auth_sets[0]).intersection(*auth_sets[1:])
     union = set().union(*auth_sets)
 
-    defer.returnValue(union - intersection)
+    return union - intersection
 
 
 def _seperate(state_sets):
@@ -343,7 +343,7 @@ def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_
     it = lexicographical_topological_sort(graph, key=_get_power_order)
     sorted_events = list(it)
 
-    defer.returnValue(sorted_events)
+    return sorted_events
 
 
 @defer.inlineCallbacks
@@ -396,7 +396,7 @@ def _iterative_auth_checks(
         except AuthError:
             pass
 
-    defer.returnValue(resolved_state)
+    return resolved_state
 
 
 @defer.inlineCallbacks
@@ -439,7 +439,7 @@ def _mainline_sort(event_ids, resolved_power_event_id, event_map, state_res_stor
 
     event_ids.sort(key=lambda ev_id: order_map[ev_id])
 
-    defer.returnValue(event_ids)
+    return event_ids
 
 
 @defer.inlineCallbacks
@@ -462,7 +462,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
     while event:
         depth = mainline_map.get(event.event_id)
         if depth is not None:
-            defer.returnValue(depth)
+            return depth
 
         auth_events = event.auth_event_ids()
         event = None
@@ -474,7 +474,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
                 break
 
     # Didn't find a power level auth event, so we just return 0
-    defer.returnValue(0)
+    return 0
 
 
 @defer.inlineCallbacks
@@ -493,7 +493,7 @@ def _get_event(event_id, event_map, state_res_store):
     if event_id not in event_map:
         events = yield state_res_store.get_events([event_id], allow_rejected=True)
         event_map.update(events)
-    defer.returnValue(event_map[event_id])
+    return event_map[event_id]
 
 
 def lexicographical_topological_sort(graph, key):
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 86a333a919..e7f6ea7286 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -498,7 +498,7 @@ class DataStore(
         )
         count = yield self.runInteraction("get_users_paginate", self.get_user_count_txn)
         retval = {"users": users, "total": count}
-        defer.returnValue(retval)
+        return retval
 
     def search_users(self, term):
         """Function to search users list for one or more users with
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index a7c93efa46..489ce82fae 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -513,7 +513,7 @@ class SQLBaseStore(object):
                 after_callback(*after_args, **after_kwargs)
             raise
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def runWithConnection(self, func, *args, **kwargs):
@@ -553,7 +553,7 @@ class SQLBaseStore(object):
         with PreserveLoggingContext():
             result = yield self._db_pool.runWithConnection(inner_func, *args, **kwargs)
 
-        defer.returnValue(result)
+        return result
 
     @staticmethod
     def cursor_to_dict(cursor):
@@ -615,8 +615,8 @@ class SQLBaseStore(object):
             # a cursor after we receive an error from the db.
             if not or_ignore:
                 raise
-            defer.returnValue(False)
-        defer.returnValue(True)
+            return False
+        return True
 
     @staticmethod
     def _simple_insert_txn(txn, table, values):
@@ -708,7 +708,7 @@ class SQLBaseStore(object):
                     insertion_values,
                     lock=lock,
                 )
-                defer.returnValue(result)
+                return result
             except self.database_engine.module.IntegrityError as e:
                 attempts += 1
                 if attempts >= 5:
@@ -1121,7 +1121,7 @@ class SQLBaseStore(object):
         results = []
 
         if not iterable:
-            defer.returnValue(results)
+            return results
 
         # iterables can not be sliced, so convert it to a list first
         it_list = list(iterable)
@@ -1142,7 +1142,7 @@ class SQLBaseStore(object):
 
             results.extend(rows)
 
-        defer.returnValue(results)
+        return results
 
     @classmethod
     def _simple_select_many_txn(cls, txn, table, column, iterable, keyvalues, retcols):
diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py
index 8394389073..9fa5b4f3d6 100644
--- a/synapse/storage/account_data.py
+++ b/synapse/storage/account_data.py
@@ -111,9 +111,9 @@ class AccountDataWorkerStore(SQLBaseStore):
         )
 
         if result:
-            defer.returnValue(json.loads(result))
+            return json.loads(result)
         else:
-            defer.returnValue(None)
+            return None
 
     @cached(num_args=2)
     def get_account_data_for_room(self, user_id, room_id):
@@ -264,11 +264,9 @@ class AccountDataWorkerStore(SQLBaseStore):
             on_invalidate=cache_context.invalidate,
         )
         if not ignored_account_data:
-            defer.returnValue(False)
+            return False
 
-        defer.returnValue(
-            ignored_user_id in ignored_account_data.get("ignored_users", {})
-        )
+        return ignored_user_id in ignored_account_data.get("ignored_users", {})
 
 
 class AccountDataStore(AccountDataWorkerStore):
@@ -332,7 +330,7 @@ class AccountDataStore(AccountDataWorkerStore):
             )
 
         result = self._account_data_id_gen.get_current_token()
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def add_account_data_for_user(self, user_id, account_data_type, content):
@@ -373,7 +371,7 @@ class AccountDataStore(AccountDataWorkerStore):
             )
 
         result = self._account_data_id_gen.get_current_token()
-        defer.returnValue(result)
+        return result
 
     def _update_max_stream_id(self, next_id):
         """Update the max stream_id
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index eb329ebd8b..05d9c05c3f 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -145,7 +145,7 @@ class ApplicationServiceTransactionWorkerStore(
             for service in as_list:
                 if service.id == res["as_id"]:
                     services.append(service)
-        defer.returnValue(services)
+        return services
 
     @defer.inlineCallbacks
     def get_appservice_state(self, service):
@@ -164,9 +164,8 @@ class ApplicationServiceTransactionWorkerStore(
             desc="get_appservice_state",
         )
         if result:
-            defer.returnValue(result.get("state"))
+            return result.get("state")
-            return
-        defer.returnValue(None)
+        return None
 
     def set_appservice_state(self, service, state):
         """Set the application service state.
@@ -298,15 +298,13 @@ class ApplicationServiceTransactionWorkerStore(
         )
 
         if not entry:
-            defer.returnValue(None)
+            return None
 
         event_ids = json.loads(entry["event_ids"])
 
         events = yield self.get_events_as_list(event_ids)
 
-        defer.returnValue(
-            AppServiceTransaction(service=service, id=entry["txn_id"], events=events)
-        )
+        return AppServiceTransaction(service=service, id=entry["txn_id"], events=events)
 
     def _get_last_txn(self, txn, service_id):
         txn.execute(
@@ -360,7 +358,7 @@ class ApplicationServiceTransactionWorkerStore(
 
         events = yield self.get_events_as_list(event_ids)
 
-        defer.returnValue((upper_bound, events))
+        return (upper_bound, events)
 
 
 class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStore):
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 50f913a414..e5f0668f09 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -115,7 +115,7 @@ class BackgroundUpdateStore(SQLBaseStore):
                         " Unscheduling background update task."
                     )
                     self._all_done = True
-                    defer.returnValue(None)
+                    return None
 
     @defer.inlineCallbacks
     def has_completed_background_updates(self):
@@ -127,11 +127,11 @@ class BackgroundUpdateStore(SQLBaseStore):
         # if we've previously determined that there is nothing left to do, that
         # is easy
         if self._all_done:
-            defer.returnValue(True)
+            return True
 
         # obviously, if we have things in our queue, we're not done.
         if self._background_update_queue:
-            defer.returnValue(False)
+            return False
 
         # otherwise, check if there are updates to be run. This is important,
         # as we may be running on a worker which doesn't perform the bg updates
@@ -144,9 +144,9 @@ class BackgroundUpdateStore(SQLBaseStore):
         )
         if not updates:
             self._all_done = True
-            defer.returnValue(True)
+            return True
 
-        defer.returnValue(False)
+        return False
 
     @defer.inlineCallbacks
     def do_next_background_update(self, desired_duration_ms):
@@ -173,14 +173,14 @@ class BackgroundUpdateStore(SQLBaseStore):
 
         if not self._background_update_queue:
             # no work left to do
-            defer.returnValue(None)
+            return None
 
         # pop from the front, and add back to the back
         update_name = self._background_update_queue.pop(0)
         self._background_update_queue.append(update_name)
 
         res = yield self._do_background_update(update_name, desired_duration_ms)
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def _do_background_update(self, update_name, desired_duration_ms):
@@ -231,7 +231,7 @@ class BackgroundUpdateStore(SQLBaseStore):
 
         performance.update(items_updated, duration_ms)
 
-        defer.returnValue(len(self._background_update_performance))
+        return len(self._background_update_performance)
 
     def register_background_update_handler(self, update_name, update_handler):
         """Register a handler for doing a background update.
@@ -266,7 +266,7 @@ class BackgroundUpdateStore(SQLBaseStore):
         @defer.inlineCallbacks
         def noop_update(progress, batch_size):
             yield self._end_background_update(update_name)
-            defer.returnValue(1)
+            return 1
 
         self.register_background_update_handler(update_name, noop_update)
 
@@ -370,7 +370,7 @@ class BackgroundUpdateStore(SQLBaseStore):
                 logger.info("Adding index %s to %s", index_name, table)
                 yield self.runWithConnection(runner)
             yield self._end_background_update(update_name)
-            defer.returnValue(1)
+            return 1
 
         self.register_background_update_handler(update_name, updater)
 
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index bda68de5be..6db8c54077 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -104,7 +104,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
 
         yield self.runWithConnection(f)
         yield self._end_background_update("user_ips_drop_nonunique_index")
-        defer.returnValue(1)
+        return 1
 
     @defer.inlineCallbacks
     def _analyze_user_ip(self, progress, batch_size):
@@ -121,7 +121,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
 
         yield self._end_background_update("user_ips_analyze")
 
-        defer.returnValue(1)
+        return 1
 
     @defer.inlineCallbacks
     def _remove_user_ip_dupes(self, progress, batch_size):
@@ -291,7 +291,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         if last:
             yield self._end_background_update("user_ips_remove_dupes")
 
-        defer.returnValue(batch_size)
+        return batch_size
 
     @defer.inlineCallbacks
     def insert_client_ip(
@@ -401,7 +401,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
                         "device_id": did,
                         "last_seen": last_seen,
                     }
-        defer.returnValue(ret)
+        return ret
 
     @classmethod
     def _get_last_client_ip_by_device_txn(cls, txn, user_id, device_id, retcols):
@@ -461,14 +461,12 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
             ((row["access_token"], row["ip"]), (row["user_agent"], row["last_seen"]))
             for row in rows
         )
-        defer.returnValue(
-            list(
-                {
-                    "access_token": access_token,
-                    "ip": ip,
-                    "user_agent": user_agent,
-                    "last_seen": last_seen,
-                }
-                for (access_token, ip), (user_agent, last_seen) in iteritems(results)
-            )
+        return list(
+            {
+                "access_token": access_token,
+                "ip": ip,
+                "user_agent": user_agent,
+                "last_seen": last_seen,
+            }
+            for (access_token, ip), (user_agent, last_seen) in iteritems(results)
         )
diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py
index 4ea0deea4f..79bb0ea46d 100644
--- a/synapse/storage/deviceinbox.py
+++ b/synapse/storage/deviceinbox.py
@@ -92,7 +92,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
                 user_id, last_deleted_stream_id
             )
             if not has_changed:
-                defer.returnValue(0)
+                return 0
 
         def delete_messages_for_device_txn(txn):
             sql = (
@@ -115,7 +115,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
             last_deleted_stream_id, up_to_stream_id
         )
 
-        defer.returnValue(count)
+        return count
 
     def get_new_device_msgs_for_remote(
         self, destination, last_stream_id, current_stream_id, limit
@@ -263,7 +263,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
                     destination, stream_id
                 )
 
-        defer.returnValue(self._device_inbox_id_gen.get_current_token())
+        return self._device_inbox_id_gen.get_current_token()
 
     @defer.inlineCallbacks
     def add_messages_from_remote_to_device_inbox(
@@ -312,7 +312,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
             for user_id in local_messages_by_user_then_device.keys():
                 self._device_inbox_stream_cache.entity_has_changed(user_id, stream_id)
 
-        defer.returnValue(stream_id)
+        return stream_id
 
     def _add_messages_to_local_device_inbox_txn(
         self, txn, stream_id, messages_by_user_then_device
@@ -426,4 +426,4 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
 
         yield self._end_background_update(self.DEVICE_INBOX_STREAM_ID)
 
-        defer.returnValue(1)
+        return 1
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index d2b113a4e7..8f72d92895 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -71,7 +71,7 @@ class DeviceWorkerStore(SQLBaseStore):
             desc="get_devices_by_user",
         )
 
-        defer.returnValue({d["device_id"]: d for d in devices})
+        return {d["device_id"]: d for d in devices}
 
     @defer.inlineCallbacks
     def get_devices_by_remote(self, destination, from_stream_id, limit):
@@ -88,7 +88,7 @@ class DeviceWorkerStore(SQLBaseStore):
             destination, int(from_stream_id)
         )
         if not has_changed:
-            defer.returnValue((now_stream_id, []))
+            return (now_stream_id, [])
 
         # We retrieve n+1 devices from the list of outbound pokes where n is
         # our outbound device update limit. We then check if the very last
@@ -111,7 +111,7 @@ class DeviceWorkerStore(SQLBaseStore):
 
         # Return an empty list if there are no updates
         if not updates:
-            defer.returnValue((now_stream_id, []))
+            return (now_stream_id, [])
 
         # if we have exceeded the limit, we need to exclude any results with the
         # same stream_id as the last row.
@@ -147,13 +147,13 @@ class DeviceWorkerStore(SQLBaseStore):
         # skip that stream_id and return an empty list, and continue with the next
         # stream_id next time.
         if not query_map:
-            defer.returnValue((stream_id_cutoff, []))
+            return (stream_id_cutoff, [])
 
         results = yield self._get_device_update_edus_by_remote(
             destination, from_stream_id, query_map
         )
 
-        defer.returnValue((now_stream_id, results))
+        return (now_stream_id, results)
 
     def _get_devices_by_remote_txn(
         self, txn, destination, from_stream_id, now_stream_id, limit
@@ -232,7 +232,7 @@ class DeviceWorkerStore(SQLBaseStore):
 
                 results.append(result)
 
-        defer.returnValue(results)
+        return results
 
     def _get_last_device_update_for_remote_user(
         self, destination, user_id, from_stream_id
@@ -330,7 +330,7 @@ class DeviceWorkerStore(SQLBaseStore):
             else:
                 results[user_id] = yield self._get_cached_devices_for_user(user_id)
 
-        defer.returnValue((user_ids_not_in_cache, results))
+        return (user_ids_not_in_cache, results)
 
     @cachedInlineCallbacks(num_args=2, tree=True)
     def _get_cached_user_device(self, user_id, device_id):
@@ -340,7 +340,7 @@ class DeviceWorkerStore(SQLBaseStore):
             retcol="content",
             desc="_get_cached_user_device",
         )
-        defer.returnValue(db_to_json(content))
+        return db_to_json(content)
 
     @cachedInlineCallbacks()
     def _get_cached_devices_for_user(self, user_id):
@@ -350,9 +350,9 @@ class DeviceWorkerStore(SQLBaseStore):
             retcols=("device_id", "content"),
             desc="_get_cached_devices_for_user",
         )
-        defer.returnValue(
-            {device["device_id"]: db_to_json(device["content"]) for device in devices}
-        )
+        return {
+            device["device_id"]: db_to_json(device["content"]) for device in devices
+        }
 
     def get_devices_with_keys_by_user(self, user_id):
         """Get all devices (with any device keys) for a user
@@ -482,7 +482,7 @@ class DeviceWorkerStore(SQLBaseStore):
         results = {user_id: None for user_id in user_ids}
         results.update({row["user_id"]: row["stream_id"] for row in rows})
 
-        defer.returnValue(results)
+        return results
 
 
 class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
@@ -543,7 +543,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
         """
         key = (user_id, device_id)
         if self.device_id_exists_cache.get(key, None):
-            defer.returnValue(False)
+            return False
 
         try:
             inserted = yield self._simple_insert(
@@ -557,7 +557,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
                 or_ignore=True,
             )
             self.device_id_exists_cache.prefill(key, True)
-            defer.returnValue(inserted)
+            return inserted
         except Exception as e:
             logger.error(
                 "store_device with device_id=%s(%r) user_id=%s(%r)"
@@ -780,7 +780,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
                 hosts,
                 stream_id,
             )
-        defer.returnValue(stream_id)
+        return stream_id
 
     def _add_device_change_txn(self, txn, user_id, device_ids, hosts, stream_id):
         now = self._clock.time_msec()
@@ -889,4 +889,4 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
 
         yield self.runWithConnection(f)
         yield self._end_background_update(DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES)
-        defer.returnValue(1)
+        return 1
diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py
index 201bbd430c..e966a73f3d 100644
--- a/synapse/storage/directory.py
+++ b/synapse/storage/directory.py
@@ -46,7 +46,6 @@ class DirectoryWorkerStore(SQLBaseStore):
         )
 
         if not room_id:
-            defer.returnValue(None)
+            return None
-            return
 
         servers = yield self._simple_select_onecol(
@@ -57,10 +57,9 @@ class DirectoryWorkerStore(SQLBaseStore):
         )
 
         if not servers:
-            defer.returnValue(None)
+            return None
-            return
 
-        defer.returnValue(RoomAliasMapping(room_id, room_alias.to_string(), servers))
+        return RoomAliasMapping(room_id, room_alias.to_string(), servers)
 
     def get_room_alias_creator(self, room_alias):
         return self._simple_select_one_onecol(
@@ -125,7 +125,7 @@ class DirectoryStore(DirectoryWorkerStore):
             raise SynapseError(
                 409, "Room alias %s already exists" % room_alias.to_string()
             )
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def delete_room_alias(self, room_alias):
@@ -133,7 +133,7 @@ class DirectoryStore(DirectoryWorkerStore):
             "delete_room_alias", self._delete_room_alias_txn, room_alias
         )
 
-        defer.returnValue(room_id)
+        return room_id
 
     def _delete_room_alias_txn(self, txn, room_alias):
         txn.execute(
diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py
index f40ef2ab64..99128f2df7 100644
--- a/synapse/storage/e2e_room_keys.py
+++ b/synapse/storage/e2e_room_keys.py
@@ -61,7 +61,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
 
         row["session_data"] = json.loads(row["session_data"])
 
-        defer.returnValue(row)
+        return row
 
     @defer.inlineCallbacks
     def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key):
@@ -118,7 +118,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
         try:
             version = int(version)
         except ValueError:
-            defer.returnValue({"rooms": {}})
+            return {"rooms": {}}
 
         keyvalues = {"user_id": user_id, "version": version}
         if room_id:
@@ -151,7 +151,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 "session_data": json.loads(row["session_data"]),
             }
 
-        defer.returnValue(sessions)
+        return sessions
 
     @defer.inlineCallbacks
     def delete_e2e_room_keys(self, user_id, version, room_id=None, session_id=None):
diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index 2fabb9e2cb..1e07474e70 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -41,7 +41,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
             dict containing "key_json", "device_display_name".
         """
         if not query_list:
-            defer.returnValue({})
+            return {}
 
         results = yield self.runInteraction(
             "get_e2e_device_keys",
@@ -55,7 +55,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
             for device_id, device_info in iteritems(device_keys):
                 device_info["keys"] = db_to_json(device_info.pop("key_json"))
 
-        defer.returnValue(results)
+        return results
 
     def _get_e2e_device_keys_txn(
         self, txn, query_list, include_all_devices=False, include_deleted_devices=False
@@ -130,9 +130,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
             desc="add_e2e_one_time_keys_check",
         )
 
-        defer.returnValue(
-            {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows}
-        )
+        return {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows}
 
     @defer.inlineCallbacks
     def add_e2e_one_time_keys(self, user_id, device_id, time_now, new_keys):
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index cb4478342f..4f500d893e 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -131,9 +131,9 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         )
 
         if not rows:
-            defer.returnValue(0)
+            return 0
         else:
-            defer.returnValue(max(row["depth"] for row in rows))
+            return max(row["depth"] for row in rows)
 
     def _get_oldest_events_in_room_txn(self, txn, room_id):
         return self._simple_select_onecol_txn(
@@ -169,7 +169,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             # make sure that we don't completely ignore the older events.
             res = res[0:5] + random.sample(res[5:], 5)
 
-        defer.returnValue(res)
+        return res
 
     def get_latest_event_ids_and_hashes_in_room(self, room_id):
         """
@@ -411,7 +411,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             limit,
         )
         events = yield self.get_events_as_list(ids)
-        defer.returnValue(events)
+        return events
 
     def _get_missing_events(self, txn, room_id, earliest_events, latest_events, limit):
 
@@ -463,7 +463,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             desc="get_successor_events",
         )
 
-        defer.returnValue([row["event_id"] for row in rows])
+        return [row["event_id"] for row in rows]
 
 
 class EventFederationStore(EventFederationWorkerStore):
@@ -654,4 +654,4 @@ class EventFederationStore(EventFederationWorkerStore):
         if not result:
             yield self._end_background_update(self.EVENT_AUTH_STATE_ONLY)
 
-        defer.returnValue(batch_size)
+        return batch_size
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index dcfb67e029..22025effbc 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -100,7 +100,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             user_id,
             last_read_event_id,
         )
-        defer.returnValue(ret)
+        return ret
 
     def _get_unread_counts_by_receipt_txn(
         self, txn, room_id, user_id, last_read_event_id
@@ -178,7 +178,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             return [r[0] for r in txn]
 
         ret = yield self.runInteraction("get_push_action_users_in_range", f)
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def get_unread_push_actions_for_user_in_range_for_http(
@@ -279,7 +279,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
 
         # Take only up to the limit. We have to stop at the limit because
         # one of the subqueries may have hit the limit.
-        defer.returnValue(notifs[:limit])
+        return notifs[:limit]
 
     @defer.inlineCallbacks
     def get_unread_push_actions_for_user_in_range_for_email(
@@ -380,7 +380,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
         notifs.sort(key=lambda r: -(r["received_ts"] or 0))
 
         # Now return the first `limit`
-        defer.returnValue(notifs[:limit])
+        return notifs[:limit]
 
     def get_if_maybe_push_in_range_for_user(self, user_id, min_stream_ordering):
         """A fast check to see if there might be something to push for the
@@ -477,7 +477,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
                 keyvalues={"event_id": event_id},
                 desc="remove_push_actions_from_staging",
             )
-            defer.returnValue(res)
+            return res
         except Exception:
             # this method is called from an exception handler, so propagating
             # another exception here really isn't helpful - there's nothing
@@ -732,7 +732,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
         push_actions = yield self.runInteraction("get_push_actions_for_user", f)
         for pa in push_actions:
             pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"])
-        defer.returnValue(push_actions)
+        return push_actions
 
     @defer.inlineCallbacks
     def get_time_of_last_push_action_before(self, stream_ordering):
@@ -749,7 +749,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             return txn.fetchone()
 
         result = yield self.runInteraction("get_time_of_last_push_action_before", f)
-        defer.returnValue(result[0] if result else None)
+        return result[0] if result else None
 
     @defer.inlineCallbacks
     def get_latest_push_action_stream_ordering(self):
@@ -758,7 +758,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             return txn.fetchone()
 
         result = yield self.runInteraction("get_latest_push_action_stream_ordering", f)
-        defer.returnValue(result[0] or 0)
+        return result[0] or 0
 
     def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id):
         # Sad that we have to blow away the cache for the whole room here
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index b70457bfc6..ac876287fc 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -223,7 +223,7 @@ def _retry_on_integrity_error(func):
         except self.database_engine.module.IntegrityError:
             logger.exception("IntegrityError, retrying.")
             res = yield func(self, *args, delete_existing=True, **kwargs)
-        defer.returnValue(res)
+        return res
 
     return f
 
@@ -309,7 +309,7 @@ class EventsStore(
 
         max_persisted_id = yield self._stream_id_gen.get_current_token()
 
-        defer.returnValue(max_persisted_id)
+        return max_persisted_id
 
     @defer.inlineCallbacks
     @log_function
@@ -334,7 +334,7 @@ class EventsStore(
         yield make_deferred_yieldable(deferred)
 
         max_persisted_id = yield self._stream_id_gen.get_current_token()
-        defer.returnValue((event.internal_metadata.stream_ordering, max_persisted_id))
+        return (event.internal_metadata.stream_ordering, max_persisted_id)
 
     def _maybe_start_persisting(self, room_id):
         @defer.inlineCallbacks
@@ -364,147 +364,161 @@ class EventsStore(
         if not events_and_contexts:
             return
 
-        if backfilled:
-            stream_ordering_manager = self._backfill_id_gen.get_next_mult(
-                len(events_and_contexts)
-            )
-        else:
-            stream_ordering_manager = self._stream_id_gen.get_next_mult(
-                len(events_and_contexts)
-            )
-
-        with stream_ordering_manager as stream_orderings:
-            for (event, context), stream in zip(events_and_contexts, stream_orderings):
-                event.internal_metadata.stream_ordering = stream
-
-            chunks = [
-                events_and_contexts[x : x + 100]
-                for x in range(0, len(events_and_contexts), 100)
-            ]
-
-            for chunk in chunks:
-                # We can't easily parallelize these since different chunks
-                # might contain the same event. :(
+        chunks = [
+            events_and_contexts[x : x + 100]
+            for x in range(0, len(events_and_contexts), 100)
+        ]
 
-                # NB: Assumes that we are only persisting events for one room
-                # at a time.
+        for chunk in chunks:
+            # We can't easily parallelize these since different chunks
+            # might contain the same event. :(
 
-                # map room_id->list[event_ids] giving the new forward
-                # extremities in each room
-                new_forward_extremeties = {}
+            # NB: Assumes that we are only persisting events for one room
+            # at a time.
 
-                # map room_id->(type,state_key)->event_id tracking the full
-                # state in each room after adding these events.
-                # This is simply used to prefill the get_current_state_ids
-                # cache
-                current_state_for_room = {}
+            # map room_id->list[event_ids] giving the new forward
+            # extremities in each room
+            new_forward_extremeties = {}
 
-                # map room_id->(to_delete, to_insert) where to_delete is a list
-                # of type/state keys to remove from current state, and to_insert
-                # is a map (type,key)->event_id giving the state delta in each
-                # room
-                state_delta_for_room = {}
+            # map room_id->(type,state_key)->event_id tracking the full
+            # state in each room after adding these events.
+            # This is simply used to prefill the get_current_state_ids
+            # cache
+            current_state_for_room = {}
 
-                if not backfilled:
-                    with Measure(self._clock, "_calculate_state_and_extrem"):
-                        # Work out the new "current state" for each room.
-                        # We do this by working out what the new extremities are and then
-                        # calculating the state from that.
-                        events_by_room = {}
-                        for event, context in chunk:
-                            events_by_room.setdefault(event.room_id, []).append(
-                                (event, context)
-                            )
+            # map room_id->(to_delete, to_insert) where to_delete is a list
+            # of type/state keys to remove from current state, and to_insert
+            # is a map (type,key)->event_id giving the state delta in each
+            # room
+            state_delta_for_room = {}
 
-                        for room_id, ev_ctx_rm in iteritems(events_by_room):
-                            latest_event_ids = yield self.get_latest_event_ids_in_room(
-                                room_id
-                            )
-                            new_latest_event_ids = yield self._calculate_new_extremities(
-                                room_id, ev_ctx_rm, latest_event_ids
+            if not backfilled:
+                with Measure(self._clock, "_calculate_state_and_extrem"):
+                    # Work out the new "current state" for each room.
+                    # We do this by working out what the new extremities are and then
+                    # calculating the state from that.
+                    events_by_room = {}
+                    for event, context in chunk:
+                        events_by_room.setdefault(event.room_id, []).append(
+                            (event, context)
+                        )
+
+                    for room_id, ev_ctx_rm in iteritems(events_by_room):
+                        latest_event_ids = yield self.get_latest_event_ids_in_room(
+                            room_id
+                        )
+                        new_latest_event_ids = yield self._calculate_new_extremities(
+                            room_id, ev_ctx_rm, latest_event_ids
+                        )
+
+                        latest_event_ids = set(latest_event_ids)
+                        if new_latest_event_ids == latest_event_ids:
+                            # No change in extremities, so no change in state
+                            continue
+
+                        # there should always be at least one forward extremity.
+                        # (except during the initial persistence of the send_join
+                        # results, in which case there will be no existing
+                        # extremities, so we'll `continue` above and skip this bit.)
+                        assert new_latest_event_ids, "No forward extremities left!"
+
+                        new_forward_extremeties[room_id] = new_latest_event_ids
+
+                        len_1 = (
+                            len(latest_event_ids) == 1
+                            and len(new_latest_event_ids) == 1
+                        )
+                        if len_1:
+                            all_single_prev_not_state = all(
+                                len(event.prev_event_ids()) == 1
+                                and not event.is_state()
+                                for event, ctx in ev_ctx_rm
                             )
-
-                            latest_event_ids = set(latest_event_ids)
-                            if new_latest_event_ids == latest_event_ids:
-                                # No change in extremities, so no change in state
+                            # Don't bother calculating state if they're just
+                            # a long chain of single ancestor non-state events.
+                            if all_single_prev_not_state:
                                 continue
 
-                            # there should always be at least one forward extremity.
-                            # (except during the initial persistence of the send_join
-                            # results, in which case there will be no existing
-                            # extremities, so we'll `continue` above and skip this bit.)
-                            assert new_latest_event_ids, "No forward extremities left!"
-
-                            new_forward_extremeties[room_id] = new_latest_event_ids
-
-                            len_1 = (
-                                len(latest_event_ids) == 1
-                                and len(new_latest_event_ids) == 1
+                        state_delta_counter.inc()
+                        if len(new_latest_event_ids) == 1:
+                            state_delta_single_event_counter.inc()
+
+                            # This is a fairly handwavey check to see if we could
+                            # have guessed what the delta would have been when
+                            # processing one of these events.
+                            # What we're interested in is if the latest extremities
+                            # were the same when we created the event as they are
+                            # now. When this server creates a new event (as opposed
+                            # to receiving it over federation) it will use the
+                            # forward extremities as the prev_events, so we can
+                            # guess this by looking at the prev_events and checking
+                            # if they match the current forward extremities.
+                            for ev, _ in ev_ctx_rm:
+                                prev_event_ids = set(ev.prev_event_ids())
+                                if latest_event_ids == prev_event_ids:
+                                    state_delta_reuse_delta_counter.inc()
+                                    break
+
+                        logger.info("Calculating state delta for room %s", room_id)
+                        with Measure(
+                            self._clock, "persist_events.get_new_state_after_events"
+                        ):
+                            res = yield self._get_new_state_after_events(
+                                room_id,
+                                ev_ctx_rm,
+                                latest_event_ids,
+                                new_latest_event_ids,
                             )
-                            if len_1:
-                                all_single_prev_not_state = all(
-                                    len(event.prev_event_ids()) == 1
-                                    and not event.is_state()
-                                    for event, ctx in ev_ctx_rm
-                                )
-                                # Don't bother calculating state if they're just
-                                # a long chain of single ancestor non-state events.
-                                if all_single_prev_not_state:
-                                    continue
-
-                            state_delta_counter.inc()
-                            if len(new_latest_event_ids) == 1:
-                                state_delta_single_event_counter.inc()
-
-                                # This is a fairly handwavey check to see if we could
-                                # have guessed what the delta would have been when
-                                # processing one of these events.
-                                # What we're interested in is if the latest extremities
-                                # were the same when we created the event as they are
-                                # now. When this server creates a new event (as opposed
-                                # to receiving it over federation) it will use the
-                                # forward extremities as the prev_events, so we can
-                                # guess this by looking at the prev_events and checking
-                                # if they match the current forward extremities.
-                                for ev, _ in ev_ctx_rm:
-                                    prev_event_ids = set(ev.prev_event_ids())
-                                    if latest_event_ids == prev_event_ids:
-                                        state_delta_reuse_delta_counter.inc()
-                                        break
-
-                            logger.info("Calculating state delta for room %s", room_id)
+                            current_state, delta_ids = res
+
+                        # If either is not None then there has been a change,
+                        # and we need to work out the delta (or use that
+                        # given)
+                        if delta_ids is not None:
+                            # If there is a delta we know that we've
+                            # only added or replaced state, never
+                            # removed keys entirely.
+                            state_delta_for_room[room_id] = ([], delta_ids)
+                        elif current_state is not None:
                             with Measure(
-                                self._clock, "persist_events.get_new_state_after_events"
+                                self._clock, "persist_events.calculate_state_delta"
                             ):
-                                res = yield self._get_new_state_after_events(
-                                    room_id,
-                                    ev_ctx_rm,
-                                    latest_event_ids,
-                                    new_latest_event_ids,
+                                delta = yield self._calculate_state_delta(
+                                    room_id, current_state
                                 )
-                                current_state, delta_ids = res
-
-                            # If either are not None then there has been a change,
-                            # and we need to work out the delta (or use that
-                            # given)
-                            if delta_ids is not None:
-                                # If there is a delta we know that we've
-                                # only added or replaced state, never
-                                # removed keys entirely.
-                                state_delta_for_room[room_id] = ([], delta_ids)
-                            elif current_state is not None:
-                                with Measure(
-                                    self._clock, "persist_events.calculate_state_delta"
-                                ):
-                                    delta = yield self._calculate_state_delta(
-                                        room_id, current_state
-                                    )
-                                state_delta_for_room[room_id] = delta
-
-                            # If we have the current_state then lets prefill
-                            # the cache with it.
-                            if current_state is not None:
-                                current_state_for_room[room_id] = current_state
+                            state_delta_for_room[room_id] = delta
+
+                        # If we have the current_state then let's prefill
+                        # the cache with it.
+                        if current_state is not None:
+                            current_state_for_room[room_id] = current_state
+
+            # We want to calculate the stream orderings as late as possible, as
+            # we only notify after all events with a lesser stream ordering have
+            # been persisted. I.e. if we spend 10s inside the with block then
+            # that will delay all subsequent events from being notified about.
+            # Hence why we do it down here rather than wrapping the entire
+            # function.
+            #
+            # It's safe to do this after calculating the state deltas etc as we
+            # only need to protect the *persistence* of the events. This is to
+            # ensure that queries of the form "fetch events since X" don't
+            # return events and stream positions after events that are still in
+            # flight, as otherwise subsequent requests "fetch event since Y"
+            # will not return those events.
+            #
+            # Note: Multiple instances of this function cannot be in flight at
+            # the same time for the same room.
+            if backfilled:
+                stream_ordering_manager = self._backfill_id_gen.get_next_mult(
+                    len(chunk)
+                )
+            else:
+                stream_ordering_manager = self._stream_id_gen.get_next_mult(len(chunk))
+
+            with stream_ordering_manager as stream_orderings:
+                for (event, context), stream in zip(chunk, stream_orderings):
+                    event.internal_metadata.stream_ordering = stream
 
                 yield self.runInteraction(
                     "persist_events",
@@ -595,7 +609,7 @@ class EventsStore(
             stale = latest_event_ids & result
             stale_forward_extremities_counter.observe(len(stale))
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _get_events_which_are_prevs(self, event_ids):
@@ -633,7 +647,7 @@ class EventsStore(
                 "_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
             )
 
-        defer.returnValue(results)
+        return results
 
     @defer.inlineCallbacks
     def _get_prevs_before_rejected(self, event_ids):
@@ -695,7 +709,7 @@ class EventsStore(
                 "_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
             )
 
-        defer.returnValue(existing_prevs)
+        return existing_prevs
 
     @defer.inlineCallbacks
     def _get_new_state_after_events(
@@ -796,7 +810,7 @@ class EventsStore(
         # If the old and new groups are the same then we don't need to do
         # anything.
         if old_state_groups == new_state_groups:
-            defer.returnValue((None, None))
+            return (None, None)
 
         if len(new_state_groups) == 1 and len(old_state_groups) == 1:
             # If we're going from one state group to another, lets check if
@@ -813,7 +827,7 @@ class EventsStore(
                 # the current state in memory then lets also return that,
                 # but it doesn't matter if we don't.
                 new_state = state_groups_map.get(new_state_group)
-                defer.returnValue((new_state, delta_ids))
+                return (new_state, delta_ids)
 
         # Now that we have calculated new_state_groups we need to get
         # their state IDs so we can resolve to a single state set.
@@ -825,7 +839,7 @@ class EventsStore(
         if len(new_state_groups) == 1:
             # If there is only one state group, then we know what the current
             # state is.
-            defer.returnValue((state_groups_map[new_state_groups.pop()], None))
+            return (state_groups_map[new_state_groups.pop()], None)
 
         # Ok, we need to defer to the state handler to resolve our state sets.
 
@@ -854,7 +868,7 @@ class EventsStore(
             state_res_store=StateResolutionStore(self),
         )
 
-        defer.returnValue((res.state, None))
+        return (res.state, None)
 
     @defer.inlineCallbacks
     def _calculate_state_delta(self, room_id, current_state):
@@ -877,7 +891,7 @@ class EventsStore(
             if ev_id != existing_state.get(key)
         }
 
-        defer.returnValue((to_delete, to_insert))
+        return (to_delete, to_insert)
 
     @log_function
     def _persist_events_txn(
@@ -1564,7 +1578,7 @@ class EventsStore(
             return count
 
         ret = yield self.runInteraction("count_messages", _count_messages)
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def count_daily_sent_messages(self):
@@ -1585,7 +1599,7 @@ class EventsStore(
             return count
 
         ret = yield self.runInteraction("count_daily_sent_messages", _count_messages)
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def count_daily_active_rooms(self):
@@ -1600,7 +1614,7 @@ class EventsStore(
             return count
 
         ret = yield self.runInteraction("count_daily_active_rooms", _count)
-        defer.returnValue(ret)
+        return ret
 
     def get_current_backfill_token(self):
         """The current minimum token that backfilled events have reached"""
@@ -2183,7 +2197,7 @@ class EventsStore(
         """
         to_1, so_1 = yield self._get_event_ordering(event_id1)
         to_2, so_2 = yield self._get_event_ordering(event_id2)
-        defer.returnValue((to_1, so_1) > (to_2, so_2))
+        return (to_1, so_1) > (to_2, so_2)
 
     @cachedInlineCallbacks(max_entries=5000)
     def _get_event_ordering(self, event_id):
@@ -2197,9 +2211,7 @@ class EventsStore(
         if not res:
             raise SynapseError(404, "Could not find event %s" % (event_id,))
 
-        defer.returnValue(
-            (int(res["topological_ordering"]), int(res["stream_ordering"]))
-        )
+        return (int(res["topological_ordering"]), int(res["stream_ordering"]))
 
     def get_all_updated_current_state_deltas(self, from_token, to_token, limit):
         def get_all_updated_current_state_deltas_txn(txn):
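
The event-ordering comparison in the events.py hunk above relies on Python's lexicographic tuple ordering: the topological ordering is compared first and the stream ordering only breaks ties. A minimal sketch, with made-up numbers, of how the (topological_ordering, stream_ordering) tuples compare:

    # Values are illustrative only; they are not taken from this diff.
    event_a = (5, 1200)   # (topological_ordering, stream_ordering)
    event_b = (5, 1180)
    event_c = (4, 9999)

    assert (event_a > event_b) is True   # same depth: later stream position wins
    assert (event_a > event_c) is True   # deeper topological ordering wins outright
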
diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py
index 1ce21d190c..6587f31e2b 100644
--- a/synapse/storage/events_bg_updates.py
+++ b/synapse/storage/events_bg_updates.py
@@ -135,7 +135,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         if not result:
             yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME)
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _background_reindex_origin_server_ts(self, progress, batch_size):
@@ -212,7 +212,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         if not result:
             yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME)
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _cleanup_extremities_bg_update(self, progress, batch_size):
@@ -396,4 +396,4 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
                 "_cleanup_extremities_bg_update_drop_table", _drop_table_txn
             )
 
-        defer.returnValue(num_handled)
+        return num_handled
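
Most of the changes in this diff, including everything in events_bg_updates.py above, are the mechanical replacement of `defer.returnValue(x)` with a bare `return x`. On Python 3.3+ a generator may return a value directly, and Twisted's `@defer.inlineCallbacks` uses that value as the result of the Deferred, so the two spellings are equivalent. A hedged before/after sketch (`get_thing` is a made-up call used only for illustration):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def fetch_thing_old(store):
        thing = yield store.get_thing()   # hypothetical store call
        defer.returnValue(thing)          # Python 2-compatible spelling

    @defer.inlineCallbacks
    def fetch_thing_new(store):
        thing = yield store.get_thing()   # hypothetical store call
        return thing                      # equivalent result on Python 3.3+
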
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index 858fc755a1..c6fa7f82fd 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -29,12 +29,7 @@ from synapse.api.room_versions import EventFormatVersions
 from synapse.events import FrozenEvent, event_type_from_format_version  # noqa: F401
 from synapse.events.snapshot import EventContext  # noqa: F401
 from synapse.events.utils import prune_event
-from synapse.logging.context import (
-    LoggingContext,
-    PreserveLoggingContext,
-    make_deferred_yieldable,
-    run_in_background,
-)
+from synapse.logging.context import LoggingContext, PreserveLoggingContext
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import get_domain_from_id
 from synapse.util import batch_iter
@@ -139,8 +134,11 @@ class EventsWorkerStore(SQLBaseStore):
                 If there is a mismatch, behave as per allow_none.
 
         Returns:
-            Deferred : A FrozenEvent.
+            Deferred[EventBase|None]
         """
+        if not isinstance(event_id, str):
+            raise TypeError("Invalid event_id %r" % (event_id,))
+
         events = yield self.get_events_as_list(
             [event_id],
             check_redacted=check_redacted,
@@ -157,7 +155,7 @@ class EventsWorkerStore(SQLBaseStore):
         if event is None and not allow_none:
             raise NotFoundError("Could not find event %s" % (event_id,))
 
-        defer.returnValue(event)
+        return event
 
     @defer.inlineCallbacks
     def get_events(
@@ -187,7 +185,7 @@ class EventsWorkerStore(SQLBaseStore):
             allow_rejected=allow_rejected,
         )
 
-        defer.returnValue({e.event_id: e for e in events})
+        return {e.event_id: e for e in events}
 
     @defer.inlineCallbacks
     def get_events_as_list(
@@ -217,7 +215,7 @@ class EventsWorkerStore(SQLBaseStore):
         """
 
         if not event_ids:
-            defer.returnValue([])
+            return []
 
         # there may be duplicates so we cast the list to a set
         event_entry_map = yield self._get_events_from_cache_or_db(
@@ -268,6 +266,14 @@ class EventsWorkerStore(SQLBaseStore):
                     )
                     continue
 
+                if original_event.room_id != entry.event.room_id:
+                    logger.info(
+                        "Withholding redaction %s of event %s from a different room",
+                        event_id,
+                        redacted_event_id,
+                    )
+                    continue
+
                 if entry.event.internal_metadata.need_to_check_redaction():
                     original_domain = get_domain_from_id(original_event.sender)
                     redaction_domain = get_domain_from_id(entry.event.sender)
@@ -305,7 +311,7 @@ class EventsWorkerStore(SQLBaseStore):
                         event.unsigned["prev_content"] = prev.content
                         event.unsigned["prev_sender"] = prev.sender
 
-        defer.returnValue(events)
+        return events
 
     @defer.inlineCallbacks
     def _get_events_from_cache_or_db(self, event_ids, allow_rejected=False):
@@ -331,13 +337,12 @@ class EventsWorkerStore(SQLBaseStore):
             log_ctx = LoggingContext.current_context()
             log_ctx.record_event_fetch(len(missing_events_ids))
 
-            # Note that _enqueue_events is also responsible for turning db rows
+            # Note that _get_events_from_db is also responsible for turning db rows
             # into FrozenEvents (via _get_event_from_row), which involves seeing if
             # the events have been redacted, and if so pulling the redaction event out
             # of the database to check it.
             #
-            # _enqueue_events is a bit of a rubbish name but naming is hard.
-            missing_events = yield self._enqueue_events(
+            missing_events = yield self._get_events_from_db(
                 missing_events_ids, allow_rejected=allow_rejected
             )
 
@@ -410,28 +415,28 @@ class EventsWorkerStore(SQLBaseStore):
                 The fetch requests. Each entry consists of a list of event
                 ids to be fetched, and a deferred to be completed once the
                 events have been fetched.
+
+                The deferreds are callbacked with a dictionary mapping from event id
+                to event row. Note that it may well contain additional events that
+                were not part of this request.
         """
         with Measure(self._clock, "_fetch_event_list"):
             try:
-                event_id_lists = list(zip(*event_list))[0]
-                event_ids = [item for sublist in event_id_lists for item in sublist]
+                events_to_fetch = set(
+                    event_id for events, _ in event_list for event_id in events
+                )
 
                 row_dict = self._new_transaction(
-                    conn, "do_fetch", [], [], self._fetch_event_rows, event_ids
+                    conn, "do_fetch", [], [], self._fetch_event_rows, events_to_fetch
                 )
 
                 # We only want to resolve deferreds from the main thread
-                def fire(lst, res):
-                    for ids, d in lst:
-                        if not d.called:
-                            try:
-                                with PreserveLoggingContext():
-                                    d.callback([res[i] for i in ids if i in res])
-                            except Exception:
-                                logger.exception("Failed to callback")
+                def fire():
+                    for _, d in event_list:
+                        d.callback(row_dict)
 
                 with PreserveLoggingContext():
-                    self.hs.get_reactor().callFromThread(fire, event_list, row_dict)
+                    self.hs.get_reactor().callFromThread(fire)
             except Exception as e:
                 logger.exception("do_fetch")
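
A minimal sketch of the row-map contract described in the docstring above (`fire_batch` and the example event ids are assumptions, not part of this diff): every deferred queued for the batch is fired with the same dictionary, so a caller may receive rows for events it never asked for and must pick out its own.

    from twisted.internet import defer

    def fire_batch(event_list, row_map):
        # event_list: [(requested_event_ids, deferred), ...] queued by callers
        for _requested_ids, d in event_list:
            if not d.called:
                d.callback(row_map)

    d1, d2 = defer.Deferred(), defer.Deferred()
    queued = [(["$a:example.org"], d1), (["$b:example.org"], d2)]
    rows = {"$a:example.org": {"json": "{}"}, "$b:example.org": {"json": "{}"}}
    fire_batch(queued, rows)   # both deferreds now hold the full row map
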
 
@@ -446,13 +451,98 @@ class EventsWorkerStore(SQLBaseStore):
                     self.hs.get_reactor().callFromThread(fire, event_list, e)
 
     @defer.inlineCallbacks
-    def _enqueue_events(self, events, allow_rejected=False):
+    def _get_events_from_db(self, event_ids, allow_rejected=False):
+        """Fetch a bunch of events from the database.
+
+        Returned events will be added to the cache for future lookups.
+
+        Args:
+            event_ids (Iterable[str]): The event_ids of the events to fetch
+            allow_rejected (bool): Whether to include rejected events
+
+        Returns:
+            Deferred[Dict[str, _EventCacheEntry]]:
+                map from event id to result. May return extra events which
+                weren't asked for.
+        """
+        fetched_events = {}
+        events_to_fetch = event_ids
+
+        while events_to_fetch:
+            row_map = yield self._enqueue_events(events_to_fetch)
+
+            # we need to recursively fetch any redactions of those events
+            redaction_ids = set()
+            for event_id in events_to_fetch:
+                row = row_map.get(event_id)
+                fetched_events[event_id] = row
+                if row:
+                    redaction_ids.update(row["redactions"])
+
+            events_to_fetch = redaction_ids.difference(fetched_events.keys())
+            if events_to_fetch:
+                logger.debug("Also fetching redaction events %s", events_to_fetch)
+
+        # build a map from event_id to EventBase
+        event_map = {}
+        for event_id, row in fetched_events.items():
+            if not row:
+                continue
+            assert row["event_id"] == event_id
+
+            rejected_reason = row["rejected_reason"]
+
+            if not allow_rejected and rejected_reason:
+                continue
+
+            d = json.loads(row["json"])
+            internal_metadata = json.loads(row["internal_metadata"])
+
+            format_version = row["format_version"]
+            if format_version is None:
+                # This means that we stored the event before we had the concept
+                # of an event format version, so it must be a V1 event.
+                format_version = EventFormatVersions.V1
+
+            original_ev = event_type_from_format_version(format_version)(
+                event_dict=d,
+                internal_metadata_dict=internal_metadata,
+                rejected_reason=rejected_reason,
+            )
+
+            event_map[event_id] = original_ev
+
+        # finally, we can decide whether each one needs redacting, and build
+        # the cache entries.
+        result_map = {}
+        for event_id, original_ev in event_map.items():
+            redactions = fetched_events[event_id]["redactions"]
+            redacted_event = self._maybe_redact_event_row(
+                original_ev, redactions, event_map
+            )
+
+            cache_entry = _EventCacheEntry(
+                event=original_ev, redacted_event=redacted_event
+            )
+
+            self._get_event_cache.prefill((event_id,), cache_entry)
+            result_map[event_id] = cache_entry
+
+        return result_map
+
+    @defer.inlineCallbacks
+    def _enqueue_events(self, events):
         """Fetches events from the database using the _event_fetch_list. This
         allows batch and bulk fetching of events - it allows us to fetch events
         without having to create a new transaction for each request for events.
+
+        Args:
+            events (Iterable[str]): events to be fetched.
+
+        Returns:
+            Deferred[Dict[str, Dict]]: map from event id to row data from the database.
+                May contain events that weren't requested.
         """
-        if not events:
-            defer.returnValue({})
 
         events_d = defer.Deferred()
         with self._event_fetch_lock:
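
The new _get_events_from_db loop above keeps querying until no unseen redaction ids remain. A self-contained sketch of that loop, where `fetch_rows` stands in for the batched database call and rows are reduced to just their "redactions" field (both assumptions for illustration):

    def fetch_with_redactions(event_ids, fetch_rows):
        fetched = {}
        to_fetch = set(event_ids)
        while to_fetch:
            row_map = fetch_rows(to_fetch)
            redaction_ids = set()
            for event_id in to_fetch:
                row = row_map.get(event_id)
                fetched[event_id] = row
                if row:
                    redaction_ids.update(row["redactions"])
            # only chase redactions we have not already pulled from the database
            to_fetch = redaction_ids.difference(fetched.keys())
        return fetched
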
@@ -471,32 +561,12 @@ class EventsWorkerStore(SQLBaseStore):
                 "fetch_events", self.runWithConnection, self._do_fetch
             )
 
-        logger.debug("Loading %d events", len(events))
+        logger.debug("Loading %d events: %s", len(events), events)
         with PreserveLoggingContext():
-            rows = yield events_d
-        logger.debug("Loaded %d events (%d rows)", len(events), len(rows))
-
-        if not allow_rejected:
-            rows[:] = [r for r in rows if r["rejected_reason"] is None]
-
-        res = yield make_deferred_yieldable(
-            defer.gatherResults(
-                [
-                    run_in_background(
-                        self._get_event_from_row,
-                        row["internal_metadata"],
-                        row["json"],
-                        row["redactions"],
-                        rejected_reason=row["rejected_reason"],
-                        format_version=row["format_version"],
-                    )
-                    for row in rows
-                ],
-                consumeErrors=True,
-            )
-        )
+            row_map = yield events_d
+        logger.debug("Loaded %d events (%d rows)", len(events), len(row_map))
 
-        defer.returnValue({e.event.event_id: e for e in res if e})
+        return row_map
 
     def _fetch_event_rows(self, txn, event_ids):
         """Fetch event rows from the database
@@ -569,50 +639,7 @@ class EventsWorkerStore(SQLBaseStore):
 
         return event_dict
 
-    @defer.inlineCallbacks
-    def _get_event_from_row(
-        self, internal_metadata, js, redactions, format_version, rejected_reason=None
-    ):
-        """Parse an event row which has been read from the database
-
-        Args:
-            internal_metadata (str): json-encoded internal_metadata column
-            js (str): json-encoded event body from event_json
-            redactions (list[str]): a list of the events which claim to have redacted
-                this event, from the redactions table
-            format_version: (str): the 'format_version' column
-            rejected_reason (str|None): the reason this event was rejected, if any
-
-        Returns:
-            _EventCacheEntry
-        """
-        with Measure(self._clock, "_get_event_from_row"):
-            d = json.loads(js)
-            internal_metadata = json.loads(internal_metadata)
-
-            if format_version is None:
-                # This means that we stored the event before we had the concept
-                # of a event format version, so it must be a V1 event.
-                format_version = EventFormatVersions.V1
-
-            original_ev = event_type_from_format_version(format_version)(
-                event_dict=d,
-                internal_metadata_dict=internal_metadata,
-                rejected_reason=rejected_reason,
-            )
-
-            redacted_event = yield self._maybe_redact_event_row(original_ev, redactions)
-
-            cache_entry = _EventCacheEntry(
-                event=original_ev, redacted_event=redacted_event
-            )
-
-            self._get_event_cache.prefill((original_ev.event_id,), cache_entry)
-
-        defer.returnValue(cache_entry)
-
-    @defer.inlineCallbacks
-    def _maybe_redact_event_row(self, original_ev, redactions):
+    def _maybe_redact_event_row(self, original_ev, redactions, event_map):
         """Given an event object and a list of possible redacting event ids,
         determine whether to honour any of those redactions and if so return a redacted
         event.
@@ -620,6 +647,8 @@ class EventsWorkerStore(SQLBaseStore):
         Args:
              original_ev (EventBase):
              redactions (iterable[str]): list of event ids of potential redaction events
+             event_map (dict[str, EventBase]): other events which have been fetched, in
+                 which we can look up the redaction events. Map from event id to event.
 
         Returns:
             Deferred[EventBase|None]: if the event should be redacted, a pruned
@@ -629,16 +658,25 @@ class EventsWorkerStore(SQLBaseStore):
             # we choose to ignore redactions of m.room.create events.
             return None
 
-        redaction_map = yield self._get_events_from_cache_or_db(redactions)
-
         for redaction_id in redactions:
-            redaction_entry = redaction_map.get(redaction_id)
-            if not redaction_entry:
+            redaction_event = event_map.get(redaction_id)
+            if not redaction_event or redaction_event.rejected_reason:
                 # we don't have the redaction event, or the redaction event was not
                 # authorized.
+                logger.debug(
+                    "%s was redacted by %s but redaction not found/authed",
+                    original_ev.event_id,
+                    redaction_id,
+                )
                 continue
 
-            redaction_event = redaction_entry.event
+            if redaction_event.room_id != original_ev.room_id:
+                logger.debug(
+                    "%s was redacted by %s but redaction was in a different room!",
+                    original_ev.event_id,
+                    redaction_id,
+                )
+                continue
 
             # Starting in room version v3, some redactions need to be
             # rechecked if we didn't have the redacted event at the
@@ -650,8 +688,15 @@ class EventsWorkerStore(SQLBaseStore):
                     redaction_event.internal_metadata.recheck_redaction = False
                 else:
                     # Senders don't match, so the event isn't actually redacted
+                    logger.debug(
+                        "%s was redacted by %s but the senders don't match",
+                        original_ev.event_id,
+                        redaction_id,
+                    )
                     continue
 
+            logger.debug("Redacting %s due to %s", original_ev.event_id, redaction_id)
+
             # we found a good redaction event. Redact!
             redacted_event = prune_event(original_ev)
             redacted_event.unsigned["redacted_by"] = redaction_id
@@ -679,7 +724,7 @@ class EventsWorkerStore(SQLBaseStore):
             desc="have_events_in_timeline",
         )
 
-        defer.returnValue(set(r["event_id"] for r in rows))
+        return set(r["event_id"] for r in rows)
 
     @defer.inlineCallbacks
     def have_seen_events(self, event_ids):
@@ -705,7 +750,7 @@ class EventsWorkerStore(SQLBaseStore):
         input_iterator = iter(event_ids)
         for chunk in iter(lambda: list(itertools.islice(input_iterator, 100)), []):
             yield self.runInteraction("have_seen_events", have_seen_events_txn, chunk)
-        defer.returnValue(results)
+        return results
 
     def get_seen_events_with_rejections(self, event_ids):
         """Given a list of event ids, check if we rejected them.
@@ -816,4 +861,4 @@ class EventsWorkerStore(SQLBaseStore):
         # it.
         complexity_v1 = round(state_events / 500, 2)
 
-        defer.returnValue({"v1": complexity_v1})
+        return {"v1": complexity_v1}
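
The checks that the reworked _maybe_redact_event_row applies before honouring a redaction can be summarised as a predicate. An illustrative sketch only (attribute names are taken from the diff above; the room-version-3 sender recheck and the actual pruning via prune_event are omitted):

    def should_honour_redaction(original_ev, redaction_event):
        if original_ev.type == "m.room.create":
            return False  # redactions of m.room.create events are ignored
        if redaction_event is None or redaction_event.rejected_reason:
            return False  # redaction missing, or it was rejected
        if redaction_event.room_id != original_ev.room_id:
            return False  # never honour a redaction from a different room
        return True
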
diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py
index b195dc66a0..23b48f6cea 100644
--- a/synapse/storage/filtering.py
+++ b/synapse/storage/filtering.py
@@ -15,8 +15,6 @@
 
 from canonicaljson import encode_canonical_json
 
-from twisted.internet import defer
-
 from synapse.api.errors import Codes, SynapseError
 from synapse.util.caches.descriptors import cachedInlineCallbacks
 
@@ -41,7 +39,7 @@ class FilteringStore(SQLBaseStore):
             desc="get_user_filter",
         )
 
-        defer.returnValue(db_to_json(def_json))
+        return db_to_json(def_json)
 
     def add_user_filter(self, user_localpart, user_filter):
         def_json = encode_canonical_json(user_filter)
diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py
index 73e6fc6de2..15b01c6958 100644
--- a/synapse/storage/group_server.py
+++ b/synapse/storage/group_server.py
@@ -307,15 +307,13 @@ class GroupServerStore(SQLBaseStore):
             desc="get_group_categories",
         )
 
-        defer.returnValue(
-            {
-                row["category_id"]: {
-                    "is_public": row["is_public"],
-                    "profile": json.loads(row["profile"]),
-                }
-                for row in rows
+        return {
+            row["category_id"]: {
+                "is_public": row["is_public"],
+                "profile": json.loads(row["profile"]),
             }
-        )
+            for row in rows
+        }
 
     @defer.inlineCallbacks
     def get_group_category(self, group_id, category_id):
@@ -328,7 +326,7 @@ class GroupServerStore(SQLBaseStore):
 
         category["profile"] = json.loads(category["profile"])
 
-        defer.returnValue(category)
+        return category
 
     def upsert_group_category(self, group_id, category_id, profile, is_public):
         """Add/update room category for group
@@ -370,15 +368,13 @@ class GroupServerStore(SQLBaseStore):
             desc="get_group_roles",
         )
 
-        defer.returnValue(
-            {
-                row["role_id"]: {
-                    "is_public": row["is_public"],
-                    "profile": json.loads(row["profile"]),
-                }
-                for row in rows
+        return {
+            row["role_id"]: {
+                "is_public": row["is_public"],
+                "profile": json.loads(row["profile"]),
             }
-        )
+            for row in rows
+        }
 
     @defer.inlineCallbacks
     def get_group_role(self, group_id, role_id):
@@ -391,7 +387,7 @@ class GroupServerStore(SQLBaseStore):
 
         role["profile"] = json.loads(role["profile"])
 
-        defer.returnValue(role)
+        return role
 
     def upsert_group_role(self, group_id, role_id, profile, is_public):
         """Add/remove user role
@@ -960,7 +956,7 @@ class GroupServerStore(SQLBaseStore):
                 _register_user_group_membership_txn,
                 next_id,
             )
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def create_group(
@@ -1057,9 +1053,9 @@ class GroupServerStore(SQLBaseStore):
 
         now = int(self._clock.time_msec())
         if row and now < row["valid_until_ms"]:
-            defer.returnValue(json.loads(row["attestation_json"]))
+            return json.loads(row["attestation_json"])
 
-        defer.returnValue(None)
+        return None
 
     def get_joined_groups(self, user_id):
         return self._simple_select_onecol(
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index 081564360f..752e9788a2 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -173,7 +173,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
             )
             if user_id:
                 count = count + 1
-        defer.returnValue(count)
+        return count
 
     @defer.inlineCallbacks
     def upsert_monthly_active_user(self, user_id):
diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py
index 42ec8c6bb8..1a0f2d5768 100644
--- a/synapse/storage/presence.py
+++ b/synapse/storage/presence.py
@@ -90,9 +90,7 @@ class PresenceStore(SQLBaseStore):
                 presence_states,
             )
 
-        defer.returnValue(
-            (stream_orderings[-1], self._presence_id_gen.get_current_token())
-        )
+        return (stream_orderings[-1], self._presence_id_gen.get_current_token())
 
     def _update_presence_txn(self, txn, stream_orderings, presence_states):
         for stream_id, state in zip(stream_orderings, presence_states):
@@ -180,7 +178,7 @@ class PresenceStore(SQLBaseStore):
         for row in rows:
             row["currently_active"] = bool(row["currently_active"])
 
-        defer.returnValue({row["user_id"]: UserPresenceState(**row) for row in rows})
+        return {row["user_id"]: UserPresenceState(**row) for row in rows}
 
     def get_current_presence_token(self):
         return self._presence_id_gen.get_current_token()
diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py
index 0ff392bdb4..8a5d8e9b18 100644
--- a/synapse/storage/profile.py
+++ b/synapse/storage/profile.py
@@ -34,15 +34,13 @@ class ProfileWorkerStore(SQLBaseStore):
         except StoreError as e:
             if e.code == 404:
                 # no match
-                defer.returnValue(ProfileInfo(None, None))
+                return ProfileInfo(None, None)
                 return
             else:
                 raise
 
-        defer.returnValue(
-            ProfileInfo(
-                avatar_url=profile["avatar_url"], display_name=profile["displayname"]
-            )
+        return ProfileInfo(
+            avatar_url=profile["avatar_url"], display_name=profile["displayname"]
         )
 
     def get_profile_displayname(self, user_localpart):
@@ -168,7 +166,7 @@ class ProfileStore(ProfileWorkerStore):
         )
 
         if res:
-            defer.returnValue(True)
+            return True
 
         res = yield self._simple_select_one_onecol(
             table="group_invites",
@@ -179,4 +177,4 @@ class ProfileStore(ProfileWorkerStore):
         )
 
         if res:
-            defer.returnValue(True)
+            return True
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index 98cec8c82b..a6517c4cf3 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -120,7 +120,7 @@ class PushRulesWorkerStore(
 
         rules = _load_rules(rows, enabled_map)
 
-        defer.returnValue(rules)
+        return rules
 
     @cachedInlineCallbacks(max_entries=5000)
     def get_push_rules_enabled_for_user(self, user_id):
@@ -130,9 +130,7 @@ class PushRulesWorkerStore(
             retcols=("user_name", "rule_id", "enabled"),
             desc="get_push_rules_enabled_for_user",
         )
-        defer.returnValue(
-            {r["rule_id"]: False if r["enabled"] == 0 else True for r in results}
-        )
+        return {r["rule_id"]: False if r["enabled"] == 0 else True for r in results}
 
     def have_push_rules_changed_for_user(self, user_id, last_id):
         if not self.push_rules_stream_cache.has_entity_changed(user_id, last_id):
@@ -160,7 +158,7 @@ class PushRulesWorkerStore(
     )
     def bulk_get_push_rules(self, user_ids):
         if not user_ids:
-            defer.returnValue({})
+            return {}
 
         results = {user_id: [] for user_id in user_ids}
 
@@ -182,7 +180,7 @@ class PushRulesWorkerStore(
         for user_id, rules in results.items():
             results[user_id] = _load_rules(rules, enabled_map_by_user.get(user_id, {}))
 
-        defer.returnValue(results)
+        return results
 
     @defer.inlineCallbacks
     def move_push_rule_from_room_to_room(self, new_room_id, user_id, rule):
@@ -253,7 +251,7 @@ class PushRulesWorkerStore(
         result = yield self._bulk_get_push_rules_for_room(
             event.room_id, state_group, current_state_ids, event=event
         )
-        defer.returnValue(result)
+        return result
 
     @cachedInlineCallbacks(num_args=2, cache_context=True)
     def _bulk_get_push_rules_for_room(
@@ -312,7 +310,7 @@ class PushRulesWorkerStore(
 
         rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None}
 
-        defer.returnValue(rules_by_user)
+        return rules_by_user
 
     @cachedList(
         cached_method_name="get_push_rules_enabled_for_user",
@@ -322,7 +320,7 @@ class PushRulesWorkerStore(
     )
     def bulk_get_push_rules_enabled(self, user_ids):
         if not user_ids:
-            defer.returnValue({})
+            return {}
 
         results = {user_id: {} for user_id in user_ids}
 
@@ -336,7 +334,7 @@ class PushRulesWorkerStore(
         for row in rows:
             enabled = bool(row["enabled"])
             results.setdefault(row["user_name"], {})[row["rule_id"]] = enabled
-        defer.returnValue(results)
+        return results
 
 
 class PushRuleStore(PushRulesWorkerStore):
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index cfe0a94330..b431d24b8a 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -63,7 +63,7 @@ class PusherWorkerStore(SQLBaseStore):
         ret = yield self._simple_select_one_onecol(
             "pushers", {"user_name": user_id}, "id", allow_none=True
         )
-        defer.returnValue(ret is not None)
+        return ret is not None
 
     def get_pushers_by_app_id_and_pushkey(self, app_id, pushkey):
         return self.get_pushers_by({"app_id": app_id, "pushkey": pushkey})
@@ -95,7 +95,7 @@ class PusherWorkerStore(SQLBaseStore):
             ],
             desc="get_pushers_by",
         )
-        defer.returnValue(self._decode_pushers_rows(ret))
+        return self._decode_pushers_rows(ret)
 
     @defer.inlineCallbacks
     def get_all_pushers(self):
@@ -106,7 +106,7 @@ class PusherWorkerStore(SQLBaseStore):
             return self._decode_pushers_rows(rows)
 
         rows = yield self.runInteraction("get_all_pushers", get_pushers)
-        defer.returnValue(rows)
+        return rows
 
     def get_all_updated_pushers(self, last_id, current_id, limit):
         if last_id == current_id:
@@ -205,7 +205,7 @@ class PusherWorkerStore(SQLBaseStore):
         result = {user_id: False for user_id in user_ids}
         result.update({r["user_name"]: True for r in rows})
 
-        defer.returnValue(result)
+        return result
 
 
 class PusherStore(PusherWorkerStore):
@@ -308,22 +308,36 @@ class PusherStore(PusherWorkerStore):
     def update_pusher_last_stream_ordering_and_success(
         self, app_id, pushkey, user_id, last_stream_ordering, last_success
     ):
-        yield self._simple_update_one(
-            "pushers",
-            {"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
-            {
+        """Update the last stream ordering position we've processed up to for
+        the given pusher.
+
+        Args:
+            app_id (str)
+            pushkey (str)
+            user_id (str)
+            last_stream_ordering (int)
+            last_success (int)
+
+        Returns:
+            Deferred[bool]: True if the pusher still exists; False if it has been deleted.
+        """
+        updated = yield self._simple_update(
+            table="pushers",
+            keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
+            updatevalues={
                 "last_stream_ordering": last_stream_ordering,
                 "last_success": last_success,
             },
             desc="update_pusher_last_stream_ordering_and_success",
         )
 
+        return bool(updated)
+
     @defer.inlineCallbacks
     def update_pusher_failing_since(self, app_id, pushkey, user_id, failing_since):
-        yield self._simple_update_one(
-            "pushers",
-            {"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
-            {"failing_since": failing_since},
+        yield self._simple_update(
+            table="pushers",
+            keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
+            updatevalues={"failing_since": failing_since},
             desc="update_pusher_failing_since",
         )
 
@@ -343,7 +357,7 @@ class PusherStore(PusherWorkerStore):
                 "throttle_ms": row["throttle_ms"],
             }
 
-        defer.returnValue(params_by_room)
+        return params_by_room
 
     @defer.inlineCallbacks
     def set_throttle_params(self, pusher_id, room_id, params):
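
update_pusher_last_stream_ordering_and_success now reports whether the pusher row still exists. A hypothetical caller, sketched here only to show how that boolean can be used (it is not part of this diff), would stop processing instead of silently resurrecting a deleted pusher:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def mark_batch_processed(store, app_id, pushkey, user_id, stream_ordering, now_ms):
        still_exists = yield store.update_pusher_last_stream_ordering_and_success(
            app_id, pushkey, user_id, stream_ordering, now_ms
        )
        if not still_exists:
            # the pusher was deleted while we were sending; drop local state
            return
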
diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py
index b477da12b1..6aa6d98ebb 100644
--- a/synapse/storage/receipts.py
+++ b/synapse/storage/receipts.py
@@ -58,7 +58,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
     @cachedInlineCallbacks()
     def get_users_with_read_receipts_in_room(self, room_id):
         receipts = yield self.get_receipts_for_room(room_id, "m.read")
-        defer.returnValue(set(r["user_id"] for r in receipts))
+        return set(r["user_id"] for r in receipts)
 
     @cached(num_args=2)
     def get_receipts_for_room(self, room_id, receipt_type):
@@ -92,7 +92,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
             desc="get_receipts_for_user",
         )
 
-        defer.returnValue({row["room_id"]: row["event_id"] for row in rows})
+        return {row["room_id"]: row["event_id"] for row in rows}
 
     @defer.inlineCallbacks
     def get_receipts_for_user_with_orderings(self, user_id, receipt_type):
@@ -110,16 +110,14 @@ class ReceiptsWorkerStore(SQLBaseStore):
             return txn.fetchall()
 
         rows = yield self.runInteraction("get_receipts_for_user_with_orderings", f)
-        defer.returnValue(
-            {
-                row[0]: {
-                    "event_id": row[1],
-                    "topological_ordering": row[2],
-                    "stream_ordering": row[3],
-                }
-                for row in rows
+        return {
+            row[0]: {
+                "event_id": row[1],
+                "topological_ordering": row[2],
+                "stream_ordering": row[3],
             }
-        )
+            for row in rows
+        }
 
     @defer.inlineCallbacks
     def get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None):
@@ -147,7 +145,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
             room_ids, to_key, from_key=from_key
         )
 
-        defer.returnValue([ev for res in results.values() for ev in res])
+        return [ev for res in results.values() for ev in res]
 
     def get_linearized_receipts_for_room(self, room_id, to_key, from_key=None):
         """Get receipts for a single room for sending to clients.
@@ -197,7 +195,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
         rows = yield self.runInteraction("get_linearized_receipts_for_room", f)
 
         if not rows:
-            defer.returnValue([])
+            return []
 
         content = {}
         for row in rows:
@@ -205,9 +203,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
                 row["user_id"]
             ] = json.loads(row["data"])
 
-        defer.returnValue(
-            [{"type": "m.receipt", "room_id": room_id, "content": content}]
-        )
+        return [{"type": "m.receipt", "room_id": room_id, "content": content}]
 
     @cachedList(
         cached_method_name="_get_linearized_receipts_for_room",
@@ -217,7 +213,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
     )
     def _get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None):
         if not room_ids:
-            defer.returnValue({})
+            return {}
 
         def f(txn):
             if from_key:
@@ -264,7 +260,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
             room_id: [results[room_id]] if room_id in results else []
             for room_id in room_ids
         }
-        defer.returnValue(results)
+        return results
 
     def get_all_updated_receipts(self, last_id, current_id, limit=None):
         if last_id == current_id:
@@ -468,7 +464,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
             )
 
         if event_ts is None:
-            defer.returnValue(None)
+            return None
 
         now = self._clock.time_msec()
         logger.debug(
@@ -482,7 +478,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
 
         max_persisted_id = self._receipts_id_gen.get_current_token()
 
-        defer.returnValue((stream_id, max_persisted_id))
+        return (stream_id, max_persisted_id)
 
     def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids, data):
         return self.runInteraction(
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 596b272f7b..d8e1864a08 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -75,12 +75,12 @@ class RegistrationWorkerStore(SQLBaseStore):
 
         info = yield self.get_user_by_id(user_id)
         if not info:
-            defer.returnValue(False)
+            return False
 
         now = self.clock.time_msec()
         trial_duration_ms = self.config.mau_trial_days * 24 * 60 * 60 * 1000
         is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms
-        defer.returnValue(is_trial)
+        return is_trial
 
     @cached()
     def get_user_by_access_token(self, token):
@@ -115,7 +115,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             allow_none=True,
             desc="get_expiration_ts_for_user",
         )
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def set_account_validity_for_user(
@@ -190,7 +190,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="get_user_from_renewal_token",
         )
 
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def get_renewal_token_for_user(self, user_id):
@@ -209,7 +209,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="get_renewal_token_for_user",
         )
 
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def get_users_expiring_soon(self):
@@ -237,7 +237,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             self.config.account_validity.renew_at,
         )
 
-        defer.returnValue(res)
+        return res
 
     @defer.inlineCallbacks
     def set_renewal_mail_status(self, user_id, email_sent):
@@ -280,7 +280,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="is_server_admin",
         )
 
-        defer.returnValue(res if res else False)
+        return res if res else False
 
     def _query_for_auth(self, txn, token):
         sql = (
@@ -311,7 +311,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         res = yield self.runInteraction(
             "is_support_user", self.is_support_user_txn, user_id
         )
-        defer.returnValue(res)
+        return res
 
     def is_support_user_txn(self, txn, user_id):
         res = self._simple_select_one_onecol_txn(
@@ -349,7 +349,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             return 0
 
         ret = yield self.runInteraction("count_users", _count_users)
-        defer.returnValue(ret)
+        return ret
 
     def count_daily_user_type(self):
         """
@@ -395,7 +395,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             return count
 
         ret = yield self.runInteraction("count_users", _count_users)
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def find_next_generated_user_id_localpart(self):
@@ -425,7 +425,7 @@ class RegistrationWorkerStore(SQLBaseStore):
                 if i not in found:
                     return i
 
-        defer.returnValue(
+        return (
             (
                 yield self.runInteraction(
                     "find_next_generated_user_id", _find_next_generated_user_id
@@ -447,7 +447,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         user_id = yield self.runInteraction(
             "get_user_id_by_threepid", self.get_user_id_by_threepid_txn, medium, address
         )
-        defer.returnValue(user_id)
+        return user_id
 
     def get_user_id_by_threepid_txn(self, txn, medium, address):
         """Returns user id from threepid
@@ -487,7 +487,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             ["medium", "address", "validated_at", "added_at"],
             "user_get_threepids",
         )
-        defer.returnValue(ret)
+        return ret
 
     def user_delete_threepid(self, user_id, medium, address):
         return self._simple_delete(
@@ -569,6 +569,27 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="get_id_servers_user_bound",
         )
 
+    @cachedInlineCallbacks()
+    def get_user_deactivated_status(self, user_id):
+        """Retrieve the value for the `deactivated` property for the provided user.
+
+        Args:
+            user_id (str): The ID of the user to retrieve the status for.
+
+        Returns:
+            Deferred[bool]: The requested value.
+        """
+
+        res = yield self._simple_select_one_onecol(
+            table="users",
+            keyvalues={"name": user_id},
+            retcol="deactivated",
+            desc="get_user_deactivated_status",
+        )
+
+        # Convert the integer into a boolean.
+        return res == 1
+
 
 class RegistrationStore(
     RegistrationWorkerStore, background_updates.BackgroundUpdateStore
@@ -677,7 +698,7 @@ class RegistrationStore(
         if end:
             yield self._end_background_update("users_set_deactivated_flag")
 
-        defer.returnValue(batch_size)
+        return batch_size
 
     @defer.inlineCallbacks
     def add_access_token_to_user(self, user_id, token, device_id, valid_until_ms):
@@ -968,7 +989,7 @@ class RegistrationStore(
             desc="is_guest",
         )
 
-        defer.returnValue(res if res else False)
+        return res if res else False
 
     def add_user_pending_deactivation(self, user_id):
         """
@@ -1035,7 +1056,7 @@ class RegistrationStore(
 
         yield self._end_background_update("user_threepids_grandfather")
 
-        defer.returnValue(1)
+        return 1
 
     def get_threepid_validation_session(
         self, medium, client_secret, address=None, sid=None, validated=True
@@ -1328,24 +1349,3 @@ class RegistrationStore(
             user_id,
             deactivated,
         )
-
-    @cachedInlineCallbacks()
-    def get_user_deactivated_status(self, user_id):
-        """Retrieve the value for the `deactivated` property for the provided user.
-
-        Args:
-            user_id (str): The ID of the user to retrieve the status for.
-
-        Returns:
-            defer.Deferred(bool): The requested value.
-        """
-
-        res = yield self._simple_select_one_onecol(
-            table="users",
-            keyvalues={"name": user_id},
-            retcol="deactivated",
-            desc="get_user_deactivated_status",
-        )
-
-        # Convert the integer into a boolean.
-        defer.returnValue(res == 1)
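
With get_user_deactivated_status moved onto RegistrationWorkerStore and wrapped in @cachedInlineCallbacks, workers can check the flag cheaply. A hypothetical caller, sketched purely for illustration (it is not part of this diff):

    from twisted.internet import defer

    from synapse.api.errors import SynapseError

    @defer.inlineCallbacks
    def assert_not_deactivated(store, user_id):
        deactivated = yield store.get_user_deactivated_status(user_id)
        if deactivated:
            raise SynapseError(403, "This account has been deactivated")
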
diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py
index 9954bc094f..fcb5f2f23a 100644
--- a/synapse/storage/relations.py
+++ b/synapse/storage/relations.py
@@ -17,8 +17,6 @@ import logging
 
 import attr
 
-from twisted.internet import defer
-
 from synapse.api.constants import RelationTypes
 from synapse.api.errors import SynapseError
 from synapse.storage._base import SQLBaseStore
@@ -363,7 +361,7 @@ class RelationsWorkerStore(SQLBaseStore):
             return
 
         edit_event = yield self.get_event(edit_id, allow_none=True)
-        defer.returnValue(edit_event)
+        return edit_event
 
     def has_user_annotated_event(self, parent_id, event_type, aggregation_key, sender):
         """Check if a user has already annotated an event with the same key
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index fe9d79d792..bc606292b8 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -193,14 +193,12 @@ class RoomWorkerStore(SQLBaseStore):
         )
 
         if row:
-            defer.returnValue(
-                RatelimitOverride(
-                    messages_per_second=row["messages_per_second"],
-                    burst_count=row["burst_count"],
-                )
+            return RatelimitOverride(
+                messages_per_second=row["messages_per_second"],
+                burst_count=row["burst_count"],
             )
         else:
-            defer.returnValue(None)
+            return None
 
 
 class RoomStore(RoomWorkerStore, SearchStore):
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 257bcdb2f8..eecb276465 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -108,7 +108,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             room_id, on_invalidate=cache_context.invalidate
         )
         hosts = frozenset(get_domain_from_id(user_id) for user_id in user_ids)
-        defer.returnValue(hosts)
+        return hosts
 
     @cached(max_entries=100000, iterable=True)
     def get_users_in_room(self, room_id):
@@ -156,9 +156,12 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             # then we can avoid a join, which is a Very Good Thing given how
             # frequently this function gets called.
             if self._current_state_events_membership_up_to_date:
+                # Note, rejected events will have a null membership field, so
+                # we manually filter them out.
                 sql = """
                     SELECT count(*), membership FROM current_state_events
                     WHERE type = 'm.room.member' AND room_id = ?
+                        AND membership IS NOT NULL
                     GROUP BY membership
                 """
             else:
@@ -179,19 +182,30 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
             # we order by membership and then fairly arbitrarily by event_id so
             # heroes are consistent
-            sql = """
-                SELECT m.user_id, m.membership, m.event_id
-                FROM room_memberships as m
-                 INNER JOIN current_state_events as c
-                 ON m.event_id = c.event_id
-                 AND m.room_id = c.room_id
-                 AND m.user_id = c.state_key
-                 WHERE c.type = 'm.room.member' AND c.room_id = ?
-                 ORDER BY
-                    CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
-                    m.event_id ASC
-                 LIMIT ?
-            """
+            if self._current_state_events_membership_up_to_date:
+                # Note, rejected events will have a null membership field, so
+                # we manually filter them out.
+                sql = """
+                    SELECT state_key, membership, event_id
+                    FROM current_state_events
+                    WHERE type = 'm.room.member' AND room_id = ?
+                        AND membership IS NOT NULL
+                    ORDER BY
+                        CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
+                        event_id ASC
+                    LIMIT ?
+                """
+            else:
+                sql = """
+                    SELECT c.state_key, m.membership, c.event_id
+                    FROM room_memberships as m
+                    INNER JOIN current_state_events as c USING (room_id, event_id)
+                    WHERE c.type = 'm.room.member' AND c.room_id = ?
+                    ORDER BY
+                        CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
+                        c.event_id ASC
+                    LIMIT ?
+                """
 
             # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
             txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6))
@@ -253,31 +267,38 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         invites = yield self.get_invited_rooms_for_user(user_id)
         for invite in invites:
             if invite.room_id == room_id:
-                defer.returnValue(invite)
-        defer.returnValue(None)
+                return invite
+        return None
 
+    @defer.inlineCallbacks
     def get_rooms_for_user_where_membership_is(self, user_id, membership_list):
         """ Get all the rooms for this user where the membership for this user
         matches one in the membership list.
 
+        Filters out forgotten rooms.
+
         Args:
             user_id (str): The user ID.
             membership_list (list): A list of synapse.api.constants.Membership
             values which the user must be in.
+
         Returns:
-            A list of dictionary objects, with room_id, membership and sender
-            defined.
+            Deferred[list[RoomsForUser]]
         """
         if not membership_list:
             return defer.succeed(None)
 
-        return self.runInteraction(
+        rooms = yield self.runInteraction(
             "get_rooms_for_user_where_membership_is",
             self._get_rooms_for_user_where_membership_is_txn,
             user_id,
             membership_list,
         )
 
+        # Now we filter out forgotten rooms
+        forgotten_rooms = yield self.get_forgotten_rooms_for_user(user_id)
+        return [room for room in rooms if room.room_id not in forgotten_rooms]
+
     def _get_rooms_for_user_where_membership_is_txn(
         self, txn, user_id, membership_list
     ):
@@ -287,26 +308,33 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
         results = []
         if membership_list:
-            where_clause = "user_id = ? AND (%s) AND forgotten = 0" % (
-                " OR ".join(["m.membership = ?" for _ in membership_list]),
-            )
-
-            args = [user_id]
-            args.extend(membership_list)
+            if self._current_state_events_membership_up_to_date:
+                sql = """
+                    SELECT room_id, e.sender, c.membership, event_id, e.stream_ordering
+                    FROM current_state_events AS c
+                    INNER JOIN events AS e USING (room_id, event_id)
+                    WHERE
+                        c.type = 'm.room.member'
+                        AND state_key = ?
+                        AND c.membership IN (%s)
+                """ % (
+                    ",".join("?" * len(membership_list))
+                )
+            else:
+                sql = """
+                    SELECT room_id, e.sender, m.membership, event_id, e.stream_ordering
+                    FROM current_state_events AS c
+                    INNER JOIN room_memberships AS m USING (room_id, event_id)
+                    INNER JOIN events AS e USING (room_id, event_id)
+                    WHERE
+                        c.type = 'm.room.member'
+                        AND state_key = ?
+                        AND m.membership IN (%s)
+                """ % (
+                    ",".join("?" * len(membership_list))
+                )
 
-            sql = (
-                "SELECT m.room_id, m.sender, m.membership, m.event_id, e.stream_ordering"
-                " FROM current_state_events as c"
-                " INNER JOIN room_memberships as m"
-                " ON m.event_id = c.event_id"
-                " INNER JOIN events as e"
-                " ON e.event_id = c.event_id"
-                " AND m.room_id = c.room_id"
-                " AND m.user_id = c.state_key"
-                " WHERE c.type = 'm.room.member' AND %s"
-            ) % (where_clause,)
-
-            txn.execute(sql, args)
+            txn.execute(sql, (user_id, *membership_list))
             results = [RoomsForUser(**r) for r in self.cursor_to_dict(txn)]
 
         if do_invite:
@@ -347,11 +375,9 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         rooms = yield self.get_rooms_for_user_where_membership_is(
             user_id, membership_list=[Membership.JOIN]
         )
-        defer.returnValue(
-            frozenset(
-                GetRoomsForUserWithStreamOrdering(r.room_id, r.stream_ordering)
-                for r in rooms
-            )
+        return frozenset(
+            GetRoomsForUserWithStreamOrdering(r.room_id, r.stream_ordering)
+            for r in rooms
         )
 
     @defer.inlineCallbacks
@@ -361,7 +387,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         rooms = yield self.get_rooms_for_user_with_stream_ordering(
             user_id, on_invalidate=on_invalidate
         )
-        defer.returnValue(frozenset(r.room_id for r in rooms))
+        return frozenset(r.room_id for r in rooms)
 
     @cachedInlineCallbacks(max_entries=500000, cache_context=True, iterable=True)
     def get_users_who_share_room_with_user(self, user_id, cache_context):
@@ -378,7 +404,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             )
             user_who_share_room.update(user_ids)
 
-        defer.returnValue(user_who_share_room)
+        return user_who_share_room
 
     @defer.inlineCallbacks
     def get_joined_users_from_context(self, event, context):
@@ -394,7 +420,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         result = yield self._get_joined_users_from_context(
             event.room_id, state_group, current_state_ids, event=event, context=context
         )
-        defer.returnValue(result)
+        return result
 
     def get_joined_users_from_state(self, room_id, state_entry):
         state_group = state_entry.state_group
@@ -508,7 +534,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
                         avatar_url=to_ascii(event.content.get("avatar_url", None)),
                     )
 
-        defer.returnValue(users_in_room)
+        return users_in_room
 
     @cachedInlineCallbacks(max_entries=10000)
     def is_host_joined(self, room_id, host):
@@ -533,14 +559,14 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         rows = yield self._execute("is_host_joined", None, sql, room_id, like_clause)
 
         if not rows:
-            defer.returnValue(False)
+            return False
 
         user_id = rows[0][0]
         if get_domain_from_id(user_id) != host:
             # This can only happen if the host name has something funky in it
             raise Exception("Invalid host name")
 
-        defer.returnValue(True)
+        return True
 
     @cachedInlineCallbacks()
     def was_host_joined(self, room_id, host):
@@ -573,14 +599,14 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         rows = yield self._execute("was_host_joined", None, sql, room_id, like_clause)
 
         if not rows:
-            defer.returnValue(False)
+            return False
 
         user_id = rows[0][0]
         if get_domain_from_id(user_id) != host:
             # This can only happen if the host name has something funky in it
             raise Exception("Invalid host name")
 
-        defer.returnValue(True)
+        return True
 
     def get_joined_hosts(self, room_id, state_entry):
         state_group = state_entry.state_group
@@ -607,7 +633,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         cache = self._get_joined_hosts_cache(room_id)
         joined_hosts = yield cache.get_destinations(state_entry)
 
-        defer.returnValue(joined_hosts)
+        return joined_hosts
 
     @cached(max_entries=10000)
     def _get_joined_hosts_cache(self, room_id):
@@ -637,7 +663,45 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             return rows[0][0]
 
         count = yield self.runInteraction("did_forget_membership", f)
-        defer.returnValue(count == 0)
+        return count == 0
+
+    @cached()
+    def get_forgotten_rooms_for_user(self, user_id):
+        """Gets all rooms the user has forgotten.
+
+        Args:
+            user_id (str)
+
+        Returns:
+            Deferred[set[str]]
+        """
+
+        def _get_forgotten_rooms_for_user_txn(txn):
+            # This is a slightly convoluted query that first looks up all rooms
+            # that the user has forgotten in the past, then rechecks that list
+            # to see if any have subsequently been updated. This is done so that
+            # we can use a partial index on `forgotten = 1` on the assumption
+            # that few users will actually forget many rooms.
+            #
+            # Note that a room is considered "forgotten" if *all* membership
+            # events for that user and room have the forgotten field set (as
+            # when a user forgets a room we update all rows for that user and
+            # room, not just the current one).
+            sql = """
+                SELECT room_id, (
+                    SELECT count(*) FROM room_memberships
+                    WHERE room_id = m.room_id AND user_id = m.user_id AND forgotten = 0
+                ) AS count
+                FROM room_memberships AS m
+                WHERE user_id = ? AND forgotten = 1
+                GROUP BY room_id, user_id;
+            """
+            txn.execute(sql, (user_id,))
+            return set(row[0] for row in txn if row[1] == 0)
+
+        return self.runInteraction(
+            "get_forgotten_rooms_for_user", _get_forgotten_rooms_for_user_txn
+        )
 
     @defer.inlineCallbacks
     def get_rooms_user_has_been_in(self, user_id):
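
For illustration, the pattern described in the comment inside _get_forgotten_rooms_for_user_txn can be exercised standalone. The sketch below uses sqlite3 with a cut-down room_memberships table (an assumption for the example, not Synapse's full schema): the partial index keeps the `forgotten = 1` candidate set cheap to scan, and the correlated count of `forgotten = 0` rows confirms that every membership row for the user/room pair has been forgotten.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE room_memberships (
        user_id TEXT NOT NULL,
        room_id TEXT NOT NULL,
        event_id TEXT NOT NULL,
        forgotten INTEGER NOT NULL DEFAULT 0
    );
    -- Partial index: only rows with forgotten = 1 are indexed, which stays
    -- small on the assumption that few users forget many rooms.
    CREATE INDEX room_memberships_user_room_forgotten
        ON room_memberships (user_id, room_id) WHERE forgotten = 1;
    """
)

# Two membership events in !a, both forgotten; two in !b, only one forgotten.
rows = [
    ("@alice:hs", "!a:hs", "$1", 1),
    ("@alice:hs", "!a:hs", "$2", 1),
    ("@alice:hs", "!b:hs", "$3", 1),
    ("@alice:hs", "!b:hs", "$4", 0),
]
conn.executemany("INSERT INTO room_memberships VALUES (?, ?, ?, ?)", rows)

sql = """
    SELECT room_id, (
        SELECT count(*) FROM room_memberships
        WHERE room_id = m.room_id AND user_id = m.user_id AND forgotten = 0
    ) AS count
    FROM room_memberships AS m
    WHERE user_id = ? AND forgotten = 1
    GROUP BY room_id, user_id
"""
# A room only counts as forgotten when *no* membership row is left unforgotten.
forgotten = {
    room_id
    for room_id, unforgotten in conn.execute(sql, ("@alice:hs",))
    if unforgotten == 0
}
print(forgotten)  # {'!a:hs'}
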
@@ -670,6 +734,13 @@ class RoomMemberStore(RoomMemberWorkerStore):
             _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME,
             self._background_current_state_membership,
         )
+        self.register_background_index_update(
+            "room_membership_forgotten_idx",
+            index_name="room_memberships_user_room_forgotten",
+            table="room_memberships",
+            columns=["user_id", "room_id"],
+            where_clause="forgotten = 1",
+        )
 
     def _store_room_members_txn(self, txn, events, backfilled):
         """Store a room member in the database.
@@ -771,6 +842,9 @@ class RoomMemberStore(RoomMemberWorkerStore):
             txn.execute(sql, (user_id, room_id))
 
             self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id))
+            self._invalidate_cache_and_stream(
+                txn, self.get_forgotten_rooms_for_user, (user_id,)
+            )
 
         return self.runInteraction("forget_membership", f)
 
@@ -847,53 +921,65 @@ class RoomMemberStore(RoomMemberWorkerStore):
         if not result:
             yield self._end_background_update(_MEMBERSHIP_PROFILE_UPDATE_NAME)
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _background_current_state_membership(self, progress, batch_size):
         """Update the new membership column on current_state_events.
+
+        This works by iterating over all rooms in alphabetical order.
         """
 
-        if "rooms" not in progress:
-            rooms = yield self._simple_select_onecol(
-                table="current_state_events",
-                keyvalues={},
-                retcol="DISTINCT room_id",
-                desc="_background_current_state_membership_get_rooms",
-            )
-            progress["rooms"] = rooms
+        def _background_current_state_membership_txn(txn, last_processed_room):
+            processed = 0
+            while processed < batch_size:
+                txn.execute(
+                    """
+                        SELECT MIN(room_id) FROM current_state_events WHERE room_id > ?
+                    """,
+                    (last_processed_room,),
+                )
+                row = txn.fetchone()
+                if not row or not row[0]:
+                    return processed, True
 
-        rooms = progress["rooms"]
+                next_room, = row
 
-        def _background_current_state_membership_txn(txn):
-            processed = 0
-            while rooms and processed < batch_size:
                 sql = """
-                    UPDATE current_state_events AS c
+                    UPDATE current_state_events
                     SET membership = (
                         SELECT membership FROM room_memberships
-                        WHERE event_id = c.event_id
+                        WHERE event_id = current_state_events.event_id
                     )
                     WHERE room_id = ?
                 """
-                txn.execute(sql, (rooms.pop(),))
+                txn.execute(sql, (next_room,))
                 processed += txn.rowcount
 
+                last_processed_room = next_room
+
             self._background_update_progress_txn(
-                txn, _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME, progress
+                txn,
+                _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME,
+                {"last_processed_room": last_processed_room},
             )
 
-            return processed
+            return processed, False
 
-        result = yield self.runInteraction(
+        # If we haven't got a last processed room then just use the empty
+        # string, which will compare before all room IDs correctly.
+        last_processed_room = progress.get("last_processed_room", "")
+
+        row_count, finished = yield self.runInteraction(
             "_background_current_state_membership_update",
             _background_current_state_membership_txn,
+            last_processed_room,
         )
 
-        if not rooms:
+        if finished:
             yield self._end_background_update(_CURRENT_STATE_MEMBERSHIP_UPDATE_NAME)
 
-        defer.returnValue(result)
+        return row_count
 
 
 class _JoinedHostsCache(object):
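
The reworked `_background_current_state_membership` drops the old approach of storing every room ID in the progress JSON and instead resumes by room ID, walking rooms in order with `SELECT MIN(room_id) ... WHERE room_id > ?`. A rough standalone sketch of that resumption pattern (sqlite3 and a toy table stand in for Synapse's runInteraction and progress plumbing):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE current_state_events (room_id TEXT NOT NULL, event_id TEXT NOT NULL);
    INSERT INTO current_state_events VALUES
        ('!a:hs', '$1'), ('!a:hs', '$2'), ('!b:hs', '$3'), ('!c:hs', '$4');
    """
)

def process_batch(last_processed_room, batch_size):
    """Process up to batch_size rows, one room at a time, in room_id order.

    Returns (rows_processed, finished, last_processed_room); the caller would
    persist last_processed_room as the background-update progress.
    """
    processed = 0
    while processed < batch_size:
        row = conn.execute(
            "SELECT MIN(room_id) FROM current_state_events WHERE room_id > ?",
            (last_processed_room,),
        ).fetchone()
        if not row or not row[0]:
            return processed, True, last_processed_room
        next_room = row[0]
        # ... the per-room UPDATE would go here; we just count its rows ...
        processed += conn.execute(
            "SELECT count(*) FROM current_state_events WHERE room_id = ?",
            (next_room,),
        ).fetchone()[0]
        last_processed_room = next_room
    return processed, False, last_processed_room

# '' sorts before every real room ID, so the first call starts at the beginning.
print(process_batch("", 2))        # (2, False, '!a:hs')
print(process_batch("!a:hs", 10))  # (2, True, '!c:hs')
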
@@ -921,7 +1007,7 @@ class _JoinedHostsCache(object):
             state_entry(synapse.state._StateCacheEntry)
         """
         if state_entry.state_group == self.state_group:
-            defer.returnValue(frozenset(self.hosts_to_joined_users))
+            return frozenset(self.hosts_to_joined_users)
 
         with (yield self.linearizer.queue(())):
             if state_entry.state_group == self.state_group:
@@ -958,7 +1044,7 @@ class _JoinedHostsCache(object):
             else:
                 self.state_group = object()
             self._len = sum(len(v) for v in itervalues(self.hosts_to_joined_users))
-        defer.returnValue(frozenset(self.hosts_to_joined_users))
+        return frozenset(self.hosts_to_joined_users)
 
     def __len__(self):
         return self._len
diff --git a/synapse/storage/schema/delta/56/current_state_events_membership.sql b/synapse/storage/schema/delta/56/current_state_events_membership.sql
index b2e08cd85d..473018676f 100644
--- a/synapse/storage/schema/delta/56/current_state_events_membership.sql
+++ b/synapse/storage/schema/delta/56/current_state_events_membership.sql
@@ -20,6 +20,3 @@
 -- for membership events. (Will also be null for membership events until the
 -- background update job has finished).
 ALTER TABLE current_state_events ADD membership TEXT;
-
-INSERT INTO background_updates (update_name, progress_json) VALUES
-  ('current_state_events_membership', '{}');
diff --git a/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql b/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql
new file mode 100644
index 0000000000..3133d42d4a
--- /dev/null
+++ b/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql
@@ -0,0 +1,24 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- We add membership to current state so that we don't need to join against
+-- room_memberships, which can be surprisingly costly (we do such queries
+-- very frequently).
+-- This will be null for non-membership events and the content.membership key
+-- for membership events. (Will also be null for membership events until the
+-- background update job has finished).
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('current_state_events_membership', '{}');
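
The comment above records why the column exists: reading membership straight off current_state_events avoids a join against room_memberships on hot paths. An illustrative, simplified comparison of the two query shapes (column names are taken from the tables touched in this patch; neither string is a query Synapse necessarily runs verbatim):

# Before: membership has to come from room_memberships via a join.
JOINED_MEMBERS_WITH_JOIN = """
    SELECT c.state_key
    FROM current_state_events AS c
    INNER JOIN room_memberships AS m ON m.event_id = c.event_id
    WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = 'join'
"""

# After: the denormalised column makes the same lookup a single-table query.
JOINED_MEMBERS_DENORMALISED = """
    SELECT state_key
    FROM current_state_events
    WHERE type = 'm.room.member' AND room_id = ? AND membership = 'join'
"""
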
diff --git a/synapse/storage/schema/delta/56/room_membership_idx.sql b/synapse/storage/schema/delta/56/room_membership_idx.sql
new file mode 100644
index 0000000000..92ab1f5e65
--- /dev/null
+++ b/synapse/storage/schema/delta/56/room_membership_idx.sql
@@ -0,0 +1,18 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Adds an index on room_memberships for fetching all forgotten rooms for a user
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('room_membership_forgotten_idx', '{}');
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index f3b1cec933..df87ab6a6d 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -166,7 +166,7 @@ class SearchStore(BackgroundUpdateStore):
         if not result:
             yield self._end_background_update(self.EVENT_SEARCH_UPDATE_NAME)
 
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def _background_reindex_gin_search(self, progress, batch_size):
@@ -209,7 +209,7 @@ class SearchStore(BackgroundUpdateStore):
             yield self.runWithConnection(create_index)
 
         yield self._end_background_update(self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME)
-        defer.returnValue(1)
+        return 1
 
     @defer.inlineCallbacks
     def _background_reindex_search_order(self, progress, batch_size):
@@ -287,7 +287,7 @@ class SearchStore(BackgroundUpdateStore):
         if not finished:
             yield self._end_background_update(self.EVENT_SEARCH_ORDER_UPDATE_NAME)
 
-        defer.returnValue(num_rows)
+        return num_rows
 
     def store_event_search_txn(self, txn, event, key, value):
         """Add event to the search table
@@ -454,17 +454,15 @@ class SearchStore(BackgroundUpdateStore):
 
         count = sum(row["count"] for row in count_results if row["room_id"] in room_ids)
 
-        defer.returnValue(
-            {
-                "results": [
-                    {"event": event_map[r["event_id"]], "rank": r["rank"]}
-                    for r in results
-                    if r["event_id"] in event_map
-                ],
-                "highlights": highlights,
-                "count": count,
-            }
-        )
+        return {
+            "results": [
+                {"event": event_map[r["event_id"]], "rank": r["rank"]}
+                for r in results
+                if r["event_id"] in event_map
+            ],
+            "highlights": highlights,
+            "count": count,
+        }
 
     @defer.inlineCallbacks
     def search_rooms(self, room_ids, search_term, keys, limit, pagination_token=None):
@@ -599,22 +597,20 @@ class SearchStore(BackgroundUpdateStore):
 
         count = sum(row["count"] for row in count_results if row["room_id"] in room_ids)
 
-        defer.returnValue(
-            {
-                "results": [
-                    {
-                        "event": event_map[r["event_id"]],
-                        "rank": r["rank"],
-                        "pagination_token": "%s,%s"
-                        % (r["origin_server_ts"], r["stream_ordering"]),
-                    }
-                    for r in results
-                    if r["event_id"] in event_map
-                ],
-                "highlights": highlights,
-                "count": count,
-            }
-        )
+        return {
+            "results": [
+                {
+                    "event": event_map[r["event_id"]],
+                    "rank": r["rank"],
+                    "pagination_token": "%s,%s"
+                    % (r["origin_server_ts"], r["stream_ordering"]),
+                }
+                for r in results
+                if r["event_id"] in event_map
+            ],
+            "highlights": highlights,
+            "count": count,
+        }
 
     def _find_highlights_in_postgres(self, search_query, events):
         """Given a list of events and a search term, return a list of words
diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py
index 6bd81e84ad..fb83218f90 100644
--- a/synapse/storage/signatures.py
+++ b/synapse/storage/signatures.py
@@ -59,7 +59,7 @@ class SignatureWorkerStore(SQLBaseStore):
             for e_id, h in hashes.items()
         }
 
-        defer.returnValue(list(hashes.items()))
+        return list(hashes.items())
 
     def _get_event_reference_hashes_txn(self, txn, event_id):
         """Get all the hashes for a given PDU.
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index a35289876d..1980a87108 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -422,7 +422,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         # Retrieve the room's create event
         create_event = yield self.get_create_event_for_room(room_id)
-        defer.returnValue(create_event.content.get("room_version", "1"))
+        return create_event.content.get("room_version", "1")
 
     @defer.inlineCallbacks
     def get_room_predecessor(self, room_id):
@@ -442,7 +442,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         create_event = yield self.get_create_event_for_room(room_id)
 
         # Return predecessor if present
-        defer.returnValue(create_event.content.get("predecessor", None))
+        return create_event.content.get("predecessor", None)
 
     @defer.inlineCallbacks
     def get_create_event_for_room(self, room_id):
@@ -466,7 +466,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         # Retrieve the room's create event and return
         create_event = yield self.get_event(create_id)
-        defer.returnValue(create_event)
+        return create_event
 
     @cached(max_entries=100000, iterable=True)
     def get_current_state_ids(self, room_id):
@@ -563,7 +563,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         if not event:
             return
 
-        defer.returnValue(event.content.get("canonical_alias"))
+        return event.content.get("canonical_alias")
 
     @cached(max_entries=10000, iterable=True)
     def get_state_group_delta(self, state_group):
@@ -613,14 +613,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
                 dict of state_group_id -> (dict of (type, state_key) -> event id)
         """
         if not event_ids:
-            defer.returnValue({})
+            return {}
 
         event_to_groups = yield self._get_state_group_for_events(event_ids)
 
         groups = set(itervalues(event_to_groups))
         group_to_state = yield self._get_state_for_groups(groups)
 
-        defer.returnValue(group_to_state)
+        return group_to_state
 
     @defer.inlineCallbacks
     def get_state_ids_for_group(self, state_group):
@@ -634,7 +634,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         """
         group_to_state = yield self._get_state_for_groups((state_group,))
 
-        defer.returnValue(group_to_state[state_group])
+        return group_to_state[state_group]
 
     @defer.inlineCallbacks
     def get_state_groups(self, room_id, event_ids):
@@ -645,7 +645,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
                 dict of state_group_id -> list of state events.
         """
         if not event_ids:
-            defer.returnValue({})
+            return {}
 
         group_to_ids = yield self.get_state_groups_ids(room_id, event_ids)
 
@@ -658,16 +658,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             get_prev_content=False,
         )
 
-        defer.returnValue(
-            {
-                group: [
-                    state_event_map[v]
-                    for v in itervalues(event_id_map)
-                    if v in state_event_map
-                ]
-                for group, event_id_map in iteritems(group_to_ids)
-            }
-        )
+        return {
+            group: [
+                state_event_map[v]
+                for v in itervalues(event_id_map)
+                if v in state_event_map
+            ]
+            for group, event_id_map in iteritems(group_to_ids)
+        }
 
     @defer.inlineCallbacks
     def _get_state_groups_from_groups(self, groups, state_filter):
@@ -694,7 +692,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             )
             results.update(res)
 
-        defer.returnValue(results)
+        return results
 
     def _get_state_groups_from_groups_txn(
         self, txn, groups, state_filter=StateFilter.all()
@@ -829,7 +827,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             for event_id, group in iteritems(event_to_groups)
         }
 
-        defer.returnValue({event: event_to_state[event] for event in event_ids})
+        return {event: event_to_state[event] for event in event_ids}
 
     @defer.inlineCallbacks
     def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()):
@@ -855,7 +853,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             for event_id, group in iteritems(event_to_groups)
         }
 
-        defer.returnValue({event: event_to_state[event] for event in event_ids})
+        return {event: event_to_state[event] for event in event_ids}
 
     @defer.inlineCallbacks
     def get_state_for_event(self, event_id, state_filter=StateFilter.all()):
@@ -871,7 +869,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             A deferred dict from (type, state_key) -> state_event
         """
         state_map = yield self.get_state_for_events([event_id], state_filter)
-        defer.returnValue(state_map[event_id])
+        return state_map[event_id]
 
     @defer.inlineCallbacks
     def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()):
@@ -887,7 +885,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             A deferred dict from (type, state_key) -> state_event
         """
         state_map = yield self.get_state_ids_for_events([event_id], state_filter)
-        defer.returnValue(state_map[event_id])
+        return state_map[event_id]
 
     @cached(max_entries=50000)
     def _get_state_group_for_event(self, event_id):
@@ -917,7 +915,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             desc="_get_state_group_for_events",
         )
 
-        defer.returnValue({row["event_id"]: row["state_group"] for row in rows})
+        return {row["event_id"]: row["state_group"] for row in rows}
 
     def _get_state_for_group_using_cache(self, cache, group, state_filter):
         """Checks if group is in cache. See `_get_state_for_groups`
@@ -997,7 +995,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         incomplete_groups = incomplete_groups_m | incomplete_groups_nm
 
         if not incomplete_groups:
-            defer.returnValue(state)
+            return state
 
         cache_sequence_nm = self._state_group_cache.sequence
         cache_sequence_m = self._state_group_members_cache.sequence
@@ -1024,7 +1022,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             # everything we need from the database anyway.
             state[group] = state_filter.filter_state(group_state_dict)
 
-        defer.returnValue(state)
+        return state
 
     def _get_state_for_groups_using_cache(self, groups, cache, state_filter):
         """Gets the state at each of a list of state groups, optionally
@@ -1498,7 +1496,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
                 self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME
             )
 
-        defer.returnValue(result * BATCH_SIZE_SCALE_FACTOR)
+        return result * BATCH_SIZE_SCALE_FACTOR
 
     @defer.inlineCallbacks
     def _background_index_state(self, progress, batch_size):
@@ -1528,4 +1526,4 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
 
         yield self._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)
 
-        defer.returnValue(1)
+        return 1
diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py
index 0ad40cf226..b6959f7967 100644
--- a/synapse/storage/stats.py
+++ b/synapse/storage/stats.py
@@ -140,7 +140,7 @@ class StatsStore(StateDeltasStore):
 
         if not self.stats_enabled:
             yield self._end_background_update("populate_stats_prepare")
-            defer.returnValue(1)
+            return 1
 
         def _wedge_incremental_processor(txn):
             """
@@ -218,7 +218,7 @@ class StatsStore(StateDeltasStore):
         self.get_earliest_token_for_stats.invalidate_all()
 
         yield self._end_background_update("populate_stats_prepare")
-        defer.returnValue(1)
+        return 1
 
     @defer.inlineCallbacks
     def _populate_stats_process_users(self, progress, batch_size):
@@ -227,7 +227,7 @@ class StatsStore(StateDeltasStore):
         """
         if not self.stats_enabled:
             yield self._end_background_update("populate_stats_process_users")
-            defer.returnValue(1)
+            return 1
 
         def _get_next_batch(txn):
             # Only fetch 250 users, so we don't fetch too many at once, even
@@ -260,7 +260,7 @@ class StatsStore(StateDeltasStore):
         # No more users -- complete the transaction.
         if not users_to_work_on:
             yield self._end_background_update("populate_stats_process_users")
-            defer.returnValue(1)
+            return 1
 
         logger.info(
             "Processing the next %d users of %d remaining",
@@ -280,7 +280,7 @@ class StatsStore(StateDeltasStore):
                 promised_positions,
             )
             yield self._end_background_update("populate_stats_process_users")
-            defer.returnValue(1)
+            return 1
 
         for (user_id,) in users_to_work_on:
             now = self.hs.get_reactor().seconds()
@@ -348,7 +348,7 @@ class StatsStore(StateDeltasStore):
 
             if processed_membership_count > batch_size:
                 # Don't process any more users, we've hit our batch size.
-                defer.returnValue(processed_membership_count)
+                return processed_membership_count
 
         yield self.runInteraction(
             "populate_stats",
@@ -357,7 +357,7 @@ class StatsStore(StateDeltasStore):
             progress,
         )
 
-        defer.returnValue(processed_membership_count)
+        return processed_membership_count
 
     @defer.inlineCallbacks
     def _populate_stats_process_rooms(self, progress, batch_size):
@@ -366,7 +366,7 @@ class StatsStore(StateDeltasStore):
         """
         if not self.stats_enabled:
             yield self._end_background_update("populate_stats_process_rooms")
-            defer.returnValue(1)
+            return 1
 
         def _get_next_batch(txn):
             # Only fetch 250 rooms, so we don't fetch too many at once, even
@@ -399,7 +399,7 @@ class StatsStore(StateDeltasStore):
         # No more rooms -- complete the transaction.
         if not rooms_to_work_on:
             yield self._end_background_update("populate_stats_process_rooms")
-            defer.returnValue(1)
+            return 1
 
         logger.info(
             "Processing the next %d rooms of %d remaining",
@@ -420,7 +420,7 @@ class StatsStore(StateDeltasStore):
                 promised_positions,
             )
             yield self._end_background_update("populate_stats_process_rooms")
-            defer.returnValue(1)
+            return 1
 
         for (room_id,) in rooms_to_work_on:
             current_state_ids = yield self.get_current_state_ids(room_id)
@@ -435,16 +435,18 @@ class StatsStore(StateDeltasStore):
             avatar_id = current_state_ids.get((EventTypes.RoomAvatar, ""))
             canonical_alias_id = current_state_ids.get((EventTypes.CanonicalAlias, ""))
 
+            event_ids = [
+                join_rules_id,
+                history_visibility_id,
+                encryption_id,
+                name_id,
+                topic_id,
+                avatar_id,
+                canonical_alias_id,
+            ]
+
             state_events = yield self.get_events(
-                [
-                    join_rules_id,
-                    history_visibility_id,
-                    encryption_id,
-                    name_id,
-                    topic_id,
-                    avatar_id,
-                    canonical_alias_id,
-                ]
+                [ev for ev in event_ids if ev is not None]
             )
 
             def _get_or_none(event_id, arg):
@@ -535,7 +537,7 @@ class StatsStore(StateDeltasStore):
 
             if processed_event_count > batch_size:
                 # Don't process any more rooms, we've hit our batch size.
-                defer.returnValue(processed_event_count)
+                return processed_event_count
 
         yield self.runInteraction(
             "populate_stats",
@@ -544,7 +546,7 @@ class StatsStore(StateDeltasStore):
             progress,
         )
 
-        defer.returnValue(processed_event_count)
+        return processed_event_count
 
     def update_total_event_count_between_txn(self, txn, low_pos, high_pos):
         """
@@ -934,7 +936,7 @@ class StatsStore(StateDeltasStore):
                 "stats_collect_old", self._collect_old_txn, stats_type
             )
             if not maybe_more:
-                defer.returnValue(None)
+                return None
 
     @defer.inlineCallbacks
     def update_stats_delta(
@@ -966,7 +968,7 @@ class StatsStore(StateDeltasStore):
                     fields,
                     complete_with_stream_id=complete_with_stream_id,
                 )
-                defer.returnValue(res)
+                return res
             except OldCollectionRequired:
                 # retry after collecting old rows
                 yield self.collect_old(stats_type)
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index a0465484df..856c2ee8d8 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -300,7 +300,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         )
 
         if not room_ids:
-            defer.returnValue({})
+            return {}
 
         results = {}
         room_ids = list(room_ids)
@@ -323,7 +323,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             )
             results.update(dict(zip(rm_ids, res)))
 
-        defer.returnValue(results)
+        return results
 
     def get_rooms_that_changed(self, room_ids, from_key):
         """Given a list of rooms and a token, return rooms where there may have
@@ -364,7 +364,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             the chunk of events returned.
         """
         if from_key == to_key:
-            defer.returnValue(([], from_key))
+            return ([], from_key)
 
         from_id = RoomStreamToken.parse_stream_token(from_key).stream
         to_id = RoomStreamToken.parse_stream_token(to_key).stream
@@ -374,7 +374,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         )
 
         if not has_changed:
-            defer.returnValue(([], from_key))
+            return ([], from_key)
 
         def f(txn):
             sql = (
@@ -407,7 +407,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             # get.
             key = from_key
 
-        defer.returnValue((ret, key))
+        return (ret, key)
 
     @defer.inlineCallbacks
     def get_membership_changes_for_user(self, user_id, from_key, to_key):
@@ -415,14 +415,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         to_id = RoomStreamToken.parse_stream_token(to_key).stream
 
         if from_key == to_key:
-            defer.returnValue([])
+            return []
 
         if from_id:
             has_changed = self._membership_stream_cache.has_entity_changed(
                 user_id, int(from_id)
             )
             if not has_changed:
-                defer.returnValue([])
+                return []
 
         def f(txn):
             sql = (
@@ -447,7 +447,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         self._set_before_and_after(ret, rows, topo_order=False)
 
-        defer.returnValue(ret)
+        return ret
 
     @defer.inlineCallbacks
     def get_recent_events_for_room(self, room_id, limit, end_token):
@@ -477,7 +477,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         self._set_before_and_after(events, rows)
 
-        defer.returnValue((events, token))
+        return (events, token)
 
     @defer.inlineCallbacks
     def get_recent_event_ids_for_room(self, room_id, limit, end_token):
@@ -496,7 +496,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         """
         # Allow a zero limit here, and no-op.
         if limit == 0:
-            defer.returnValue(([], end_token))
+            return ([], end_token)
 
         end_token = RoomStreamToken.parse(end_token)
 
@@ -511,7 +511,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         # We want to return the results in ascending order.
         rows.reverse()
 
-        defer.returnValue((rows, token))
+        return (rows, token)
 
     def get_room_event_after_stream_ordering(self, room_id, stream_ordering):
         """Gets details of the first event in a room at or after a stream ordering
@@ -549,12 +549,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         """
         token = yield self.get_room_max_stream_ordering()
         if room_id is None:
-            defer.returnValue("s%d" % (token,))
+            return "s%d" % (token,)
         else:
             topo = yield self.runInteraction(
                 "_get_max_topological_txn", self._get_max_topological_txn, room_id
             )
-            defer.returnValue("t%d-%d" % (topo, token))
+            return "t%d-%d" % (topo, token)
 
     def get_stream_token_for_event(self, event_id):
         """The stream token for an event
@@ -674,14 +674,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             [e for e in results["after"]["event_ids"]], get_prev_content=True
         )
 
-        defer.returnValue(
-            {
-                "events_before": events_before,
-                "events_after": events_after,
-                "start": results["before"]["token"],
-                "end": results["after"]["token"],
-            }
-        )
+        return {
+            "events_before": events_before,
+            "events_after": events_after,
+            "start": results["before"]["token"],
+            "end": results["after"]["token"],
+        }
 
     def _get_events_around_txn(
         self, txn, room_id, event_id, before_limit, after_limit, event_filter
@@ -785,7 +783,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         events = yield self.get_events_as_list(event_ids)
 
-        defer.returnValue((upper_bound, events))
+        return (upper_bound, events)
 
     def get_federation_out_pos(self, typ):
         return self._simple_select_one_onecol(
@@ -939,7 +937,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         self._set_before_and_after(events, rows)
 
-        defer.returnValue((events, token))
+        return (events, token)
 
 
 class StreamStore(StreamWorkerStore):
diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py
index e88f8ea35f..20dd6bd53d 100644
--- a/synapse/storage/tags.py
+++ b/synapse/storage/tags.py
@@ -66,7 +66,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
             room_id string, tag string and content string.
         """
         if last_id == current_id:
-            defer.returnValue([])
+            return []
 
         def get_all_updated_tags_txn(txn):
             sql = (
@@ -107,7 +107,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
             )
             results.extend(tags)
 
-        defer.returnValue(results)
+        return results
 
     @defer.inlineCallbacks
     def get_updated_tags(self, user_id, stream_id):
@@ -135,7 +135,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
             user_id, int(stream_id)
         )
         if not changed:
-            defer.returnValue({})
+            return {}
 
         room_ids = yield self.runInteraction("get_updated_tags", get_updated_tags_txn)
 
@@ -145,7 +145,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
             for room_id in room_ids:
                 results[room_id] = tags_by_room.get(room_id, {})
 
-        defer.returnValue(results)
+        return results
 
     def get_tags_for_room(self, user_id, room_id):
         """Get all the tags for the given room
@@ -194,7 +194,7 @@ class TagsStore(TagsWorkerStore):
         self.get_tags_for_user.invalidate((user_id,))
 
         result = self._account_data_id_gen.get_current_token()
-        defer.returnValue(result)
+        return result
 
     @defer.inlineCallbacks
     def remove_tag_from_room(self, user_id, room_id, tag):
@@ -217,7 +217,7 @@ class TagsStore(TagsWorkerStore):
         self.get_tags_for_user.invalidate((user_id,))
 
         result = self._account_data_id_gen.get_current_token()
-        defer.returnValue(result)
+        return result
 
     def _update_revision_txn(self, txn, user_id, room_id, next_id):
         """Update the latest revision of the tags for the given user and room.
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index c585cf6cf7..b3c3bf55bc 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -147,7 +147,7 @@ class TransactionStore(SQLBaseStore):
 
         result = self._destination_retry_cache.get(destination, SENTINEL)
         if result is not SENTINEL:
-            defer.returnValue(result)
+            return result
 
         result = yield self.runInteraction(
             "get_destination_retry_timings",
@@ -158,7 +158,7 @@ class TransactionStore(SQLBaseStore):
         # We don't hugely care about race conditions between getting and
         # invalidating the cache, since we time out fairly quickly anyway.
         self._destination_retry_cache[destination] = result
-        defer.returnValue(result)
+        return result
 
     def _get_destination_retry_timings(self, txn, destination):
         result = self._simple_select_one_txn(
diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py
index 7fd16fe65e..b5188d9bee 100644
--- a/synapse/storage/user_directory.py
+++ b/synapse/storage/user_directory.py
@@ -109,7 +109,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
         yield self._simple_insert(TEMP_TABLE + "_position", {"position": new_pos})
 
         yield self._end_background_update("populate_user_directory_createtables")
-        defer.returnValue(1)
+        return 1
 
     @defer.inlineCallbacks
     def _populate_user_directory_cleanup(self, progress, batch_size):
@@ -131,7 +131,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
         )
 
         yield self._end_background_update("populate_user_directory_cleanup")
-        defer.returnValue(1)
+        return 1
 
     @defer.inlineCallbacks
     def _populate_user_directory_process_rooms(self, progress, batch_size):
@@ -177,7 +177,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
         # No more rooms -- complete the transaction.
         if not rooms_to_work_on:
             yield self._end_background_update("populate_user_directory_process_rooms")
-            defer.returnValue(1)
+            return 1
 
         logger.info(
             "Processing the next %d rooms of %d remaining"
@@ -257,9 +257,9 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
 
             if processed_event_count > batch_size:
                 # Don't process any more rooms, we've hit our batch size.
-                defer.returnValue(processed_event_count)
+                return processed_event_count
 
-        defer.returnValue(processed_event_count)
+        return processed_event_count
 
     @defer.inlineCallbacks
     def _populate_user_directory_process_users(self, progress, batch_size):
@@ -268,7 +268,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
         """
         if not self.hs.config.user_directory_search_all_users:
             yield self._end_background_update("populate_user_directory_process_users")
-            defer.returnValue(1)
+            return 1
 
         def _get_next_batch(txn):
             sql = "SELECT user_id FROM %s LIMIT %s" % (
@@ -298,7 +298,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
         # No more users -- complete the transaction.
         if not users_to_work_on:
             yield self._end_background_update("populate_user_directory_process_users")
-            defer.returnValue(1)
+            return 1
 
         logger.info(
             "Processing the next %d users of %d remaining"
@@ -322,7 +322,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
                 progress,
             )
 
-        defer.returnValue(len(users_to_work_on))
+        return len(users_to_work_on)
 
     @defer.inlineCallbacks
     def is_room_world_readable_or_publicly_joinable(self, room_id):
@@ -344,16 +344,16 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
             join_rule_ev = yield self.get_event(join_rules_id, allow_none=True)
             if join_rule_ev:
                 if join_rule_ev.content.get("join_rule") == JoinRules.PUBLIC:
-                    defer.returnValue(True)
+                    return True
 
         hist_vis_id = current_state_ids.get((EventTypes.RoomHistoryVisibility, ""))
         if hist_vis_id:
             hist_vis_ev = yield self.get_event(hist_vis_id, allow_none=True)
             if hist_vis_ev:
                 if hist_vis_ev.content.get("history_visibility") == "world_readable":
-                    defer.returnValue(True)
+                    return True
 
-        defer.returnValue(False)
+        return False
 
     def update_profile_in_user_dir(self, user_id, display_name, avatar_url):
         """
@@ -499,7 +499,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
         user_ids = set(user_ids_share_pub)
         user_ids.update(user_ids_share_priv)
 
-        defer.returnValue(user_ids)
+        return user_ids
 
     def add_users_who_share_private_room(self, room_id, user_id_tuples):
         """Insert entries into the users_who_share_private_rooms table. The first
@@ -609,7 +609,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
 
         users = set(pub_rows)
         users.update(rows)
-        defer.returnValue(list(users))
+        return list(users)
 
     @defer.inlineCallbacks
     def get_rooms_in_common_for_users(self, user_id, other_user_id):
@@ -635,7 +635,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
             "get_rooms_in_common_for_users", None, sql, user_id, other_user_id
         )
 
-        defer.returnValue([room_id for room_id, in rows])
+        return [room_id for room_id, in rows]
 
     def delete_all_from_user_dir(self):
         """Delete the entire user directory
@@ -782,7 +782,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
 
         limited = len(results) > limit
 
-        defer.returnValue({"limited": limited, "results": results})
+        return {"limited": limited, "results": results}
 
 
 def _parse_query_sqlite(search_term):
diff --git a/synapse/storage/user_erasure_store.py b/synapse/storage/user_erasure_store.py
index 1815fdc0dd..05cabc2282 100644
--- a/synapse/storage/user_erasure_store.py
+++ b/synapse/storage/user_erasure_store.py
@@ -12,9 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import operator
 
-from twisted.internet import defer
+import operator
 
 from synapse.storage._base import SQLBaseStore
 from synapse.util.caches.descriptors import cached, cachedList
@@ -67,7 +66,7 @@ class UserErasureWorkerStore(SQLBaseStore):
 
         erased_users = yield self.runInteraction("are_users_erased", _get_erased_users)
         res = dict((u, u in erased_users) for u in user_ids)
-        defer.returnValue(res)
+        return res
 
 
 class UserErasureStore(UserErasureWorkerStore):
diff --git a/synapse/streams/events.py b/synapse/streams/events.py
index 488c49747a..b91fb2db7b 100644
--- a/synapse/streams/events.py
+++ b/synapse/streams/events.py
@@ -56,7 +56,7 @@ class EventSources(object):
             device_list_key=device_list_key,
             groups_key=groups_key,
         )
-        defer.returnValue(token)
+        return token
 
     @defer.inlineCallbacks
     def get_current_token_for_pagination(self):
@@ -80,4 +80,4 @@ class EventSources(object):
             device_list_key=0,
             groups_key=0,
         )
-        defer.returnValue(token)
+        return token
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index f506b2a695..7856353002 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -49,7 +49,7 @@ class Clock(object):
         with context.PreserveLoggingContext():
             self._reactor.callLater(seconds, d.callback, seconds)
             res = yield d
-        defer.returnValue(res)
+        return res
 
     def time(self):
         """Returns the current system time in seconds since epoch."""
@@ -59,7 +59,7 @@ class Clock(object):
         """Returns the current system time in miliseconds since epoch."""
         return int(self.time() * 1000)
 
-    def looping_call(self, f, msec):
+    def looping_call(self, f, msec, *args, **kwargs):
         """Call a function repeatedly.
 
         Waits `msec` initially before calling `f` for the first time.
@@ -70,8 +70,10 @@ class Clock(object):
         Args:
             f(function): The function to call repeatedly.
             msec(float): How long to wait between calls in milliseconds.
+            *args: Positional arguments to pass to the function.
+            **kwargs: Keyword arguments to pass to the function.
         """
-        call = task.LoopingCall(f)
+        call = task.LoopingCall(f, *args, **kwargs)
         call.clock = self._reactor
         d = call.start(msec / 1000.0, now=False)
         d.addErrback(log_failure, "Looping call died", consumeErrors=False)
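
The widened signature simply forwards the extra arguments to `twisted.internet.task.LoopingCall`, which already accepts them. A small usage sketch against the stock reactor (the `prune_cache` function and its arguments are invented for the example):

from twisted.internet import reactor, task

def prune_cache(cache_name, max_entries):
    # Placeholder body: real code would evict entries here.
    print("pruning %s down to %d entries" % (cache_name, max_entries))

# Equivalent of clock.looping_call(prune_cache, 2000, "state_cache", 10000):
# the positional args after the interval are passed through to the function.
call = task.LoopingCall(prune_cache, "state_cache", 10000)
call.clock = reactor
call.start(2000 / 1000.0, now=False)  # Clock.looping_call divides msec by 1000

reactor.callLater(7, reactor.stop)
reactor.run()
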
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 58a6b8764f..f1c46836b1 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -366,7 +366,7 @@ class ReadWriteLock(object):
                 new_defer.callback(None)
                 self.key_to_current_readers.get(key, set()).discard(new_defer)
 
-        defer.returnValue(_ctx_manager())
+        return _ctx_manager()
 
     @defer.inlineCallbacks
     def write(self, key):
@@ -396,7 +396,7 @@ class ReadWriteLock(object):
                 if self.key_to_current_writer[key] == new_defer:
                     self.key_to_current_writer.pop(key)
 
-        defer.returnValue(_ctx_manager())
+        return _ctx_manager()
 
 
 def _cancelled_to_timed_out_error(value, timeout):
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
index 8271229015..b50e3503f0 100644
--- a/synapse/util/caches/__init__.py
+++ b/synapse/util/caches/__init__.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -51,7 +52,19 @@ response_cache_evicted = Gauge(
 response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["name"])
 
 
-def register_cache(cache_type, cache_name, cache):
+def register_cache(cache_type, cache_name, cache, collect_callback=None):
+    """Register a cache object for metric collection.
+
+    Args:
+        cache_type (str):
+        cache_name (str): name of the cache
+        cache (object): cache itself
+        collect_callback (callable|None): if not None, a function which is called during
+            metric collection to update additional metrics.
+
+    Returns:
+        CacheMetric: an object which provides inc_{hits,misses,evictions} methods
+    """
 
     # Check if the metric is already registered. Unregister it, if so.
     # This usually happens during tests, as at runtime these caches are
@@ -90,6 +103,8 @@ def register_cache(cache_type, cache_name, cache):
                     cache_hits.labels(cache_name).set(self.hits)
                     cache_evicted.labels(cache_name).set(self.evicted_size)
                     cache_total.labels(cache_name).set(self.hits + self.misses)
+                if collect_callback:
+                    collect_callback()
             except Exception as e:
                 logger.warn("Error calculating metrics for %s: %s", cache_name, e)
                 raise
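
A minimal sketch of the `collect_callback` idea using prometheus_client directly; `ExampleCache`, the metric name and the manual invocation are illustrative stand-ins for Synapse's register_cache collector, which calls the callback at scrape time:

from prometheus_client import Gauge, generate_latest

pending_lookups = Gauge(
    "example_cache_pending", "Number of lookups currently pending", ["name"]
)

class ExampleCache:
    def __init__(self, name):
        self.name = name
        self._pending = {}

    def metrics_collection_callback(self):
        # Run at collection time so the gauge always reflects the live value.
        pending_lookups.labels(self.name).set(len(self._pending))

cache = ExampleCache("getEvent")
cache._pending["$event:hs"] = object()

# In Synapse the callback would be passed via register_cache(...,
# collect_callback=...); here we call it by hand before generating a scrape.
cache.metrics_collection_callback()
print(generate_latest().decode())  # includes example_cache_pending{name="getEvent"} 1.0
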
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 675db2f448..43f66ec4be 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -19,8 +19,9 @@ import logging
 import threading
 from collections import namedtuple
 
-import six
-from six import itervalues, string_types
+from six import itervalues
+
+from prometheus_client import Gauge
 
 from twisted.internet import defer
 
@@ -30,13 +31,18 @@ from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches import get_cache_factor_for
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
-from synapse.util.stringutils import to_ascii
 
 from . import register_cache
 
 logger = logging.getLogger(__name__)
 
 
+cache_pending_metric = Gauge(
+    "synapse_util_caches_cache_pending",
+    "Number of lookups currently pending for this cache",
+    ["name"],
+)
+
 _CacheSentinel = object()
 
 
@@ -82,11 +88,19 @@ class Cache(object):
         self.name = name
         self.keylen = keylen
         self.thread = None
-        self.metrics = register_cache("cache", name, self.cache)
+        self.metrics = register_cache(
+            "cache",
+            name,
+            self.cache,
+            collect_callback=self._metrics_collection_callback,
+        )
 
     def _on_evicted(self, evicted_count):
         self.metrics.inc_evictions(evicted_count)
 
+    def _metrics_collection_callback(self):
+        cache_pending_metric.labels(self.name).set(len(self._pending_deferred_cache))
+
     def check_thread(self):
         expected_thread = self.thread
         if expected_thread is None:
@@ -108,7 +122,7 @@ class Cache(object):
             update_metrics (bool): whether to update the cache hit rate metrics
 
         Returns:
-            Either a Deferred or the raw result
+            Either an ObservableDeferred or the raw result
         """
         callbacks = [callback] if callback else []
         val = self._pending_deferred_cache.get(key, _CacheSentinel)
@@ -132,9 +146,14 @@ class Cache(object):
             return default
 
     def set(self, key, value, callback=None):
+        if not isinstance(value, defer.Deferred):
+            raise TypeError("not a Deferred")
+
         callbacks = [callback] if callback else []
         self.check_thread()
-        entry = CacheEntry(deferred=value, callbacks=callbacks)
+        observable = ObservableDeferred(value, consumeErrors=True)
+        observer = defer.maybeDeferred(observable.observe)
+        entry = CacheEntry(deferred=observable, callbacks=callbacks)
 
         existing_entry = self._pending_deferred_cache.pop(key, None)
         if existing_entry:
@@ -142,20 +161,31 @@ class Cache(object):
 
         self._pending_deferred_cache[key] = entry
 
-        def shuffle(result):
+        def compare_and_pop():
+            """Check if our entry is still the one in _pending_deferred_cache, and
+            if so, pop it.
+
+            Returns true if the entries matched.
+            """
             existing_entry = self._pending_deferred_cache.pop(key, None)
             if existing_entry is entry:
+                return True
+
+            # oops, the _pending_deferred_cache has been updated since
+            # we started our query, so we are out of date.
+            #
+            # Better put back whatever we took out. (We do it this way
+            # round, rather than peeking into the _pending_deferred_cache
+            # and then removing on a match, to make the common case faster)
+            if existing_entry is not None:
+                self._pending_deferred_cache[key] = existing_entry
+
+            return False
+
+        def cb(result):
+            if compare_and_pop():
                 self.cache.set(key, result, entry.callbacks)
             else:
-                # oops, the _pending_deferred_cache has been updated since
-                # we started our query, so we are out of date.
-                #
-                # Better put back whatever we took out. (We do it this way
-                # round, rather than peeking into the _pending_deferred_cache
-                # and then removing on a match, to make the common case faster)
-                if existing_entry is not None:
-                    self._pending_deferred_cache[key] = existing_entry
-
                 # we're not going to put this entry into the cache, so need
                 # to make sure that the invalidation callbacks are called.
                 # That was probably done when _pending_deferred_cache was
@@ -163,9 +193,16 @@ class Cache(object):
                 # `invalidate` being previously called, in which case it may
                 # not have been. Either way, let's double-check now.
                 entry.invalidate()
-            return result
 
-        entry.deferred.addCallback(shuffle)
+        def eb(_fail):
+            compare_and_pop()
+            entry.invalidate()
+
+        # once the deferred completes, we can move the entry from the
+        # _pending_deferred_cache to the real cache.
+        #
+        observer.addCallbacks(cb, eb)
+        return observable
 
     def prefill(self, key, value, callback=None):
         callbacks = [callback] if callback else []
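
The new `set()` contract (callers hand over a plain Deferred; the cache wraps it and only promotes the result once it resolves, and only if the pending entry has not been superseded in the meantime) can be sketched with plain Twisted deferreds. `PendingResultCache` is a toy for illustration; ObservableDeferred, CacheEntry and the invalidation callbacks are deliberately elided:

from twisted.internet import defer

class PendingResultCache:
    """Toy cache: results sit in _pending until their Deferred resolves."""

    def __init__(self):
        self._pending = {}
        self._cache = {}

    def set(self, key, deferred):
        if not isinstance(deferred, defer.Deferred):
            raise TypeError("not a Deferred")
        entry = object()  # sentinel identifying this particular pending entry
        self._pending[key] = (entry, deferred)

        def compare_and_pop():
            # Only pop if *our* entry is still current; otherwise put it back.
            existing = self._pending.pop(key, None)
            if existing is not None and existing[0] is entry:
                return True
            if existing is not None:
                self._pending[key] = existing
            return False

        def cb(result):
            if compare_and_pop():
                self._cache[key] = result
            return result

        def eb(failure):
            compare_and_pop()  # a failed lookup never reaches the real cache
            return failure

        deferred.addCallbacks(cb, eb)
        return deferred

cache = PendingResultCache()
d = defer.Deferred()
cache.set("k", d)
d.callback("value")
print(cache._cache)  # {'k': 'value'}
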
@@ -289,7 +326,7 @@ class CacheDescriptor(_CacheDescriptorBase):
         def foo(self, key, cache_context):
             r1 = yield self.bar1(key, on_invalidate=cache_context.invalidate)
             r2 = yield self.bar2(key, on_invalidate=cache_context.invalidate)
-            defer.returnValue(r1 + r2)
+            return r1 + r2
 
     Args:
         num_args (int): number of positional arguments (excluding ``self`` and
@@ -398,20 +435,10 @@ class CacheDescriptor(_CacheDescriptorBase):
 
                 ret.addErrback(onErr)
 
-                # If our cache_key is a string on py2, try to convert to ascii
-                # to save a bit of space in large caches. Py3 does this
-                # internally automatically.
-                if six.PY2 and isinstance(cache_key, string_types):
-                    cache_key = to_ascii(cache_key)
-
-                result_d = ObservableDeferred(ret, consumeErrors=True)
-                cache.set(cache_key, result_d, callback=invalidate_callback)
+                result_d = cache.set(cache_key, ret, callback=invalidate_callback)
                 observer = result_d.observe()
 
-            if isinstance(observer, defer.Deferred):
-                return make_deferred_yieldable(observer)
-            else:
-                return observer
+            return make_deferred_yieldable(observer)
 
         if self.num_args == 1:
             wrapped.invalidate = lambda key: cache.invalidate(key[0])
@@ -527,7 +554,7 @@ class CacheListDescriptor(_CacheDescriptorBase):
                     missing.add(arg)
 
             if missing:
-                # we need an observable deferred for each entry in the list,
+                # we need a deferred for each entry in the list,
                 # which we put in the cache. Each deferred resolves with the
                 # relevant result for that key.
                 deferreds_map = {}
@@ -535,8 +562,7 @@ class CacheListDescriptor(_CacheDescriptorBase):
                     deferred = defer.Deferred()
                     deferreds_map[arg] = deferred
                     key = arg_to_cache_key(arg)
-                    observable = ObservableDeferred(deferred)
-                    cache.set(key, observable, callback=invalidate_callback)
+                    cache.set(key, deferred, callback=invalidate_callback)
 
                 def complete_all(res):
                     # the wrapped function has completed. It returns a
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py
index d6908e169d..82d3eefe0e 100644
--- a/synapse/util/caches/response_cache.py
+++ b/synapse/util/caches/response_cache.py
@@ -121,7 +121,7 @@ class ResponseCache(object):
             @defer.inlineCallbacks
             def handle_request(request):
                 # etc
-                defer.returnValue(result)
+                return result
 
             result = yield response_cache.wrap(
                 key,
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index c30b6de19c..0910930c21 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -67,7 +67,7 @@ def measure_func(name):
         def measured_func(self, *args, **kwargs):
             with Measure(self.clock, name):
                 r = yield func(self, *args, **kwargs)
-            defer.returnValue(r)
+            return r
 
         return measured_func
 
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index d8d0ceae51..0862b5ca5a 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -95,15 +95,13 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs)
     # maximum backoff even though it might only have been down briefly
     backoff_on_failure = not ignore_backoff
 
-    defer.returnValue(
-        RetryDestinationLimiter(
-            destination,
-            clock,
-            store,
-            retry_interval,
-            backoff_on_failure=backoff_on_failure,
-            **kwargs
-        )
+    return RetryDestinationLimiter(
+        destination,
+        clock,
+        store,
+        retry_interval,
+        backoff_on_failure=backoff_on_failure,
+        **kwargs
     )
 
 
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 2a11c83596..bf0f1eebd8 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -208,7 +208,7 @@ def filter_events_for_client(
     filtered_events = filter(operator.truth, filtered_events)
 
     # we turn it into a list before returning it.
-    defer.returnValue(list(filtered_events))
+    return list(filtered_events)
 
 
 @defer.inlineCallbacks
@@ -317,11 +317,11 @@ def filter_events_for_server(
                 elif redact:
                     to_return.append(prune_event(e))
 
-            defer.returnValue(to_return)
+            return to_return
 
         # If there are no erased users then we can just return the given list
         # of events without having to copy it.
-        defer.returnValue(events)
+        return events
 
     # Ok, so we're dealing with events that have non-trivial visibility
     # rules, so we need to also get the memberships of the room.
@@ -384,4 +384,4 @@ def filter_events_for_server(
         elif redact:
             to_return.append(prune_event(e))
 
-    defer.returnValue(to_return)
+    return to_return