Diffstat (limited to 'synapse')
-rw-r--r--  synapse/__init__.py | 9
-rw-r--r--  synapse/api/auth.py | 28
-rw-r--r--  synapse/api/constants.py | 17
-rw-r--r--  synapse/api/errors.py | 28
-rw-r--r--  synapse/api/room_versions.py | 37
-rw-r--r--  synapse/api/urls.py | 6
-rw-r--r--  synapse/app/_base.py | 89
-rw-r--r--  synapse/app/client_reader.py | 13
-rw-r--r--  synapse/app/frontend_proxy.py | 11
-rwxr-xr-x  synapse/app/homeserver.py | 5
-rw-r--r--  synapse/config/emailconfig.py | 244
-rw-r--r--  synapse/config/homeserver.py | 44
-rw-r--r--  synapse/config/key.py | 247
-rw-r--r--  synapse/config/ratelimiting.py | 115
-rw-r--r--  synapse/config/registration.py | 65
-rw-r--r--  synapse/config/repository.py | 31
-rw-r--r--  synapse/config/saml2_config.py | 2
-rw-r--r--  synapse/config/server.py | 155
-rw-r--r--  synapse/config/stats.py | 60
-rw-r--r--  synapse/config/third_party_event_rules.py | 42
-rw-r--r--  synapse/config/tls.py | 95
-rw-r--r--  synapse/config/user_directory.py | 6
-rw-r--r--  synapse/crypto/context_factory.py | 165
-rw-r--r--  synapse/crypto/event_signing.py | 6
-rw-r--r--  synapse/crypto/keyring.py | 994
-rw-r--r--  synapse/events/__init__.py | 31
-rw-r--r--  synapse/events/builder.py | 13
-rw-r--r--  synapse/events/snapshot.py | 8
-rw-r--r--  synapse/events/spamcheck.py | 2
-rw-r--r--  synapse/events/third_party_rules.py | 114
-rw-r--r--  synapse/events/utils.py | 95
-rw-r--r--  synapse/events/validator.py | 15
-rw-r--r--  synapse/federation/federation_base.py | 63
-rw-r--r--  synapse/federation/federation_client.py | 50
-rw-r--r--  synapse/federation/federation_server.py | 14
-rw-r--r--  synapse/federation/sender/per_destination_queue.py | 151
-rw-r--r--  synapse/federation/transport/server.py | 53
-rw-r--r--  synapse/groups/attestations.py | 18
-rw-r--r--  synapse/handlers/_base.py | 4
-rw-r--r--  synapse/handlers/account_validity.py | 256
-rw-r--r--  synapse/handlers/auth.py | 65
-rw-r--r--  synapse/handlers/deactivate_account.py | 10
-rw-r--r--  synapse/handlers/directory.py | 23
-rw-r--r--  synapse/handlers/events.py | 11
-rw-r--r--  synapse/handlers/federation.py | 422
-rw-r--r--  synapse/handlers/groups_local.py | 4
-rw-r--r--  synapse/handlers/identity.py | 13
-rw-r--r--  synapse/handlers/initial_sync.py | 44
-rw-r--r--  synapse/handlers/message.py | 70
-rw-r--r--  synapse/handlers/pagination.py | 22
-rw-r--r--  synapse/handlers/presence.py | 123
-rw-r--r--  synapse/handlers/profile.py | 85
-rw-r--r--  synapse/handlers/register.py | 13
-rw-r--r--  synapse/handlers/room.py | 37
-rw-r--r--  synapse/handlers/room_member.py | 32
-rw-r--r--  synapse/handlers/search.py | 42
-rw-r--r--  synapse/handlers/stats.py | 333
-rw-r--r--  synapse/handlers/sync.py | 87
-rw-r--r--  synapse/http/client.py | 64
-rw-r--r--  synapse/http/federation/matrix_federation_agent.py | 2
-rw-r--r--  synapse/http/matrixfederationclient.py | 123
-rw-r--r--  synapse/http/servlet.py | 2
-rw-r--r--  synapse/metrics/__init__.py | 111
-rw-r--r--  synapse/push/baserules.py | 17
-rw-r--r--  synapse/push/emailpusher.py | 19
-rw-r--r--  synapse/push/mailer.py | 99
-rw-r--r--  synapse/push/presentable_names.py | 11
-rw-r--r--  synapse/push/pusher.py | 10
-rw-r--r--  synapse/push/pusherpool.py | 30
-rw-r--r--  synapse/python_dependencies.py | 69
-rw-r--r--  synapse/replication/http/_base.py | 10
-rw-r--r--  synapse/replication/slave/storage/events.py | 14
-rw-r--r--  synapse/replication/slave/storage/keys.py | 21
-rw-r--r--  synapse/replication/tcp/streams/_base.py | 1
-rw-r--r--  synapse/replication/tcp/streams/events.py | 11
-rw-r--r--  synapse/res/templates/mail-expiry.css | 4
-rw-r--r--  synapse/res/templates/notice_expiry.html | 43
-rw-r--r--  synapse/res/templates/notice_expiry.txt | 7
-rw-r--r--  synapse/res/templates/password_reset.html | 9
-rw-r--r--  synapse/res/templates/password_reset.txt | 7
-rw-r--r--  synapse/res/templates/password_reset_failure.html | 6
-rw-r--r--  synapse/res/templates/password_reset_success.html | 6
-rw-r--r--  synapse/rest/__init__.py | 21
-rw-r--r--  synapse/rest/admin/__init__.py (renamed from synapse/rest/client/v1/admin.py) | 286
-rw-r--r--  synapse/rest/admin/_base.py | 59
-rw-r--r--  synapse/rest/admin/server_notice_servlet.py | 100
-rw-r--r--  synapse/rest/client/v1/base.py | 65
-rw-r--r--  synapse/rest/client/v1/directory.py | 28
-rw-r--r--  synapse/rest/client/v1/events.py | 22
-rw-r--r--  synapse/rest/client/v1/initial_sync.py | 12
-rw-r--r--  synapse/rest/client/v1/login.py | 24
-rw-r--r--  synapse/rest/client/v1/logout.py | 44
-rw-r--r--  synapse/rest/client/v1/presence.py | 13
-rw-r--r--  synapse/rest/client/v1/profile.py | 69
-rw-r--r--  synapse/rest/client/v1/push_rule.py | 33
-rw-r--r--  synapse/rest/client/v1/pusher.py | 21
-rw-r--r--  synapse/rest/client/v1/room.py | 174
-rw-r--r--  synapse/rest/client/v1/voip.py | 12
-rw-r--r--  synapse/rest/client/v2_alpha/_base.py | 15
-rw-r--r--  synapse/rest/client/v2_alpha/account.py | 275
-rw-r--r--  synapse/rest/client/v2_alpha/account_data.py | 6
-rw-r--r--  synapse/rest/client/v2_alpha/account_validity.py | 91
-rw-r--r--  synapse/rest/client/v2_alpha/auth.py | 22
-rw-r--r--  synapse/rest/client/v2_alpha/capabilities.py | 9
-rw-r--r--  synapse/rest/client/v2_alpha/devices.py | 8
-rw-r--r--  synapse/rest/client/v2_alpha/filter.py | 6
-rw-r--r--  synapse/rest/client/v2_alpha/groups.py | 50
-rw-r--r--  synapse/rest/client/v2_alpha/keys.py | 10
-rw-r--r--  synapse/rest/client/v2_alpha/notifications.py | 14
-rw-r--r--  synapse/rest/client/v2_alpha/openid.py | 4
-rw-r--r--  synapse/rest/client/v2_alpha/read_marker.py | 4
-rw-r--r--  synapse/rest/client/v2_alpha/receipts.py | 4
-rw-r--r--  synapse/rest/client/v2_alpha/register.py | 77
-rw-r--r--  synapse/rest/client/v2_alpha/relations.py | 338
-rw-r--r--  synapse/rest/client/v2_alpha/report_event.py | 4
-rw-r--r--  synapse/rest/client/v2_alpha/room_keys.py | 8
-rw-r--r--  synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py | 5
-rw-r--r--  synapse/rest/client/v2_alpha/sendtodevice.py | 5
-rw-r--r--  synapse/rest/client/v2_alpha/sync.py | 66
-rw-r--r--  synapse/rest/client/v2_alpha/tags.py | 6
-rw-r--r--  synapse/rest/client/v2_alpha/thirdparty.py | 10
-rw-r--r--  synapse/rest/client/v2_alpha/tokenrefresh.py | 4
-rw-r--r--  synapse/rest/client/v2_alpha/user_directory.py | 4
-rw-r--r--  synapse/rest/client/versions.py | 1
-rw-r--r--  synapse/rest/key/v2/remote_key_resource.py | 14
-rw-r--r--  synapse/rest/media/v1/_base.py | 6
-rw-r--r--  synapse/rest/media/v1/media_repository.py | 15
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py | 10
-rw-r--r--  synapse/rest/media/v1/storage_provider.py | 1
-rw-r--r--  synapse/rest/media/v1/thumbnail_resource.py | 4
-rw-r--r--  synapse/rest/media/v1/thumbnailer.py | 35
-rw-r--r--  synapse/rest/well_known.py | 4
-rw-r--r--  synapse/server.py | 23
-rw-r--r--  synapse/server.pyi | 1
-rw-r--r--  synapse/storage/__init__.py | 50
-rw-r--r--  synapse/storage/_base.py | 94
-rw-r--r--  synapse/storage/appservice.py | 4
-rw-r--r--  synapse/storage/deviceinbox.py | 6
-rw-r--r--  synapse/storage/devices.py | 152
-rw-r--r--  synapse/storage/engines/postgres.py | 8
-rw-r--r--  synapse/storage/event_federation.py | 6
-rw-r--r--  synapse/storage/events.py | 319
-rw-r--r--  synapse/storage/events_bg_updates.py | 401
-rw-r--r--  synapse/storage/events_worker.py | 166
-rw-r--r--  synapse/storage/keys.py | 207
-rw-r--r--  synapse/storage/prepare_database.py | 18
-rw-r--r--  synapse/storage/registration.py | 587
-rw-r--r--  synapse/storage/relations.py | 476
-rw-r--r--  synapse/storage/roommember.py | 21
-rw-r--r--  synapse/storage/schema/delta/13/v13.sql | 20
-rw-r--r--  synapse/storage/schema/delta/14/upgrade_appservice_db.py | 42
-rw-r--r--  synapse/storage/schema/delta/16/unique_constraints.sql | 8
-rw-r--r--  synapse/storage/schema/delta/24/stats_reporting.sql | 12
-rw-r--r--  synapse/storage/schema/delta/30/state_stream.sql | 9
-rw-r--r--  synapse/storage/schema/delta/32/remove_indices.sql | 4
-rw-r--r--  synapse/storage/schema/delta/54/account_validity_with_renewal.sql | 30
-rw-r--r--  synapse/storage/schema/delta/54/add_validity_to_server_keys.sql (renamed from synapse/storage/schema/full_schemas/11/room_aliases.sql) | 15
-rw-r--r--  synapse/storage/schema/delta/54/delete_forward_extremities.sql | 23
-rw-r--r--  synapse/storage/schema/delta/54/drop_legacy_tables.sql | 30
-rw-r--r--  synapse/storage/schema/delta/54/relations.sql | 27
-rw-r--r--  synapse/storage/schema/delta/54/stats.sql | 80
-rw-r--r--  synapse/storage/schema/delta/54/stats2.sql | 28
-rw-r--r--  synapse/storage/schema/delta/55/track_threepid_validations.sql | 31
-rw-r--r--  synapse/storage/schema/delta/55/users_alter_deactivated.sql (renamed from synapse/storage/schema/full_schemas/11/profiles.sql) | 12
-rw-r--r--  synapse/storage/schema/full_schemas/11/event_edges.sql | 91
-rw-r--r--  synapse/storage/schema/full_schemas/11/event_signatures.sql | 55
-rw-r--r--  synapse/storage/schema/full_schemas/11/im.sql | 123
-rw-r--r--  synapse/storage/schema/full_schemas/11/keys.sql | 31
-rw-r--r--  synapse/storage/schema/full_schemas/11/media_repository.sql | 65
-rw-r--r--  synapse/storage/schema/full_schemas/11/presence.sql | 35
-rw-r--r--  synapse/storage/schema/full_schemas/11/redactions.sql | 22
-rw-r--r--  synapse/storage/schema/full_schemas/11/state.sql | 40
-rw-r--r--  synapse/storage/schema/full_schemas/11/transactions.sql | 44
-rw-r--r--  synapse/storage/schema/full_schemas/11/users.sql | 43
-rw-r--r--  synapse/storage/schema/full_schemas/16/application_services.sql | 19
-rw-r--r--  synapse/storage/schema/full_schemas/16/event_edges.sql | 30
-rw-r--r--  synapse/storage/schema/full_schemas/16/event_signatures.sql | 23
-rw-r--r--  synapse/storage/schema/full_schemas/16/im.sql | 21
-rw-r--r--  synapse/storage/schema/full_schemas/16/keys.sql | 11
-rw-r--r--  synapse/storage/schema/full_schemas/16/presence.sql | 2
-rw-r--r--  synapse/storage/schema/full_schemas/54/full.sql.postgres | 2052
-rw-r--r--  synapse/storage/schema/full_schemas/54/full.sql.sqlite | 260
-rw-r--r--  synapse/storage/schema/full_schemas/54/stream_positions.sql | 7
-rw-r--r--  synapse/storage/schema/full_schemas/README.txt | 19
-rw-r--r--  synapse/storage/search.py | 26
-rw-r--r--  synapse/storage/state_deltas.py | 30
-rw-r--r--  synapse/storage/stats.py | 468
-rw-r--r--  synapse/storage/stream.py | 224
-rw-r--r--  synapse/util/async_helpers.py | 21
-rw-r--r--  synapse/util/distributor.py | 1
-rw-r--r--  synapse/util/logcontext.py | 22
-rw-r--r--  synapse/util/manhole.py | 59
-rw-r--r--  synapse/util/ratelimitutils.py | 47
-rw-r--r--  synapse/util/retryutils.py | 55
-rw-r--r--  synapse/util/stringutils.py | 9
195 files changed, 11391 insertions, 3125 deletions
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 6bb5a8b24d..0c01546789 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -17,6 +17,13 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
+import sys
+
+# Check that we're not running on an unsupported Python version.
+if sys.version_info < (3, 5):
+    print("Synapse requires Python 3.5 or above.")
+    sys.exit(1)
+
 try:
     from twisted.internet import protocol
     from twisted.internet.protocol import Factory
@@ -27,4 +34,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "0.99.3"
+__version__ = "1.0.0"
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index e8112d5f05..79e2808dc5 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -64,6 +64,8 @@ class Auth(object):
         self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
         register_cache("cache", "token_cache", self.token_cache)
 
+        self._account_validity = hs.config.account_validity
+
     @defer.inlineCallbacks
     def check_from_context(self, room_version, event, context, do_sig_check=True):
         prev_state_ids = yield context.get_prev_state_ids(self.store)
@@ -182,11 +184,22 @@ class Auth(object):
         return event_auth.get_public_keys(invite_event)
 
     @defer.inlineCallbacks
-    def get_user_by_req(self, request, allow_guest=False, rights="access"):
+    def get_user_by_req(
+        self,
+        request,
+        allow_guest=False,
+        rights="access",
+        allow_expired=False,
+    ):
         """ Get a registered user's ID.
 
         Args:
             request - An HTTP request with an access_token query parameter.
+            allow_expired - Whether to allow the request through even if the account is
+                expired. If true, Synapse will still require an access token to be
+                provided but won't check if the account it belongs to has expired. This
+                works thanks to /login delivering access tokens regardless of accounts'
+                expiration.
         Returns:
             defer.Deferred: resolves to a ``synapse.types.Requester`` object
         Raises:
@@ -226,6 +239,17 @@ class Auth(object):
             token_id = user_info["token_id"]
             is_guest = user_info["is_guest"]
 
+            # Deny the request if the user account has expired.
+            if self._account_validity.enabled and not allow_expired:
+                user_id = user.to_string()
+                expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
+                if expiration_ts is not None and self.clock.time_msec() >= expiration_ts:
+                    raise AuthError(
+                        403,
+                        "User account has expired",
+                        errcode=Codes.EXPIRED_ACCOUNT,
+                    )
+
             # device_id may not be present if get_user_by_access_token has been
             # stubbed out.
             device_id = user_info.get("device_id")
@@ -543,7 +567,7 @@ class Auth(object):
         """ Check if the given user is a local server admin.
 
         Args:
-            user (str): mxid of user to check
+            user (UserID): user to check
 
         Returns:
             bool: True if the user is an admin
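
Note: with account validity enabled, every authenticated request now passes through the expiry check above, and expired accounts receive a 403 carrying the new ORG_MATRIX_EXPIRED_ACCOUNT errcode. A hedged sketch of how a servlet might opt out of that check for renewal-style endpoints (the servlet and its URL are hypothetical; get_user_by_req and allow_expired are as added in the diff above):

    from twisted.internet import defer

    class AccountRenewalStubServlet(object):
        """Hypothetical servlet that must stay reachable by expired accounts."""

        def __init__(self, hs):
            self.auth = hs.get_auth()

        @defer.inlineCallbacks
        def on_POST(self, request):
            # allow_expired=True still demands a valid access token, but skips
            # the expiration_ts comparison performed in get_user_by_req above
            requester = yield self.auth.get_user_by_req(request, allow_expired=True)
            defer.returnValue((200, {"user_id": requester.user.to_string()}))
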
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index dd373fa4b8..ee129c8689 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd.
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,6 +20,12 @@
 # the "depth" field on events is limited to 2**63 - 1
 MAX_DEPTH = 2**63 - 1
 
+# the maximum length for a room alias is 255 characters
+MAX_ALIAS_LENGTH = 255
+
+# the maximum length for a user id is 255 characters
+MAX_USERID_LENGTH = 255
+
 
 class Membership(object):
 
@@ -73,6 +79,7 @@ class EventTypes(object):
 
     RoomHistoryVisibility = "m.room.history_visibility"
     CanonicalAlias = "m.room.canonical_alias"
+    Encryption = "m.room.encryption"
     RoomAvatar = "m.room.avatar"
     RoomEncryption = "m.room.encryption"
     GuestAccess = "m.room.guest_access"
@@ -113,3 +120,11 @@ class UserTypes(object):
     """
     SUPPORT = "support"
     ALL_USER_TYPES = (SUPPORT,)
+
+
+class RelationTypes(object):
+    """The types of relations known to this server.
+    """
+    ANNOTATION = "m.annotation"
+    REPLACE = "m.replace"
+    REFERENCE = "m.reference"
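
Note: these relation types back the MSC1849-style aggregations wired up elsewhere in this changeset. A hedged illustration of the event content shape a client would send when using them (the event IDs are placeholders, and the "m.relates_to" shape comes from the MSC, not from this file):

    from synapse.api.constants import RelationTypes

    # An annotation (e.g. a reaction) relating to an earlier event:
    annotation_content = {
        "m.relates_to": {
            "rel_type": RelationTypes.ANNOTATION,
            "event_id": "$some_earlier_event_id",
            "key": "👍",
        }
    }

    # A replacement ("edit") of an earlier event's content:
    replacement_content = {
        "m.relates_to": {
            "rel_type": RelationTypes.REPLACE,
            "event_id": "$some_earlier_event_id",
        },
        "m.new_content": {"msgtype": "m.text", "body": "the corrected body"},
    }
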
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 0b464834ce..66201d6efe 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd.
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -60,6 +60,7 @@ class Codes(object):
     UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
     INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
     WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
+    EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
 
 
 class CodeMessageException(RuntimeError):
@@ -327,9 +328,32 @@ class RoomKeysVersionError(SynapseError):
         self.current_version = current_version
 
 
+class UnsupportedRoomVersionError(SynapseError):
+    """The client's request to create a room used a room version that the server does
+    not support."""
+    def __init__(self):
+        super(UnsupportedRoomVersionError, self).__init__(
+            code=400,
+            msg="Homeserver does not support this room version",
+            errcode=Codes.UNSUPPORTED_ROOM_VERSION,
+        )
+
+
+class ThreepidValidationError(SynapseError):
+    """An error raised when there was a problem authorising an event."""
+
+    def __init__(self, *args, **kwargs):
+        if "errcode" not in kwargs:
+            kwargs["errcode"] = Codes.FORBIDDEN
+        super(ThreepidValidationError, self).__init__(*args, **kwargs)
+
+
 class IncompatibleRoomVersionError(SynapseError):
-    """A server is trying to join a room whose version it does not support."""
+    """A server is trying to join a room whose version it does not support.
 
+    Unlike UnsupportedRoomVersionError, it is specific to the case of a
+    make_join request failing.
+    """
     def __init__(self, room_version):
         super(IncompatibleRoomVersionError, self).__init__(
             code=400,
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index e77abe1040..d644803d38 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -19,13 +19,15 @@ class EventFormatVersions(object):
     """This is an internal enum for tracking the version of the event format,
     independently from the room version.
     """
-    V1 = 1   # $id:server format
-    V2 = 2   # MSC1659-style $hash format: introduced for room v3
+    V1 = 1   # $id:server event id format
+    V2 = 2   # MSC1659-style $hash event id format: introduced for room v3
+    V3 = 3   # MSC1884-style $hash format: introduced for room v4
 
 
 KNOWN_EVENT_FORMAT_VERSIONS = {
     EventFormatVersions.V1,
     EventFormatVersions.V2,
+    EventFormatVersions.V3,
 }
 
 
@@ -48,6 +50,7 @@ class RoomVersion(object):
     disposition = attr.ib()     # str; one of the RoomDispositions
     event_format = attr.ib()    # int; one of the EventFormatVersions
     state_res = attr.ib()       # int; one of the StateResolutionVersions
+    enforce_key_validity = attr.ib()  # bool
 
 
 class RoomVersions(object):
@@ -56,29 +59,36 @@ class RoomVersions(object):
         RoomDisposition.STABLE,
         EventFormatVersions.V1,
         StateResolutionVersions.V1,
-    )
-    STATE_V2_TEST = RoomVersion(
-        "state-v2-test",
-        RoomDisposition.UNSTABLE,
-        EventFormatVersions.V1,
-        StateResolutionVersions.V2,
+        enforce_key_validity=False,
     )
     V2 = RoomVersion(
         "2",
         RoomDisposition.STABLE,
         EventFormatVersions.V1,
         StateResolutionVersions.V2,
+        enforce_key_validity=False,
     )
     V3 = RoomVersion(
         "3",
         RoomDisposition.STABLE,
         EventFormatVersions.V2,
         StateResolutionVersions.V2,
+        enforce_key_validity=False,
+    )
+    V4 = RoomVersion(
+        "4",
+        RoomDisposition.STABLE,
+        EventFormatVersions.V3,
+        StateResolutionVersions.V2,
+        enforce_key_validity=False,
+    )
+    V5 = RoomVersion(
+        "5",
+        RoomDisposition.STABLE,
+        EventFormatVersions.V3,
+        StateResolutionVersions.V2,
+        enforce_key_validity=True,
     )
-
-
-# the version we will give rooms which are created on this server
-DEFAULT_ROOM_VERSION = RoomVersions.V1
 
 
 KNOWN_ROOM_VERSIONS = {
@@ -86,6 +96,7 @@ KNOWN_ROOM_VERSIONS = {
         RoomVersions.V1,
         RoomVersions.V2,
         RoomVersions.V3,
-        RoomVersions.STATE_V2_TEST,
+        RoomVersions.V4,
+        RoomVersions.V5,
     )
 }   # type: dict[str, RoomVersion]
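
Note: with STATE_V2_TEST retired and v4/v5 added, KNOWN_ROOM_VERSIONS is now the single gate for which room versions this server understands. A hedged sketch of the lookup pattern that ties this map to the new UnsupportedRoomVersionError from api/errors.py (the helper name is illustrative, not Synapse's):

    from synapse.api.errors import UnsupportedRoomVersionError
    from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

    def lookup_room_version(version_string):
        """Illustrative helper: resolve a room-version string, or fail with
        the 400 M_UNSUPPORTED_ROOM_VERSION error added in this changeset."""
        room_version = KNOWN_ROOM_VERSIONS.get(version_string)
        if room_version is None:
            raise UnsupportedRoomVersionError()
        return room_version

    v5 = lookup_room_version("5")
    assert v5.enforce_key_validity  # v5 is the first version to enforce this
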
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index 8102176653..e16c386a14 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd.
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,11 +22,11 @@ from six.moves.urllib.parse import urlencode
 
 from synapse.config import ConfigError
 
-CLIENT_PREFIX = "/_matrix/client/api/v1"
-CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
+CLIENT_API_PREFIX = "/_matrix/client"
 FEDERATION_PREFIX = "/_matrix/federation"
 FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
 FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
+FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
 STATIC_PREFIX = "/_matrix/static"
 WEB_CLIENT_PREFIX = "/_matrix/client"
 CONTENT_REPO_PREFIX = "/_matrix/content"
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index d4c6c4c8e2..8cc990399f 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -22,13 +22,14 @@ import traceback
 import psutil
 from daemonize import Daemonize
 
-from twisted.internet import error, reactor
+from twisted.internet import defer, error, reactor
 from twisted.protocols.tls import TLSMemoryBIOFactory
 
 import synapse
 from synapse.app import check_bind_error
 from synapse.crypto import context_factory
 from synapse.util import PreserveLoggingContext
+from synapse.util.async_helpers import Linearizer
 from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
 
@@ -99,6 +100,8 @@ def start_reactor(
         logger (logging.Logger): logger instance to pass to Daemonize
     """
 
+    install_dns_limiter(reactor)
+
     def run():
         # make sure that we run the reactor with the sentinel log context,
         # otherwise other PreserveLoggingContext instances will get confused
@@ -312,3 +315,87 @@ def setup_sentry(hs):
         name = hs.config.worker_name if hs.config.worker_name else "master"
         scope.set_tag("worker_app", app)
         scope.set_tag("worker_name", name)
+
+
+def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
+    """Replaces the resolver with one that limits the number of in flight DNS
+    requests.
+
+    This is to work around https://twistedmatrix.com/trac/ticket/9620, where we
+    can run out of file descriptors and loop infinitely if we attempt to do too
+    many DNS queries at once.
+    """
+    new_resolver = _LimitedHostnameResolver(
+        reactor.nameResolver, max_dns_requests_in_flight,
+    )
+
+    reactor.installNameResolver(new_resolver)
+
+
+class _LimitedHostnameResolver(object):
+    """Wraps a IHostnameResolver, limiting the number of in-flight DNS lookups.
+    """
+
+    def __init__(self, resolver, max_dns_requests_in_flight):
+        self._resolver = resolver
+        self._limiter = Linearizer(
+            name="dns_client_limiter", max_count=max_dns_requests_in_flight,
+        )
+
+    def resolveHostName(self, resolutionReceiver, hostName, portNumber=0,
+                        addressTypes=None, transportSemantics='TCP'):
+        # We need this function to return `resolutionReceiver` so we do all the
+        # actual logic involving deferreds in a separate function.
+
+        # even though this is happening within the depths of twisted, we need to drop
+        # our logcontext before starting _resolve, otherwise: (a) _resolve will drop
+        # the logcontext if it returns an incomplete deferred; (b) _resolve will
+        # call the resolutionReceiver *with* a logcontext, which it won't be expecting.
+        with PreserveLoggingContext():
+            self._resolve(
+                resolutionReceiver,
+                hostName,
+                portNumber,
+                addressTypes,
+                transportSemantics,
+            )
+
+        return resolutionReceiver
+
+    @defer.inlineCallbacks
+    def _resolve(self, resolutionReceiver, hostName, portNumber=0,
+                 addressTypes=None, transportSemantics='TCP'):
+
+        with (yield self._limiter.queue(())):
+            # resolveHostName doesn't return a Deferred, so we need to hook into
+            # the receiver interface to get told when resolution has finished.
+
+            deferred = defer.Deferred()
+            receiver = _DeferredResolutionReceiver(resolutionReceiver, deferred)
+
+            self._resolver.resolveHostName(
+                receiver, hostName, portNumber,
+                addressTypes, transportSemantics,
+            )
+
+            yield deferred
+
+
+class _DeferredResolutionReceiver(object):
+    """Wraps a IResolutionReceiver and simply resolves the given deferred when
+    resolution is complete
+    """
+
+    def __init__(self, receiver, deferred):
+        self._receiver = receiver
+        self._deferred = deferred
+
+    def resolutionBegan(self, resolutionInProgress):
+        self._receiver.resolutionBegan(resolutionInProgress)
+
+    def addressResolved(self, address):
+        self._receiver.addressResolved(address)
+
+    def resolutionComplete(self):
+        self._deferred.callback(())
+        self._receiver.resolutionComplete()
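
Note: the resolver above uses Linearizer with max_count > 1 as a semaphore; each lookup holds one slot until resolutionComplete fires the deferred. A hedged, general-purpose sketch of the same bounded-concurrency pattern (the wrapped function is hypothetical; Linearizer's queue() API is as used in the diff above):

    from twisted.internet import defer
    from synapse.util.async_helpers import Linearizer

    # at most 100 callers may be inside the `with` block at once; the rest
    # queue up and proceed as slots are released
    _limiter = Linearizer(name="example_limiter", max_count=100)

    @defer.inlineCallbacks
    def bounded_call(async_fn, *args):
        with (yield _limiter.queue(())):
            result = yield async_fn(*args)
        defer.returnValue(result)
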
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index beaea64a61..a16e037f32 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -38,6 +38,7 @@ from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
+from synapse.replication.slave.storage.profile import SlavedProfileStore
 from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
@@ -45,6 +46,7 @@ from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.client.v1.login import LoginRestServlet
+from synapse.rest.client.v1.push_rule import PushRuleRestServlet
 from synapse.rest.client.v1.room import (
     JoinedRoomMemberListRestServlet,
     PublicRoomListRestServlet,
@@ -52,9 +54,11 @@ from synapse.rest.client.v1.room import (
     RoomMemberListRestServlet,
     RoomStateRestServlet,
 )
+from synapse.rest.client.v1.voip import VoipRestServlet
 from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
 from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
+from synapse.rest.client.versions import VersionsRestServlet
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
@@ -78,6 +82,7 @@ class ClientReaderSlavedStore(
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
     SlavedTransactionStore,
+    SlavedProfileStore,
     SlavedClientIpStore,
     BaseSlavedStore,
 ):
@@ -109,12 +114,12 @@ class ClientReaderServer(HomeServer):
                     ThreepidRestServlet(self).register(resource)
                     KeyQueryServlet(self).register(resource)
                     KeyChangesServlet(self).register(resource)
+                    VoipRestServlet(self).register(resource)
+                    PushRuleRestServlet(self).register(resource)
+                    VersionsRestServlet().register(resource)
 
                     resources.update({
-                        "/_matrix/client/r0": resource,
-                        "/_matrix/client/unstable": resource,
-                        "/_matrix/client/v2_alpha": resource,
-                        "/_matrix/client/api/v1": resource,
+                        "/_matrix/client": resource,
                     })
 
         root_resource = create_resource_tree(resources, NoResource())
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index 8479fee738..6504da5278 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -37,8 +37,7 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns
-from synapse.rest.client.v2_alpha._base import client_v2_patterns
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
@@ -49,11 +48,11 @@ from synapse.util.versionstring import get_version_string
 logger = logging.getLogger("synapse.app.frontend_proxy")
 
 
-class PresenceStatusStubServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")
+class PresenceStatusStubServlet(RestServlet):
+    PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")
 
     def __init__(self, hs):
-        super(PresenceStatusStubServlet, self).__init__(hs)
+        super(PresenceStatusStubServlet, self).__init__()
         self.http_client = hs.get_simple_http_client()
         self.auth = hs.get_auth()
         self.main_uri = hs.config.worker_main_http_uri
@@ -84,7 +83,7 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
 
 
 class KeyUploadServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
+    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
 
     def __init__(self, hs):
         """
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 869c028d1f..b27b12e73d 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -62,6 +62,7 @@ from synapse.python_dependencies import check_requirements
 from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
 from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
 from synapse.rest import ClientRestResource
+from synapse.rest.admin import AdminRestResource
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.rest.well_known import WellKnownResource
@@ -180,6 +181,7 @@ class SynapseHomeServer(HomeServer):
                 "/_matrix/client/v2_alpha": client_resource,
                 "/_matrix/client/versions": client_resource,
                 "/.well-known/matrix/client": WellKnownResource(self),
+                "/_synapse/admin": AdminRestResource(self),
             })
 
             if self.get_config().saml2_enabled:
@@ -518,6 +520,7 @@ def run(hs):
             uptime = 0
 
         stats["homeserver"] = hs.config.server_name
+        stats["server_context"] = hs.config.server_context
         stats["timestamp"] = now
         stats["uptime_seconds"] = uptime
         version = sys.version_info
@@ -537,6 +540,7 @@ def run(hs):
         stats["total_room_count"] = room_count
 
         stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
+        stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
         stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
         stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
 
@@ -558,7 +562,6 @@ def run(hs):
 
         stats["database_engine"] = hs.get_datastore().database_engine_name
         stats["database_server_version"] = hs.get_datastore().get_server_version()
-
         logger.info("Reporting stats to matrix.org: %s" % (stats,))
         try:
             yield hs.get_simple_http_client().put_json(
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 93d70cff14..86018dfcce 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2015-2016 OpenMarket Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,24 +19,87 @@ from __future__ import print_function
 
 # This file can't be called email.py because if it is, we cannot:
 import email.utils
-import logging
 import os
 
 import pkg_resources
 
 from ._base import Config, ConfigError
 
-logger = logging.getLogger(__name__)
-
 
 class EmailConfig(Config):
     def read_config(self, config):
+        # TODO: We should separate better the email configuration from the notification
+        # and account validity config.
+
         self.email_enable_notifs = False
 
         email_config = config.get("email", {})
+
+        self.email_smtp_host = email_config.get("smtp_host", None)
+        self.email_smtp_port = email_config.get("smtp_port", None)
+        self.email_smtp_user = email_config.get("smtp_user", None)
+        self.email_smtp_pass = email_config.get("smtp_pass", None)
+        self.require_transport_security = email_config.get(
+            "require_transport_security", False
+        )
+        if "app_name" in email_config:
+            self.email_app_name = email_config["app_name"]
+        else:
+            self.email_app_name = "Matrix"
+
+        # TODO: Rename notif_from to something more generic, or have a separate
+        # from for password resets, message notifications, etc?
+        # Currently the email section is a bit bogged down with settings for
+        # multiple functions. Would be good to split it out into separate
+        # sections and only put the common ones under email:
+        self.email_notif_from = email_config.get("notif_from", None)
+        if self.email_notif_from is not None:
+            # make sure it's valid
+            parsed = email.utils.parseaddr(self.email_notif_from)
+            if parsed[1] == '':
+                raise RuntimeError("Invalid notif_from address")
+
+        template_dir = email_config.get("template_dir")
+        # we need an absolute path, because we change directory after starting (and
+        # we don't yet know what auxiliary templates like mail.css we will need).
+        # (Note that loading as package_resources with jinja.PackageLoader doesn't
+        # work for the same reason.)
+        if not template_dir:
+            template_dir = pkg_resources.resource_filename(
+                'synapse', 'res/templates'
+            )
+
+        self.email_template_dir = os.path.abspath(template_dir)
+
         self.email_enable_notifs = email_config.get("enable_notifs", False)
+        account_validity_renewal_enabled = config.get(
+            "account_validity", {},
+        ).get("renew_at")
 
-        if self.email_enable_notifs:
+        email_trust_identity_server_for_password_resets = email_config.get(
+            "trust_identity_server_for_password_resets", False,
+        )
+        self.email_password_reset_behaviour = (
+            "remote" if email_trust_identity_server_for_password_resets else "local"
+        )
+        self.password_resets_were_disabled_due_to_email_config = False
+        if self.email_password_reset_behaviour == "local" and email_config == {}:
+            # We can't warn the user here that this has happened; instead we
+            # do so when a user attempts to reset their password
+            self.password_resets_were_disabled_due_to_email_config = True
+
+            self.email_password_reset_behaviour = "off"
+
+        # Get lifetime of a validation token in milliseconds
+        self.email_validation_token_lifetime = self.parse_duration(
+            email_config.get("validation_token_lifetime", "1h")
+        )
+
+        if (
+            self.email_enable_notifs
+            or account_validity_renewal_enabled
+            or self.email_password_reset_behaviour == "local"
+        ):
             # make sure we can import the required deps
             import jinja2
             import bleach
@@ -42,6 +107,68 @@ class EmailConfig(Config):
             jinja2
             bleach
 
+        if self.email_password_reset_behaviour == "local":
+            required = [
+                "smtp_host",
+                "smtp_port",
+                "notif_from",
+            ]
+
+            missing = []
+            for k in required:
+                if k not in email_config:
+                    missing.append(k)
+
+            if (len(missing) > 0):
+                raise RuntimeError(
+                    "email.password_reset_behaviour is set to 'local' "
+                    "but required keys are missing: %s" %
+                    (", ".join(["email." + k for k in missing]),)
+                )
+
+            # Templates for password reset emails
+            self.email_password_reset_template_html = email_config.get(
+                "password_reset_template_html", "password_reset.html",
+            )
+            self.email_password_reset_template_text = email_config.get(
+                "password_reset_template_text", "password_reset.txt",
+            )
+            self.email_password_reset_failure_template = email_config.get(
+                "password_reset_failure_template", "password_reset_failure.html",
+            )
+            # This template does not support any replaceable variables, so we will
+            # read it from the disk once during setup
+            email_password_reset_success_template = email_config.get(
+                "password_reset_success_template", "password_reset_success.html",
+            )
+
+            # Check templates exist
+            for f in [self.email_password_reset_template_html,
+                      self.email_password_reset_template_text,
+                      self.email_password_reset_failure_template,
+                      email_password_reset_success_template]:
+                p = os.path.join(self.email_template_dir, f)
+                if not os.path.isfile(p):
+                    raise ConfigError("Unable to find template file %s" % (p, ))
+
+            # Retrieve content of web templates
+            filepath = os.path.join(
+                self.email_template_dir,
+                email_password_reset_success_template,
+            )
+            self.email_password_reset_success_html_content = self.read_file(
+                filepath,
+                "email.password_reset_template_success_html",
+            )
+
+            if config.get("public_baseurl") is None:
+                raise RuntimeError(
+                    "email.password_reset_behaviour is set to 'local' but no "
+                    "public_baseurl is set. This is necessary to generate password "
+                    "reset links"
+                )
+
+        if self.email_enable_notifs:
             required = [
                 "smtp_host",
                 "smtp_port",
@@ -66,28 +193,13 @@ class EmailConfig(Config):
                     "email.enable_notifs is True but no public_baseurl is set"
                 )
 
-            self.email_smtp_host = email_config["smtp_host"]
-            self.email_smtp_port = email_config["smtp_port"]
-            self.email_notif_from = email_config["notif_from"]
             self.email_notif_template_html = email_config["notif_template_html"]
             self.email_notif_template_text = email_config["notif_template_text"]
 
-            template_dir = email_config.get("template_dir")
-            # we need an absolute path, because we change directory after starting (and
-            # we don't yet know what auxilliary templates like mail.css we will need).
-            # (Note that loading as package_resources with jinja.PackageLoader doesn't
-            # work for the same reason.)
-            if not template_dir:
-                template_dir = pkg_resources.resource_filename(
-                    'synapse', 'res/templates'
-                )
-            template_dir = os.path.abspath(template_dir)
-
             for f in self.email_notif_template_text, self.email_notif_template_html:
-                p = os.path.join(template_dir, f)
+                p = os.path.join(self.email_template_dir, f)
                 if not os.path.isfile(p):
                     raise ConfigError("Unable to find email template file %s" % (p, ))
-            self.email_template_dir = template_dir
 
             self.email_notif_for_new_users = email_config.get(
                 "notif_for_new_users", True
@@ -95,35 +207,24 @@ class EmailConfig(Config):
             self.email_riot_base_url = email_config.get(
                 "riot_base_url", None
             )
-            self.email_smtp_user = email_config.get(
-                "smtp_user", None
-            )
-            self.email_smtp_pass = email_config.get(
-                "smtp_pass", None
+
+        if account_validity_renewal_enabled:
+            self.email_expiry_template_html = email_config.get(
+                "expiry_template_html", "notice_expiry.html",
             )
-            self.require_transport_security = email_config.get(
-                "require_transport_security", False
+            self.email_expiry_template_text = email_config.get(
+                "expiry_template_text", "notice_expiry.txt",
             )
-            if "app_name" in email_config:
-                self.email_app_name = email_config["app_name"]
-            else:
-                self.email_app_name = "Matrix"
 
-            # make sure it's valid
-            parsed = email.utils.parseaddr(self.email_notif_from)
-            if parsed[1] == '':
-                raise RuntimeError("Invalid notif_from address")
-        else:
-            self.email_enable_notifs = False
-            # Not much point setting defaults for the rest: it would be an
-            # error for them to be used.
+            for f in self.email_expiry_template_text, self.email_expiry_template_html:
+                p = os.path.join(self.email_template_dir, f)
+                if not os.path.isfile(p):
+                    raise ConfigError("Unable to find email template file %s" % (p, ))
 
     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
-        # Enable sending emails for notification events
-        # Defining a custom URL for Riot is only needed if email notifications
-        # should contain links to a self-hosted installation of Riot; when set
-        # the "app_name" setting is ignored.
+        # Enable sending emails for password resets, notification events or
+        # account expiry notices
         #
         # If your SMTP server requires authentication, the optional smtp_user &
         # smtp_pass variables should be used
@@ -131,17 +232,62 @@ class EmailConfig(Config):
         #email:
         #   enable_notifs: false
         #   smtp_host: "localhost"
-        #   smtp_port: 25
+        #   smtp_port: 25 # SSL: 465, STARTTLS: 587
         #   smtp_user: "exampleusername"
         #   smtp_pass: "examplepassword"
         #   require_transport_security: False
         #   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
         #   app_name: Matrix
-        #   # if template_dir is unset, uses the example templates that are part of
-        #   # the Synapse distribution.
+        #
+        #   # Enable email notifications by default
+        #   notif_for_new_users: True
+        #
+        #   # Defining a custom URL for Riot is only needed if email notifications
+        #   # should contain links to a self-hosted installation of Riot; when set
+        #   # the "app_name" setting is ignored
+        #   riot_base_url: "http://localhost/riot"
+        #
+        #   # Enable sending password reset emails via the configured, trusted
+        #   # identity servers
+        #   #
+        #   # IMPORTANT! This will give a malicious or overtaken identity server
+        #   # the ability to reset passwords for your users! Make absolutely sure
+        #   # that you want to do this! It is strongly recommended that password
+        #   # reset emails be sent by the homeserver instead
+        #   #
+        #   # If this option is set to false and SMTP options have not been
+        #   # configured, resetting user passwords via email will be disabled
+        #   #trust_identity_server_for_password_resets: false
+        #
+        #   # Configure the time that a validation email or text message code
+        #   # will expire after sending
+        #   #
+        #   # This is currently used for password resets
+        #   #validation_token_lifetime: 1h
+        #
+        #   # Template directory. All template files should be stored within this
+        #   # directory
+        #   #
         #   #template_dir: res/templates
+        #
+        #   # Templates for email notifications
+        #   #
         #   notif_template_html: notif_mail.html
         #   notif_template_text: notif_mail.txt
-        #   notif_for_new_users: True
-        #   riot_base_url: "http://localhost/riot"
+        #
+        #   # Templates for account expiry notices
+        #   #
+        #   expiry_template_html: notice_expiry.html
+        #   expiry_template_text: notice_expiry.txt
+        #
+        #   # Templates for password reset emails sent by the homeserver
+        #   #
+        #   #password_reset_template_html: password_reset.html
+        #   #password_reset_template_text: password_reset.txt
+        #
+        #   # Templates for password reset success and failure pages that a user
+        #   # will see after attempting to reset their password
+        #   #
+        #   #password_reset_template_success_html: password_reset_success.html
+        #   #password_reset_template_failure_html: password_reset_failure.html
         """
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 727fdc54d8..acadef4fd3 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -13,6 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 from .api import ApiConfig
 from .appservice import AppServiceConfig
 from .captcha import CaptchaConfig
@@ -36,20 +37,43 @@ from .saml2_config import SAML2Config
 from .server import ServerConfig
 from .server_notices_config import ServerNoticesConfig
 from .spam_checker import SpamCheckerConfig
+from .stats import StatsConfig
+from .third_party_event_rules import ThirdPartyRulesConfig
 from .tls import TlsConfig
 from .user_directory import UserDirectoryConfig
 from .voip import VoipConfig
 from .workers import WorkerConfig
 
 
-class HomeServerConfig(ServerConfig, TlsConfig, DatabaseConfig, LoggingConfig,
-                       RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
-                       VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
-                       AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
-                       JWTConfig, PasswordConfig, EmailConfig,
-                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,
-                       SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,
-                       ConsentConfig,
-                       ServerNoticesConfig, RoomDirectoryConfig,
-                       ):
+class HomeServerConfig(
+    ServerConfig,
+    TlsConfig,
+    DatabaseConfig,
+    LoggingConfig,
+    RatelimitConfig,
+    ContentRepositoryConfig,
+    CaptchaConfig,
+    VoipConfig,
+    RegistrationConfig,
+    MetricsConfig,
+    ApiConfig,
+    AppServiceConfig,
+    KeyConfig,
+    SAML2Config,
+    CasConfig,
+    JWTConfig,
+    PasswordConfig,
+    EmailConfig,
+    WorkerConfig,
+    PasswordAuthProviderConfig,
+    PushConfig,
+    SpamCheckerConfig,
+    GroupsConfig,
+    UserDirectoryConfig,
+    ConsentConfig,
+    StatsConfig,
+    ServerNoticesConfig,
+    RoomDirectoryConfig,
+    ThirdPartyRulesConfig,
+):
     pass
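
Note: HomeServerConfig is a flat aggregation of per-area Config mixins, one per settings module above. A hedged, self-contained sketch of how that mixin style can populate many config areas from one flat dict (illustrative only; Synapse's actual Config base class works differently):

    class BaseConfig(object):
        def read_all(self, config):
            # each mixin contributes a read_config(); walking the MRO lets
            # one flat config dict populate every area in turn
            for cls in type(self).__mro__:
                reader = cls.__dict__.get("read_config")
                if reader is not None:
                    reader(self, config)

    class ServerArea(BaseConfig):
        def read_config(self, config):
            self.server_name = config.get("server_name", "localhost")

    class StatsArea(BaseConfig):
        def read_config(self, config):
            self.stats_enabled = config.get("enable_stats", True)

    class ExampleHomeServerConfig(ServerArea, StatsArea):
        pass

    cfg = ExampleHomeServerConfig()
    cfg.read_all({"server_name": "example.com"})
    print(cfg.server_name, cfg.stats_enabled)  # example.com True
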
diff --git a/synapse/config/key.py b/synapse/config/key.py
index eb10259818..424875feae 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,6 +18,8 @@ import hashlib
 import logging
 import os
 
+import attr
+import jsonschema
 from signedjson.key import (
     NACL_ED25519,
     decode_signing_key_base64,
@@ -32,11 +35,36 @@ from synapse.util.stringutils import random_string, random_string_with_symbols
 
 from ._base import Config, ConfigError
 
+INSECURE_NOTARY_ERROR = """\
+Your server is configured to accept key server responses without signature
+validation or TLS certificate validation. This is likely to be very insecure. If
+you are *sure* you want to do this, set 'accept_keys_insecurely' on the
+keyserver configuration."""
+
+RELYING_ON_MATRIX_KEY_ERROR = """\
+Your server is configured to accept key server responses without TLS certificate
+validation, and which are only signed by the old (possibly compromised)
+matrix.org signing key 'ed25519:auto'. This likely isn't what you want to do,
+and you should enable 'federation_verify_certificates' in your configuration.
+
+If you are *sure* you want to do this, set 'accept_keys_insecurely' on the
+trusted_key_server configuration."""
+
+
 logger = logging.getLogger(__name__)
 
 
-class KeyConfig(Config):
+@attr.s
+class TrustedKeyServer(object):
+    # string: name of the server.
+    server_name = attr.ib()
+
+    # dict[str,VerifyKey]|None: map from key id to key object, or None to disable
+    # signature verification.
+    verify_keys = attr.ib(default=None)
+
 
+class KeyConfig(Config):
     def read_config(self, config):
         # the signing key can be specified inline or in a separate file
         if "signing_key" in config:
@@ -49,16 +77,27 @@ class KeyConfig(Config):
             config.get("old_signing_keys", {})
         )
         self.key_refresh_interval = self.parse_duration(
-            config.get("key_refresh_interval", "1d"),
+            config.get("key_refresh_interval", "1d")
         )
-        self.perspectives = self.read_perspectives(
-            config.get("perspectives", {}).get("servers", {
-                "matrix.org": {"verify_keys": {
-                    "ed25519:auto": {
-                        "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
-                    }
-                }}
-            })
+
+        # if neither trusted_key_servers nor perspectives are given, use the default.
+        if "perspectives" not in config and "trusted_key_servers" not in config:
+            key_servers = [{"server_name": "matrix.org"}]
+        else:
+            key_servers = config.get("trusted_key_servers", [])
+
+            if not isinstance(key_servers, list):
+                raise ConfigError(
+                    "trusted_key_servers, if given, must be a list, not a %s"
+                    % (type(key_servers).__name__,)
+                )
+
+            # merge the 'perspectives' config into the 'trusted_key_servers' config.
+            key_servers.extend(_perspectives_to_key_servers(config))
+
+        # list of TrustedKeyServer objects
+        self.key_servers = list(
+            _parse_key_servers(key_servers, self.federation_verify_certificates)
         )
 
         self.macaroon_secret_key = config.get(
@@ -78,8 +117,9 @@ class KeyConfig(Config):
         # falsification of values
         self.form_secret = config.get("form_secret", None)
 
-    def default_config(self, config_dir_path, server_name, generate_secrets=False,
-                       **kwargs):
+    def default_config(
+        self, config_dir_path, server_name, generate_secrets=False, **kwargs
+    ):
         base_key_name = os.path.join(config_dir_path, server_name)
 
         if generate_secrets:
@@ -91,7 +131,8 @@ class KeyConfig(Config):
             macaroon_secret_key = "# macaroon_secret_key: <PRIVATE STRING>"
             form_secret = "# form_secret: <PRIVATE STRING>"
 
-        return """\
+        return (
+            """\
         # a secret which is used to sign access tokens. If none is specified,
         # the registration_shared_secret is used, if one is given; otherwise,
         # a secret key is derived from the signing key.
@@ -133,33 +174,53 @@ class KeyConfig(Config):
 
         # The trusted servers to download signing keys from.
         #
-        #perspectives:
-        #  servers:
-        #    "matrix.org":
-        #      verify_keys:
-        #        "ed25519:auto":
-        #          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
-        """ % locals()
-
-    def read_perspectives(self, perspectives_servers):
-        servers = {}
-        for server_name, server_config in perspectives_servers.items():
-            for key_id, key_data in server_config["verify_keys"].items():
-                if is_signing_algorithm_supported(key_id):
-                    key_base64 = key_data["key"]
-                    key_bytes = decode_base64(key_base64)
-                    verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                    servers.setdefault(server_name, {})[key_id] = verify_key
-        return servers
+        # When we need to fetch a signing key, each server is tried in parallel.
+        #
+        # Normally, the connection to the key server is validated via TLS certificates.
+        # Additional security can be provided by configuring a `verify key`, which
+        # will make synapse check that the response is signed by that key.
+        #
+        # This setting supersedes an older setting named `perspectives`. The old format
+        # is still supported for backwards-compatibility, but it is deprecated.
+        #
+        # Options for each entry in the list include:
+        #
+        #    server_name: the name of the server. required.
+        #
+        #    verify_keys: an optional map from key id to base64-encoded public key.
+        #       If specified, we will check that the response is signed by at least
+        #       one of the given keys.
+        #
+        #    accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset,
+        #       and federation_verify_certificates is not `true`, synapse will refuse
+        #       to start, because this would allow anyone who can spoof DNS responses
+        #       to masquerade as the trusted key server. If you know what you are doing
+        #       and are sure that your network environment provides a secure connection
+        #       to the key server, you can set this to `true` to override this
+        #       behaviour.
+        #
+        # An example configuration might look like:
+        #
+        #trusted_key_servers:
+        #  - server_name: "my_trusted_server.example.com"
+        #    verify_keys:
+        #      "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
+        #  - server_name: "my_other_trusted_server.example.com"
+        #
+        # The default configuration is:
+        #
+        #trusted_key_servers:
+        #  - server_name: "matrix.org"
+        """
+            % locals()
+        )
 
     def read_signing_key(self, signing_key_path):
         signing_keys = self.read_file(signing_key_path, "signing_key")
         try:
             return read_signing_keys(signing_keys.splitlines(True))
         except Exception as e:
-            raise ConfigError(
-                "Error reading signing_key: %s" % (str(e))
-            )
+            raise ConfigError("Error reading signing_key: %s" % (str(e)))
 
     def read_old_signing_keys(self, old_signing_keys):
         keys = {}
@@ -182,9 +243,7 @@ class KeyConfig(Config):
         if not self.path_exists(signing_key_path):
             with open(signing_key_path, "w") as signing_key_file:
                 key_id = "a_" + random_string(4)
-                write_signing_keys(
-                    signing_key_file, (generate_signing_key(key_id),),
-                )
+                write_signing_keys(signing_key_file, (generate_signing_key(key_id),))
         else:
             signing_keys = self.read_file(signing_key_path, "signing_key")
             if len(signing_keys.split("\n")[0].split()) == 1:
@@ -194,6 +253,116 @@ class KeyConfig(Config):
                     NACL_ED25519, key_id, signing_keys.split("\n")[0]
                 )
                 with open(signing_key_path, "w") as signing_key_file:
-                    write_signing_keys(
-                        signing_key_file, (key,),
+                    write_signing_keys(signing_key_file, (key,))
+
+
+def _perspectives_to_key_servers(config):
+    """Convert old-style 'perspectives' configs into new-style 'trusted_key_servers'
+
+    Returns an iterable of entries to add to trusted_key_servers.
+    """
+
+    # 'perspectives' looks like:
+    #
+    # {
+    #     "servers": {
+    #         "matrix.org": {
+    #             "verify_keys": {
+    #                 "ed25519:auto": {
+    #                     "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
+    #                 }
+    #             }
+    #         }
+    #     }
+    # }
+    #
+    # 'trusted_keys' looks like:
+    #
+    # [
+    #     {
+    #         "server_name": "matrix.org",
+    #         "verify_keys": {
+    #             "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
+    #         }
+    #     }
+    # ]
+
+    perspectives_servers = config.get("perspectives", {}).get("servers", {})
+
+    for server_name, server_opts in perspectives_servers.items():
+        trusted_key_server_entry = {"server_name": server_name}
+        verify_keys = server_opts.get("verify_keys")
+        if verify_keys is not None:
+            trusted_key_server_entry["verify_keys"] = {
+                key_id: key_data["key"] for key_id, key_data in verify_keys.items()
+            }
+        yield trusted_key_server_entry
+
+
+TRUSTED_KEY_SERVERS_SCHEMA = {
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "description": "schema for the trusted_key_servers setting",
+    "type": "array",
+    "items": {
+        "type": "object",
+        "properties": {
+            "server_name": {"type": "string"},
+            "verify_keys": {
+                "type": "object",
+                # each key must be a base64 string
+                "additionalProperties": {"type": "string"},
+            },
+        },
+        "required": ["server_name"],
+    },
+}
+
+
+def _parse_key_servers(key_servers, federation_verify_certificates):
+    try:
+        jsonschema.validate(key_servers, TRUSTED_KEY_SERVERS_SCHEMA)
+    except jsonschema.ValidationError as e:
+        raise ConfigError("Unable to parse 'trusted_key_servers': " + e.message)
+
+    for server in key_servers:
+        server_name = server["server_name"]
+        result = TrustedKeyServer(server_name=server_name)
+
+        verify_keys = server.get("verify_keys")
+        if verify_keys is not None:
+            result.verify_keys = {}
+            for key_id, key_base64 in verify_keys.items():
+                if not is_signing_algorithm_supported(key_id):
+                    raise ConfigError(
+                        "Unsupported signing algorithm on key %s for server %s in "
+                        "trusted_key_servers" % (key_id, server_name)
+                    )
+                try:
+                    key_bytes = decode_base64(key_base64)
+                    verify_key = decode_verify_key_bytes(key_id, key_bytes)
+                except Exception as e:
+                    raise ConfigError(
+                        "Unable to parse key %s for server %s in "
+                        "trusted_key_servers: %s" % (key_id, server_name, e)
                     )
+
+                result.verify_keys[key_id] = verify_key
+
+        if (
+            not federation_verify_certificates
+            and not server.get("accept_keys_insecurely")
+        ):
+            _assert_keyserver_has_verify_keys(result)
+
+        yield result
+
+
+def _assert_keyserver_has_verify_keys(trusted_key_server):
+    if not trusted_key_server.verify_keys:
+        raise ConfigError(INSECURE_NOTARY_ERROR)
+
+    # also check that they are not blindly checking the old matrix.org key
+    if trusted_key_server.server_name == "matrix.org" and any(
+        key_id == "ed25519:auto" for key_id in trusted_key_server.verify_keys
+    ):
+        raise ConfigError(RELYING_ON_MATRIX_KEY_ERROR)
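As an illustration of the conversion implemented by _perspectives_to_key_servers above, a minimal sketch (standalone apart from the function itself; the input is the old-style config from its docstring):

    old_config = {
        "perspectives": {
            "servers": {
                "matrix.org": {
                    "verify_keys": {
                        "ed25519:auto": {
                            "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
                        }
                    }
                }
            }
        }
    }

    # Yields a single new-style entry:
    # {"server_name": "matrix.org",
    #  "verify_keys": {"ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"}}
    print(list(_perspectives_to_key_servers(old_config)))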
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 5a68399e63..5a9adac480 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -16,16 +16,56 @@ from ._base import Config
 
 
 class RateLimitConfig(object):
-    def __init__(self, config):
-        self.per_second = config.get("per_second", 0.17)
-        self.burst_count = config.get("burst_count", 3.0)
+    def __init__(self, config, defaults={"per_second": 0.17, "burst_count": 3.0}):
+        self.per_second = config.get("per_second", defaults["per_second"])
+        self.burst_count = config.get("burst_count", defaults["burst_count"])
 
 
-class RatelimitConfig(Config):
+class FederationRateLimitConfig(object):
+    _items_and_default = {
+        "window_size": 10000,
+        "sleep_limit": 10,
+        "sleep_delay": 500,
+        "reject_limit": 50,
+        "concurrent": 3,
+    }
+
+    def __init__(self, **kwargs):
+        for i in self._items_and_default.keys():
+            setattr(self, i, kwargs.get(i) or self._items_and_default[i])
+
 
+class RatelimitConfig(Config):
     def read_config(self, config):
-        self.rc_messages_per_second = config.get("rc_messages_per_second", 0.2)
-        self.rc_message_burst_count = config.get("rc_message_burst_count", 10.0)
+
+        # Load the new-style messages config if it exists. Otherwise fall back
+        # to the old method.
+        if "rc_message" in config:
+            self.rc_message = RateLimitConfig(
+                config["rc_message"], defaults={"per_second": 0.2, "burst_count": 10.0}
+            )
+        else:
+            self.rc_message = RateLimitConfig(
+                {
+                    "per_second": config.get("rc_messages_per_second", 0.2),
+                    "burst_count": config.get("rc_message_burst_count", 10.0),
+                }
+            )
+
+        # Load the new-style federation config, if it exists. Otherwise, fall
+        # back to the old method.
+        if "federation_rc" in config:
+            self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
+        else:
+            self.rc_federation = FederationRateLimitConfig(
+                **{
+                    "window_size": config.get("federation_rc_window_size"),
+                    "sleep_limit": config.get("federation_rc_sleep_limit"),
+                    "sleep_delay": config.get("federation_rc_sleep_delay"),
+                    "reject_limit": config.get("federation_rc_reject_limit"),
+                    "concurrent": config.get("federation_rc_concurrent"),
+                }
+            )
 
         self.rc_registration = RateLimitConfig(config.get("rc_registration", {}))
 
@@ -33,38 +73,26 @@ class RatelimitConfig(Config):
         self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {}))
         self.rc_login_account = RateLimitConfig(rc_login_config.get("account", {}))
         self.rc_login_failed_attempts = RateLimitConfig(
-            rc_login_config.get("failed_attempts", {}),
+            rc_login_config.get("failed_attempts", {})
         )
 
-        self.federation_rc_window_size = config.get("federation_rc_window_size", 1000)
-        self.federation_rc_sleep_limit = config.get("federation_rc_sleep_limit", 10)
-        self.federation_rc_sleep_delay = config.get("federation_rc_sleep_delay", 500)
-        self.federation_rc_reject_limit = config.get("federation_rc_reject_limit", 50)
-        self.federation_rc_concurrent = config.get("federation_rc_concurrent", 3)
-
         self.federation_rr_transactions_per_room_per_second = config.get(
-            "federation_rr_transactions_per_room_per_second", 50,
+            "federation_rr_transactions_per_room_per_second", 50
         )
 
     def default_config(self, **kwargs):
         return """\
         ## Ratelimiting ##
 
-        # Number of messages a client can send per second
-        #
-        #rc_messages_per_second: 0.2
-
-        # Number of message a client can send before being throttled
-        #
-        #rc_message_burst_count: 10.0
-
-        # Ratelimiting settings for registration and login.
+        # Ratelimiting settings for client actions (registration, login, messaging).
         #
         # Each ratelimiting configuration is made of two parameters:
         #   - per_second: number of requests a client can send per second.
         #   - burst_count: number of requests a client can send before being throttled.
         #
         # Synapse currently uses the following configurations:
+        #   - one for messages that ratelimits sending based on the account the client
+        #     is using
         #   - one for registration that ratelimits registration requests based on the
         #     client's IP address.
         #   - one for login that ratelimits login requests based on the client's IP
@@ -77,6 +105,10 @@ class RatelimitConfig(Config):
         #
         # The defaults are as shown below.
         #
+        #rc_message:
+        #  per_second: 0.2
+        #  burst_count: 10
+        #
         #rc_registration:
         #  per_second: 0.17
         #  burst_count: 3
@@ -92,29 +124,28 @@ class RatelimitConfig(Config):
         #    per_second: 0.17
         #    burst_count: 3
 
-        # The federation window size in milliseconds
-        #
-        #federation_rc_window_size: 1000
-
-        # The number of federation requests from a single server in a window
-        # before the server will delay processing the request.
-        #
-        #federation_rc_sleep_limit: 10
 
-        # The duration in milliseconds to delay processing events from
-        # remote servers by if they go over the sleep limit.
+        # Ratelimiting settings for incoming federation
         #
-        #federation_rc_sleep_delay: 500
-
-        # The maximum number of concurrent federation requests allowed
-        # from a single server
+        # The rc_federation configuration is made up of the following settings:
+        #   - window_size: window size in milliseconds
+        #   - sleep_limit: number of federation requests from a single server in
+        #     a window before the server will delay processing the request.
+        #   - sleep_delay: duration in milliseconds to delay processing events
+        #     from remote servers by if they go over the sleep limit.
+        #   - reject_limit: maximum number of concurrent federation requests
+        #     allowed from a single server
+        #   - concurrent: number of federation requests to concurrently process
+        #     from a single server
         #
-        #federation_rc_reject_limit: 50
-
-        # The number of federation requests to concurrently process from a
-        # single server
+        # The defaults are as shown below.
         #
-        #federation_rc_concurrent: 3
+        #rc_federation:
+        #  window_size: 1000
+        #  sleep_limit: 10
+        #  sleep_delay: 500
+        #  reject_limit: 50
+        #  concurrent: 3
 
         # Target outgoing federation transaction frequency for sending read-receipts,
         # per-room.
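A short sketch of how the old- and new-style federation settings above end up on the same FederationRateLimitConfig attributes (values are the documented defaults):

    # New-style: the rc_federation dict is splatted straight in.
    rc_new = FederationRateLimitConfig(**{"sleep_limit": 10, "concurrent": 3})

    # Old-style: unset options arrive as None (config.get returns None) and
    # fall through to _items_and_default via the `or` in the constructor.
    rc_old = FederationRateLimitConfig(sleep_limit=None, concurrent=3)

    assert rc_new.sleep_limit == rc_old.sleep_limit == 10
    assert rc_new.reject_limit == rc_old.reject_limit == 50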
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index fcfda341e9..aad3400819 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -20,6 +20,31 @@ from synapse.types import RoomAlias
 from synapse.util.stringutils import random_string_with_symbols
 
 
+class AccountValidityConfig(Config):
+    def __init__(self, config, synapse_config):
+        self.enabled = config.get("enabled", False)
+        self.renew_by_email_enabled = ("renew_at" in config)
+
+        if self.enabled:
+            if "period" in config:
+                self.period = self.parse_duration(config["period"])
+            else:
+                raise ConfigError("'period' is required when using account validity")
+
+            if "renew_at" in config:
+                self.renew_at = self.parse_duration(config["renew_at"])
+
+            if "renew_email_subject" in config:
+                self.renew_email_subject = config["renew_email_subject"]
+            else:
+                self.renew_email_subject = "Renew your %(app)s account"
+
+            self.startup_job_max_delta = self.period * 10. / 100.
+
+        if self.renew_by_email_enabled and "public_baseurl" not in synapse_config:
+            raise ConfigError("Can't send renewal emails without 'public_baseurl'")
+
+
 class RegistrationConfig(Config):
 
     def read_config(self, config):
@@ -31,6 +56,10 @@ class RegistrationConfig(Config):
                 strtobool(str(config["disable_registration"]))
             )
 
+        self.account_validity = AccountValidityConfig(
+            config.get("account_validity", {}), config,
+        )
+
         self.registrations_require_3pid = config.get("registrations_require_3pid", [])
         self.allowed_local_3pids = config.get("allowed_local_3pids", [])
         self.enable_3pid_lookup = config.get("enable_3pid_lookup", True)
@@ -76,6 +105,42 @@ class RegistrationConfig(Config):
         #
         #enable_registration: false
 
+        # Optional account validity configuration. This allows for accounts to be denied
+        # any request after a given period.
+        #
+        # ``enabled`` defines whether the account validity feature is enabled. Defaults
+        # to False.
+        #
+        # ``period`` allows setting the period after which an account is valid
+        # after its registration. When renewing the account, its validity period
+        # will be extended by this amount of time. This parameter is required when using
+        # the account validity feature.
+        #
+        # ``renew_at`` is the amount of time before an account's expiry date at which
+        # Synapse will send an email to the account's email address with a renewal link.
+        # This needs the ``email`` and ``public_baseurl`` configuration sections to be
+        # filled.
+        #
+        # ``renew_email_subject`` is the subject of the email sent out with the renewal
+        # link. ``%%(app)s`` can be used as a placeholder for the ``app_name`` parameter
+        # from the ``email`` section.
+        #
+        # Once this feature is enabled, Synapse will look for registered users without an
+        # expiration date at startup and will add one to every account it found using the
+        # current settings at that time.
+        # This means that, if a validity period is set and Synapse is restarted (it
+        # will then derive an expiration date from the current validity period), and
+        # the validity period is later changed and Synapse restarted again, users'
+        # expiration dates won't be updated unless their accounts are manually renewed.
+        # The expiration date assigned at startup is randomly selected within the range
+        # [now + period - d ; now + period], where d is equal to 10%% of the validity
+        # period, so that the affected accounts don't all expire at the same instant.
+        #
+        #account_validity:
+        #  enabled: True
+        #  period: 6w
+        #  renew_at: 1w
+        #  renew_email_subject: "Renew your %%(app)s account"
+
         # The user must provide all of the below types of 3PID when registering.
         #
         #registrations_require_3pid:
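A small sketch of the expiry arithmetic described in the account validity documentation above, with the durations written out in milliseconds rather than going through Synapse's duration parser:

    import random
    import time

    period = 6 * 7 * 24 * 3600 * 1000            # "6w" in milliseconds
    startup_job_max_delta = period * 10. / 100.  # d = 10% of the period

    # The startup job gives each pre-existing account an expiration date
    # drawn uniformly from [now + period - d, now + period], so the affected
    # accounts don't all expire at the same instant.
    now_ms = int(time.time() * 1000)
    expiration_ts = random.randrange(
        int(now_ms + period - startup_job_max_delta),
        int(now_ms + period) + 1,
    )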
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 3f34ad9b2a..fbfcecc240 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -186,17 +186,21 @@ class ContentRepositoryConfig(Config):
             except ImportError:
                 raise ConfigError(MISSING_NETADDR)
 
-            if "url_preview_ip_range_blacklist" in config:
-                self.url_preview_ip_range_blacklist = IPSet(
-                    config["url_preview_ip_range_blacklist"]
-                )
-            else:
+            if "url_preview_ip_range_blacklist" not in config:
                 raise ConfigError(
                     "For security, you must specify an explicit target IP address "
                     "blacklist in url_preview_ip_range_blacklist for url previewing "
                     "to work"
                 )
 
+            self.url_preview_ip_range_blacklist = IPSet(
+                config["url_preview_ip_range_blacklist"]
+            )
+
+            # we always blacklist '0.0.0.0' and '::', which are supposed to be
+            # unroutable addresses.
+            self.url_preview_ip_range_blacklist.update(['0.0.0.0', '::'])
+
             self.url_preview_ip_range_whitelist = IPSet(
                 config.get("url_preview_ip_range_whitelist", ())
             )
@@ -260,11 +264,12 @@ class ContentRepositoryConfig(Config):
         #thumbnail_sizes:
 %(formatted_thumbnail_sizes)s
 
-        # Is the preview URL API enabled?  If enabled, you *must* specify
-        # an explicit url_preview_ip_range_blacklist of IPs that the spider is
-        # denied from accessing.
+        # Is the preview URL API enabled?
+        #
+        # 'false' by default: uncomment the following to enable it (and specify a
+        # url_preview_ip_range_blacklist).
         #
-        #url_preview_enabled: false
+        #url_preview_enabled: true
 
         # List of IP address CIDR ranges that the URL preview spider is denied
         # from accessing.  There are no defaults: you must explicitly
@@ -274,6 +279,12 @@ class ContentRepositoryConfig(Config):
         # synapse to issue arbitrary GET requests to your internal services,
         # causing serious security issues.
         #
+        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+        # listed here, since they correspond to unroutable addresses.)
+        #
+        # This must be specified if url_preview_enabled is set. It is recommended that
+        # you uncomment the following list as a starting point.
+        #
         #url_preview_ip_range_blacklist:
         #  - '127.0.0.0/8'
         #  - '10.0.0.0/8'
@@ -284,7 +295,7 @@ class ContentRepositoryConfig(Config):
         #  - '::1/128'
         #  - 'fe80::/64'
         #  - 'fc00::/7'
-        #
+
         # List of IP address CIDR ranges that the URL preview spider is allowed
         # to access even if they are specified in url_preview_ip_range_blacklist.
         # This is useful for specifying exceptions to wide-ranging blacklisted
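A brief sketch of the blacklist behaviour configured above, using netaddr (the library the config code imports) directly:

    from netaddr import IPSet

    blacklist = IPSet(["127.0.0.0/8", "10.0.0.0/8"])

    # As in the config code above, the unroutable addresses are always added.
    blacklist.update(["0.0.0.0", "::"])

    assert "127.0.0.1" in blacklist
    assert "0.0.0.0" in blacklist
    assert "93.184.216.34" not in blacklist  # a routable public address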
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
index 39b9eb29c2..aa6eac271f 100644
--- a/synapse/config/saml2_config.py
+++ b/synapse/config/saml2_config.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2018 New Vector Ltd.
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 08e4e45482..7d56e2d141 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,6 +18,9 @@
 import logging
 import os.path
 
+from netaddr import IPSet
+
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.http.endpoint import parse_and_validate_server_name
 from synapse.python_dependencies import DependencyException, check_requirements
 
@@ -32,11 +36,14 @@ logger = logging.Logger(__name__)
 # in the list.
 DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0']
 
+DEFAULT_ROOM_VERSION = "4"
+
 
 class ServerConfig(Config):
 
     def read_config(self, config):
         self.server_name = config["server_name"]
+        self.server_context = config.get("server_context", None)
 
         try:
             parse_and_validate_server_name(self.server_name)
@@ -71,6 +78,35 @@ class ServerConfig(Config):
         # master, potentially causing inconsistency.
         self.enable_media_repo = config.get("enable_media_repo", True)
 
+        # Whether to require authentication to retrieve profile data (avatars,
+        # display names) of other users through the client API.
+        self.require_auth_for_profile_requests = config.get(
+            "require_auth_for_profile_requests", False,
+        )
+
+        # If set to 'True', requires authentication to access the server's
+        # public rooms directory through the client API, and forbids any other
+        # homeserver to fetch it via federation.
+        self.restrict_public_rooms_to_local_users = config.get(
+            "restrict_public_rooms_to_local_users", False,
+        )
+
+        default_room_version = config.get(
+            "default_room_version", DEFAULT_ROOM_VERSION,
+        )
+
+        # Ensure room version is a str
+        default_room_version = str(default_room_version)
+
+        if default_room_version not in KNOWN_ROOM_VERSIONS:
+            raise ConfigError(
+                "Unknown default_room_version: %s, known room versions: %s" %
+                (default_room_version, list(KNOWN_ROOM_VERSIONS.keys()))
+            )
+
+        # Get the actual room version object rather than just the identifier
+        self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]
+
         # whether to enable search. If disabled, new entries will not be inserted
         # into the search tables and they will not be indexed. Users will receive
         # errors when attempting to search for messages.
@@ -84,6 +120,11 @@ class ServerConfig(Config):
             "block_non_admin_invites", False,
         )
 
+        # Whether to enable experimental MSC1849 (aka relations) support
+        self.experimental_msc1849_support_enabled = config.get(
+            "experimental_msc1849_support_enabled", False,
+        )
+
         # Options to control access by tracking MAU
         self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
         self.max_mau_value = 0
@@ -113,14 +154,34 @@ class ServerConfig(Config):
         # FIXME: federation_domain_whitelist needs sytests
         self.federation_domain_whitelist = None
         federation_domain_whitelist = config.get(
-            "federation_domain_whitelist", None
+            "federation_domain_whitelist", None,
         )
-        # turn the whitelist into a hash for speed of lookup
+
         if federation_domain_whitelist is not None:
+            # turn the whitelist into a hash for speed of lookup
             self.federation_domain_whitelist = {}
+
             for domain in federation_domain_whitelist:
                 self.federation_domain_whitelist[domain] = True
 
+        self.federation_ip_range_blacklist = config.get(
+            "federation_ip_range_blacklist", [],
+        )
+
+        # Attempt to create an IPSet from the given ranges
+        try:
+            self.federation_ip_range_blacklist = IPSet(
+                self.federation_ip_range_blacklist
+            )
+
+            # Always blacklist 0.0.0.0, ::
+            self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
+        except Exception as e:
+            raise ConfigError(
+                "Invalid range(s) provided in "
+                "federation_ip_range_blacklist: %s" % e
+            )
+
         if self.public_baseurl is not None:
             if self.public_baseurl[-1] != '/':
                 self.public_baseurl += '/'
@@ -131,6 +192,16 @@ class ServerConfig(Config):
         # sending out any replication updates.
         self.replication_torture_level = config.get("replication_torture_level")
 
+        # Whether to require a user to be in the room to add an alias to it.
+        # Defaults to True.
+        self.require_membership_for_aliases = config.get(
+            "require_membership_for_aliases", True,
+        )
+
+        # Whether to allow per-room membership profiles, i.e. membership events
+        # whose profile information differs from the target's global profile.
+        self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
+
         self.listeners = []
         for listener in config.get("listeners", []):
             if not isinstance(listener.get("port", None), int):
@@ -258,6 +329,10 @@ class ServerConfig(Config):
             unsecure_port = 8008
 
         pid_file = os.path.join(data_dir_path, "homeserver.pid")
+
+        # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
+        # default config string
+        default_room_version = DEFAULT_ROOM_VERSION
         return """\
         ## Server ##
 
@@ -318,6 +393,30 @@ class ServerConfig(Config):
         #
         #use_presence: false
 
+        # Whether to require authentication to retrieve profile data (avatars,
+        # display names) of other users through the client API. Defaults to
+        # 'false'. Note that profile data is also available via the federation
+        # API, so this setting is of limited value if federation is enabled on
+        # the server.
+        #
+        #require_auth_for_profile_requests: true
+
+        # If set to 'true', requires authentication to access the server's
+        # public rooms directory through the client API, and forbids any other
+        # homeserver to fetch it via federation. Defaults to 'false'.
+        #
+        #restrict_public_rooms_to_local_users: true
+
+        # The default room version for newly created rooms.
+        #
+        # Known room versions are listed here:
+        # https://matrix.org/docs/spec/#complete-list-of-room-versions
+        #
+        # For example, for room version 1, default_room_version should be set
+        # to "1".
+        #
+        #default_room_version: "%(default_room_version)s"
+
         # The GC threshold parameters to pass to `gc.set_threshold`, if defined
         #
         #gc_thresholds: [700, 10, 10]
@@ -350,6 +449,24 @@ class ServerConfig(Config):
         #  - nyc.example.com
         #  - syd.example.com
 
+        # Prevent federation requests from being sent to the following
+        # blacklisted IP address CIDR ranges. If this option is not specified, or
+        # specified with an empty list, no IP range blacklist will be enforced.
+        #
+        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+        # listed here, since they correspond to unroutable addresses.)
+        #
+        federation_ip_range_blacklist:
+          - '127.0.0.0/8'
+          - '10.0.0.0/8'
+          - '172.16.0.0/12'
+          - '192.168.0.0/16'
+          - '100.64.0.0/10'
+          - '169.254.0.0/16'
+          - '::1/128'
+          - 'fe80::/64'
+          - 'fc00::/7'
+
         # List of ports that Synapse should listen on, their purpose and their
         # configuration.
         #
@@ -385,8 +502,8 @@ class ServerConfig(Config):
         #
         # Valid resource names are:
         #
-        #   client: the client-server API (/_matrix/client). Also implies 'media' and
-        #       'static'.
+        #   client: the client-server API (/_matrix/client), and the synapse admin
+        #       API (/_synapse/admin). Also implies 'media' and 'static'.
         #
         #   consent: user consent forms (/_matrix/consent). See
         #       docs/consent_tracking.md.
@@ -468,6 +585,22 @@ class ServerConfig(Config):
 
         # Monthly Active User Blocking
         #
+        # Used in cases where the admin or server owner wants to limit the
+        # number of monthly active users.
+        #
+        # 'limit_usage_by_mau' enables/disables monthly active user blocking. When
+        # enabled and a limit is reached, the server returns a 'ResourceLimitError'
+        # with error type Codes.RESOURCE_LIMIT_EXCEEDED.
+        #
+        # 'max_mau_value' is the hard limit of monthly active users above which
+        # the server will start blocking user actions.
+        #
+        # 'mau_trial_days' adds a grace period before a user counts towards the
+        # limit: users must be active for this number of days before they are
+        # considered active. This guards against the case where lots of users
+        # sign up in a short space of time, never to return after their initial
+        # session.
+        #
         #limit_usage_by_mau: False
         #max_mau_value: 50
         #mau_trial_days: 2
@@ -484,6 +617,20 @@ class ServerConfig(Config):
         #mau_limit_reserved_threepids:
         #  - medium: 'email'
         #    address: 'reserved_user@example.com'
+
+        # Used by phonehome stats to group together related servers.
+        #
+        #server_context: context
+
+        # Whether to require a user to be in the room to add an alias to it.
+        # Defaults to 'true'.
+        #
+        #require_membership_for_aliases: false
+
+        # Whether to allow per-room membership profiles, i.e. membership events
+        # whose profile information differs from the target's global profile.
+        # Defaults to 'true'.
+        #
+        #allow_per_room_profiles: false
         """ % locals()
 
     def read_arguments(self, args):
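A minimal sketch of the default_room_version handling added above (KNOWN_ROOM_VERSIONS is the mapping imported at the top of the file; ValueError stands in for ConfigError to keep the sketch standalone):

    from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

    # YAML parses an unquoted `default_room_version: 4` as an int, hence the
    # str() coercion mirrored from read_config.
    version_id = str(4)

    if version_id not in KNOWN_ROOM_VERSIONS:
        raise ValueError(
            "Unknown default_room_version: %s, known room versions: %s"
            % (version_id, list(KNOWN_ROOM_VERSIONS.keys()))
        )

    # A RoomVersion object rather than just the identifier string.
    default_room_version = KNOWN_ROOM_VERSIONS[version_id]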
diff --git a/synapse/config/stats.py b/synapse/config/stats.py
new file mode 100644
index 0000000000..80fc1b9dd0
--- /dev/null
+++ b/synapse/config/stats.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import division
+
+import sys
+
+from ._base import Config
+
+
+class StatsConfig(Config):
+    """Stats Configuration
+    Configuration for the behaviour of synapse's stats engine
+    """
+
+    def read_config(self, config):
+        self.stats_enabled = True
+        self.stats_bucket_size = 86400
+        self.stats_retention = sys.maxsize
+        stats_config = config.get("stats", None)
+        if stats_config:
+            self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
+            self.stats_bucket_size = (
+                self.parse_duration(stats_config.get("bucket_size", "1d")) / 1000
+            )
+            self.stats_retention = (
+                self.parse_duration(
+                    stats_config.get("retention", "%ds" % (sys.maxsize,))
+                )
+                / 1000
+            )
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # Local statistics collection. Used in populating the room directory.
+        #
+        # 'bucket_size' controls how large each statistics timeslice is. It can
+        # be defined in a human readable short form -- e.g. "1d", "1y".
+        #
+        # 'retention' controls how long historical statistics will be kept for.
+        # It can be defined in a human readable short form -- e.g. "1d", "1y".
+        #
+        #stats:
+        #   enabled: true
+        #   bucket_size: 1d
+        #   retention: 1y
+        """
diff --git a/synapse/config/third_party_event_rules.py b/synapse/config/third_party_event_rules.py
new file mode 100644
index 0000000000..a89dd5f98a
--- /dev/null
+++ b/synapse/config/third_party_event_rules.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.util.module_loader import load_module
+
+from ._base import Config
+
+
+class ThirdPartyRulesConfig(Config):
+    def read_config(self, config):
+        self.third_party_event_rules = None
+
+        provider = config.get("third_party_event_rules", None)
+        if provider is not None:
+            self.third_party_event_rules = load_module(provider)
+
+    def default_config(self, **kwargs):
+        return """\
+        # Server admins can define a Python module that implements extra rules for
+        # allowing or denying incoming events. In order to work, this module needs to
+        # override the methods defined in synapse/events/third_party_rules.py.
+        #
+        # This feature is designed to be used in closed federations only, where each
+        # participating server enforces the same rules.
+        #
+        #third_party_event_rules:
+        #  module: "my_custom_project.SuperRulesSet"
+        #  config:
+        #    example_option: 'things'
+        """
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index f0014902da..658f9dd361 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -24,8 +24,10 @@ import six
 from unpaddedbase64 import encode_base64
 
 from OpenSSL import crypto
+from twisted.internet._sslverify import Certificate, trustRootFromCertificates
 
 from synapse.config._base import Config, ConfigError
+from synapse.util import glob_to_regex
 
 logger = logging.getLogger(__name__)
 
@@ -70,6 +72,53 @@ class TlsConfig(Config):
 
         self.tls_fingerprints = list(self._original_tls_fingerprints)
 
+        # Whether to verify certificates on outbound federation traffic
+        self.federation_verify_certificates = config.get(
+            "federation_verify_certificates", True,
+        )
+
+        # Whitelist of domains to not verify certificates for
+        fed_whitelist_entries = config.get(
+            "federation_certificate_verification_whitelist", [],
+        )
+
+        # Support globs (*) in whitelist values
+        self.federation_certificate_verification_whitelist = []
+        for entry in fed_whitelist_entries:
+            # Convert globs to regex
+            entry_regex = glob_to_regex(entry)
+            self.federation_certificate_verification_whitelist.append(entry_regex)
+
+        # List of custom certificate authorities for federation traffic validation
+        custom_ca_list = config.get(
+            "federation_custom_ca_list", None,
+        )
+
+        # Read in and parse custom CA certificates
+        self.federation_ca_trust_root = None
+        if custom_ca_list is not None:
+            if len(custom_ca_list) == 0:
+                # A trustroot cannot be generated without any CA certificates.
+                # Raise an error if this option has been specified without any
+                # corresponding certificates.
+                raise ConfigError("federation_custom_ca_list specified without "
+                                  "any certificate files")
+
+            certs = []
+            for ca_file in custom_ca_list:
+                logger.debug("Reading custom CA certificate file: %s", ca_file)
+                content = self.read_file(ca_file, "federation_custom_ca_list")
+
+                # Parse the CA certificates
+                try:
+                    cert_base = Certificate.loadPEM(content)
+                    certs.append(cert_base)
+                except Exception as e:
+                    raise ConfigError("Error parsing custom CA certificate file %s: %s"
+                                      % (ca_file, e))
+
+            self.federation_ca_trust_root = trustRootFromCertificates(certs)
+
         # This config option applies to non-federation HTTP clients
         # (e.g. for talking to recaptcha, identity servers, and such)
         # It should never be used in production, and is intended for
@@ -99,15 +148,15 @@ class TlsConfig(Config):
         try:
             with open(self.tls_certificate_file, 'rb') as f:
                 cert_pem = f.read()
-        except Exception:
-            logger.exception("Failed to read existing certificate off disk!")
-            raise
+        except Exception as e:
+            raise ConfigError("Failed to read existing certificate file %s: %s"
+                              % (self.tls_certificate_file, e))
 
         try:
             tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
-        except Exception:
-            logger.exception("Failed to parse existing certificate off disk!")
-            raise
+        except Exception as e:
+            raise ConfigError("Failed to parse existing certificate file %s: %s"
+                              % (self.tls_certificate_file, e))
 
         if not allow_self_signed:
             if tls_certificate.get_subject() == tls_certificate.get_issuer():
@@ -192,6 +241,40 @@ class TlsConfig(Config):
         #
         #tls_private_key_path: "%(tls_private_key_path)s"
 
+        # Whether to verify TLS server certificates for outbound federation requests.
+        #
+        # Defaults to `true`. To disable certificate verification, uncomment the
+        # following line.
+        #
+        #federation_verify_certificates: false
+
+        # Skip federation certificate verification on the following whitelist
+        # of domains.
+        #
+        # This setting should only be used in very specific cases, such as
+        # federation over Tor hidden services and similar. For private networks
+        # of homeservers, you likely want to use a private CA instead.
+        #
+        # Only effective if federation_verify_certificates is `true`.
+        #
+        #federation_certificate_verification_whitelist:
+        #  - lon.example.com
+        #  - "*.domain.com"
+        #  - "*.onion"
+
+        # List of custom certificate authorities for federation traffic.
+        #
+        # This setting should only normally be used within a private network of
+        # homeservers.
+        #
+        # Note that this list will replace those that are provided by your
+        # operating environment. Certificates must be in PEM format.
+        #
+        #federation_custom_ca_list:
+        #  - myCA1.pem
+        #  - myCA2.pem
+        #  - myCA3.pem
+
         # ACME support: This will configure Synapse to request a valid TLS certificate
         # for your configured `server_name` via Let's Encrypt.
         #
diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py
index 142754a7dc..023997ccde 100644
--- a/synapse/config/user_directory.py
+++ b/synapse/config/user_directory.py
@@ -43,9 +43,9 @@ class UserDirectoryConfig(Config):
         #
         # 'search_all_users' defines whether to search all users visible to your HS
         # when searching the user directory, rather than limiting to users visible
-        # in public rooms.  Defaults to false.  If you set it True, you'll have to run
-        # UPDATE user_directory_stream_pos SET stream_id = NULL;
-        # on your database to tell it to rebuild the user_directory search indexes.
+        # in public rooms.  Defaults to false.  If you set it True, you'll have to
+        # rebuild the user_directory search indexes, see
+        # https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
         #
         #user_directory:
         #  enabled: true
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index 49cbc7098f..2bc5cc3807 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -15,13 +15,16 @@
 
 import logging
 
+import idna
+from service_identity import VerificationError
+from service_identity.pyopenssl import verify_hostname, verify_ip_address
 from zope.interface import implementer
 
 from OpenSSL import SSL, crypto
 from twisted.internet._sslverify import _defaultCurveName
 from twisted.internet.abstract import isIPAddress, isIPv6Address
 from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
-from twisted.internet.ssl import CertificateOptions, ContextFactory
+from twisted.internet.ssl import CertificateOptions, ContextFactory, platformTrust
 from twisted.python.failure import Failure
 
 logger = logging.getLogger(__name__)
@@ -56,80 +59,130 @@ class ServerContextFactory(ContextFactory):
         return self._context
 
 
-def _idnaBytes(text):
-    """
-    Convert some text typed by a human into some ASCII bytes. This is a
-    copy of twisted.internet._idna._idnaBytes. For documentation, see the
-    twisted documentation.
-    """
-    try:
-        import idna
-    except ImportError:
-        return text.encode("idna")
-    else:
-        return idna.encode(text)
+class ClientTLSOptionsFactory(object):
+    """Factory for Twisted SSLClientConnectionCreators that are used to make connections
+    to remote servers for federation.
 
+    Uses one of two OpenSSL context objects for all connections, depending on whether
+    we should do SSL certificate verification.
 
-def _tolerateErrors(wrapped):
-    """
-    Wrap up an info_callback for pyOpenSSL so that if something goes wrong
-    the error is immediately logged and the connection is dropped if possible.
-    This is a copy of twisted.internet._sslverify._tolerateErrors. For
-    documentation, see the twisted documentation.
+    get_options decides whether we should do SSL certificate verification and
+    constructs an SSLClientConnectionCreator factory accordingly.
     """
 
-    def infoCallback(connection, where, ret):
+    def __init__(self, config):
+        self._config = config
+
+        # Check if we're using a custom list of a CA certificates
+        trust_root = config.federation_ca_trust_root
+        if trust_root is None:
+            # Use CA root certs provided by OpenSSL
+            trust_root = platformTrust()
+
+        self._verify_ssl_context = CertificateOptions(trustRoot=trust_root).getContext()
+        self._verify_ssl_context.set_info_callback(self._context_info_cb)
+
+        self._no_verify_ssl_context = CertificateOptions().getContext()
+        self._no_verify_ssl_context.set_info_callback(self._context_info_cb)
+
+    def get_options(self, host):
+        # Check if certificate verification has been enabled
+        should_verify = self._config.federation_verify_certificates
+
+        # Check if we've disabled certificate verification for this host
+        if should_verify:
+            for regex in self._config.federation_certificate_verification_whitelist:
+                if regex.match(host):
+                    should_verify = False
+                    break
+
+        ssl_context = (
+            self._verify_ssl_context if should_verify else self._no_verify_ssl_context
+        )
+
+        return SSLClientConnectionCreator(host, ssl_context, should_verify)
+
+    @staticmethod
+    def _context_info_cb(ssl_connection, where, ret):
+        """The 'information callback' for our openssl context object."""
+        # we assume that the app_data on the connection object has been set to
+        # a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator)
+        tls_protocol = ssl_connection.get_app_data()
         try:
-            return wrapped(connection, where, ret)
+            # ... we further assume that SSLClientConnectionCreator has set the
+            # '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
+            tls_protocol._synapse_tls_verifier.verify_context_info_cb(
+                ssl_connection, where
+            )
         except:  # noqa: E722, taken from the twisted implementation
-            f = Failure()
             logger.exception("Error during info_callback")
-            connection.get_app_data().failVerification(f)
-
-    return infoCallback
+            f = Failure()
+            tls_protocol.failVerification(f)
 
 
 @implementer(IOpenSSLClientConnectionCreator)
-class ClientTLSOptions(object):
-    """
-    Client creator for TLS without certificate identity verification. This is a
-    copy of twisted.internet._sslverify.ClientTLSOptions with the identity
-    verification left out. For documentation, see the twisted documentation.
+class SSLClientConnectionCreator(object):
+    """Creates openssl connection objects for client connections.
+
+    Replaces twisted.internet.ssl.ClientTLSOptions
     """
 
-    def __init__(self, hostname, ctx):
+    def __init__(self, hostname, ctx, verify_certs):
         self._ctx = ctx
+        self._verifier = ConnectionVerifier(hostname, verify_certs)
 
-        if isIPAddress(hostname) or isIPv6Address(hostname):
-            self._hostnameBytes = hostname.encode('ascii')
-            self._sendSNI = False
-        else:
-            self._hostnameBytes = _idnaBytes(hostname)
-            self._sendSNI = True
-
-        ctx.set_info_callback(_tolerateErrors(self._identityVerifyingInfoCallback))
-
-    def clientConnectionForTLS(self, tlsProtocol):
+    def clientConnectionForTLS(self, tls_protocol):
         context = self._ctx
         connection = SSL.Connection(context, None)
-        connection.set_app_data(tlsProtocol)
+
+        # as per twisted.internet.ssl.ClientTLSOptions, we set the application
+        # data to our TLSMemoryBIOProtocol...
+        connection.set_app_data(tls_protocol)
+
+        # ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the
+        # tls_protocol so that the SSL context's info callback has something to
+        # call to do the cert verification.
+        setattr(tls_protocol, "_synapse_tls_verifier", self._verifier)
         return connection
 
-    def _identityVerifyingInfoCallback(self, connection, where, ret):
-        # Literal IPv4 and IPv6 addresses are not permitted
-        # as host names according to the RFCs
-        if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI:
-            connection.set_tlsext_host_name(self._hostnameBytes)
 
+class ConnectionVerifier(object):
+    """Set the SNI, and do cert verification
 
-class ClientTLSOptionsFactory(object):
-    """Factory for Twisted ClientTLSOptions that are used to make connections
-    to remote servers for federation."""
+    This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by
+    the ssl context's info callback.
+    """
 
-    def __init__(self, config):
-        # We don't use config options yet
-        self._options = CertificateOptions(verify=False)
+    # This code is based on twisted.internet.ssl.ClientTLSOptions.
 
-    def get_options(self, host):
-        # Use _makeContext so that we get a fresh OpenSSL CTX each time.
-        return ClientTLSOptions(host, self._options._makeContext())
+    def __init__(self, hostname, verify_certs):
+        self._verify_certs = verify_certs
+
+        if isIPAddress(hostname) or isIPv6Address(hostname):
+            self._hostnameBytes = hostname.encode("ascii")
+            self._is_ip_address = True
+        else:
+            # twisted's ClientTLSOptions falls back to the stdlib impl here if
+            # idna is not installed, but points out that it lacks support for
+            # IDNA2008 (http://bugs.python.org/issue17305).
+            #
+            # We can rely on having idna.
+            self._hostnameBytes = idna.encode(hostname)
+            self._is_ip_address = False
+
+        self._hostnameASCII = self._hostnameBytes.decode("ascii")
+
+    def verify_context_info_cb(self, ssl_connection, where):
+        if where & SSL.SSL_CB_HANDSHAKE_START and not self._is_ip_address:
+            ssl_connection.set_tlsext_host_name(self._hostnameBytes)
+
+        if where & SSL.SSL_CB_HANDSHAKE_DONE and self._verify_certs:
+            try:
+                if self._is_ip_address:
+                    verify_ip_address(ssl_connection, self._hostnameASCII)
+                else:
+                    verify_hostname(ssl_connection, self._hostnameASCII)
+            except VerificationError:
+                f = Failure()
+                tls_protocol = ssl_connection.get_app_data()
+                tls_protocol.failVerification(f)
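A minimal usage sketch for the factory above; the config object is a stand-in exposing just the three attributes the factory reads (in Synapse these come from TlsConfig):

    class StubTlsConfig(object):
        federation_verify_certificates = True
        federation_certificate_verification_whitelist = []
        federation_ca_trust_root = None  # falls back to platformTrust()

    factory = ClientTLSOptionsFactory(StubTlsConfig())

    # An SSLClientConnectionCreator which will send the SNI for matrix.org
    # and verify its certificate chain and hostname during the handshake.
    creator = factory.get_options("matrix.org")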
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index 1dfa727fcf..99a586655b 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -31,7 +31,11 @@ logger = logging.getLogger(__name__)
 def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
     """Check whether the hash for this PDU matches the contents"""
     name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm)
-    logger.debug("Expecting hash: %s", encode_base64(expected_hash))
+    logger.debug(
+        "Verifying content hash on %s (expecting: %s)",
+        event.event_id,
+        encode_base64(expected_hash),
+    )
 
     # some malformed events lack a 'hashes'. Protect against it being missing
     # or a weird type by basically treating it the same as an unhashed event.
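For reference, a simplified sketch of the content hash that check_event_content_hash compares against: strip the unsigned, signatures and hashes keys, canonicalise, then hash (the real compute_content_hash also drops a few internal bookkeeping keys; canonicaljson is the library Synapse uses):

    import hashlib

    from canonicaljson import encode_canonical_json

    def sketch_content_hash(event_dict):
        ev = dict(event_dict)
        for key in ("unsigned", "signatures", "hashes"):
            ev.pop(key, None)
        return hashlib.sha256(encode_canonical_json(ev)).digest()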
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 0207cd989a..6f603f1961 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2017, 2018 New Vector Ltd.
+# Copyright 2017, 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,11 +15,13 @@
 # limitations under the License.
 
 import logging
-from collections import namedtuple
+from collections import defaultdict
 
+import six
 from six import raise_from
 from six.moves import urllib
 
+import attr
 from signedjson.key import (
     decode_verify_key_bytes,
     encode_verify_key_base64,
@@ -42,7 +44,9 @@ from synapse.api.errors import (
     RequestSendFailed,
     SynapseError,
 )
+from synapse.storage.keys import FetchKeyResult
 from synapse.util import logcontext, unwrapFirstError
+from synapse.util.async_helpers import yieldable_gather_results
 from synapse.util.logcontext import (
     LoggingContext,
     PreserveLoggingContext,
@@ -55,22 +59,40 @@ from synapse.util.retryutils import NotRetryingDestination
 logger = logging.getLogger(__name__)
 
 
-VerifyKeyRequest = namedtuple("VerifyRequest", (
-    "server_name", "key_ids", "json_object", "deferred"
-))
-"""
-A request for a verify key to verify a JSON object.
+@attr.s(slots=True, cmp=False)
+class VerifyJsonRequest(object):
+    """
+    A request to verify a JSON object.
+
+    Attributes:
+        server_name(str): The name of the server to verify against.
+
+        key_ids(set[str]): The set of key_ids that could be used to verify the
+            JSON object.
+
+        json_object(dict): The JSON object to verify.
+
+        minimum_valid_until_ts (int): time at which we require the signing key to
+            be valid. (0 implies we don't care)
+
+        key_ready (Deferred[str, str, nacl.signing.VerifyKey]):
+            A deferred (server_name, key_id, verify_key) tuple that resolves when
+            a verify key has been fetched. The deferreds' callbacks are run with no
+            logcontext.
+
+            If we are unable to find a key which satisfies the request, the deferred
+            errbacks with an M_UNAUTHORIZED SynapseError.
+    """
 
-Attributes:
-    server_name(str): The name of the server to verify against.
-    key_ids(set(str)): The set of key_ids to that could be used to verify the
-        JSON object
-    json_object(dict): The JSON object to verify.
-    deferred(Deferred[str, str, nacl.signing.VerifyKey]):
-        A deferred (server_name, key_id, verify_key) tuple that resolves when
-        a verify key has been fetched. The deferreds' callbacks are run with no
-        logcontext.
-"""
+    server_name = attr.ib()
+    json_object = attr.ib()
+    minimum_valid_until_ts = attr.ib()
+    request_name = attr.ib()
+    key_ids = attr.ib(init=False)
+    key_ready = attr.ib(default=attr.Factory(defer.Deferred))
+
+    def __attrs_post_init__(self):
+        self.key_ids = signature_ids(self.json_object, self.server_name)
 
 
 class KeyLookupError(ValueError):
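As a small illustration of the attrs class above (signature_ids, from signedjson, lists the ids of the signatures that the given server has applied to the object; the payload here is a stand-in):

    json_object = {
        "content": {},
        "signatures": {"example.org": {"ed25519:key1": "<base64 signature>"}},
    }

    req = VerifyJsonRequest(
        server_name="example.org",
        json_object=json_object,
        minimum_valid_until_ts=0,  # 0: we don't care how fresh the key is
        request_name="$some_event_id",
    )

    # __attrs_post_init__ has already extracted the candidate key ids:
    assert list(req.key_ids) == ["ed25519:key1"]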
@@ -78,13 +100,16 @@ class KeyLookupError(ValueError):
 
 
 class Keyring(object):
-    def __init__(self, hs):
-        self.store = hs.get_datastore()
+    def __init__(self, hs, key_fetchers=None):
         self.clock = hs.get_clock()
-        self.client = hs.get_http_client()
-        self.config = hs.get_config()
-        self.perspective_servers = self.config.perspectives
-        self.hs = hs
+
+        if key_fetchers is None:
+            key_fetchers = (
+                StoreKeyFetcher(hs),
+                PerspectivesKeyFetcher(hs),
+                ServerKeyFetcher(hs),
+            )
+        self._key_fetchers = key_fetchers
 
         # map from server name to Deferred. Has an entry for each server with
         # an ongoing key download; the Deferred completes once the download
@@ -93,68 +118,122 @@ class Keyring(object):
         # These are regular, logcontext-agnostic Deferreds.
         self.key_downloads = {}
 
-    def verify_json_for_server(self, server_name, json_object):
-        return logcontext.make_deferred_yieldable(
-            self.verify_json_objects_for_server(
-                [(server_name, json_object)]
-            )[0]
-        )
+    def verify_json_for_server(
+        self, server_name, json_object, validity_time, request_name
+    ):
+        """Verify that a JSON object has been signed by a given server
+
+        Args:
+            server_name (str): name of the server which must have signed this object
+
+            json_object (dict): object to be checked
+
+            validity_time (int): timestamp at which we require the signing key to
+                be valid. (0 implies we don't care)
+
+            request_name (str): an identifier for this json object (eg, an event id)
+                for logging.
+
+        Returns:
+            Deferred[None]: completes if the object was correctly signed, otherwise
+                errbacks with an error
+        """
+        req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
+        requests = (req,)
+        return logcontext.make_deferred_yieldable(self._verify_objects(requests)[0])
 
     def verify_json_objects_for_server(self, server_and_json):
         """Bulk verifies signatures of json objects, bulk fetching keys as
         necessary.
 
         Args:
-            server_and_json (list): List of pairs of (server_name, json_object)
+            server_and_json (iterable[Tuple[str, dict, int, str]]):
+                Iterable of (server_name, json_object, validity_time, request_name)
+                tuples.
+
+                validity_time is a timestamp at which the signing key must be
+                valid.
+
+                request_name is an identifier for this json object (eg, an event id)
+                for logging.
 
         Returns:
-            List<Deferred>: for each input pair, a deferred indicating success
+            List<Deferred[None]>: for each input triplet, a deferred indicating success
                 or failure to verify each json object's signature for the given
                 server_name. The deferreds run their callbacks in the sentinel
                 logcontext.
         """
-        verify_requests = []
-
-        for server_name, json_object in server_and_json:
-
-            key_ids = signature_ids(json_object, server_name)
-            if not key_ids:
-                logger.warn("Request from %s: no supported signature keys",
-                            server_name)
-                deferred = defer.fail(SynapseError(
-                    400,
-                    "Not signed with a supported algorithm",
-                    Codes.UNAUTHORIZED,
-                ))
-            else:
-                deferred = defer.Deferred()
-
-            logger.debug("Verifying for %s with key_ids %s",
-                         server_name, key_ids)
-
-            verify_request = VerifyKeyRequest(
-                server_name, key_ids, json_object, deferred
-            )
+        return self._verify_objects(
+            VerifyJsonRequest(server_name, json_object, validity_time, request_name)
+            for server_name, json_object, validity_time, request_name in server_and_json
+        )
 
-            verify_requests.append(verify_request)
+    def _verify_objects(self, verify_requests):
+        """Does the work of verify_json_[objects_]for_server
 
-        run_in_background(self._start_key_lookups, verify_requests)
 
-        # Pass those keys to handle_key_deferred so that the json object
-        # signatures can be verified
+        Args:
+            verify_requests (iterable[VerifyJsonRequest]):
+                Iterable of verification requests.
+
+        Returns:
+            List<Deferred[None]>: for each input item, a deferred indicating success
+                or failure to verify each json object's signature for the given
+                server_name. The deferreds run their callbacks in the sentinel
+                logcontext.
+        """
+        # a list of VerifyJsonRequests which are awaiting a key lookup
+        key_lookups = []
         handle = preserve_fn(_handle_key_deferred)
-        return [
-            handle(rq) for rq in verify_requests
-        ]
+
+        def process(verify_request):
+            """Process an entry in the request list
+
+            Adds a key request to key_lookups, and returns a deferred which
+            will complete or fail (in the sentinel context) when verification completes.
+            """
+            if not verify_request.key_ids:
+                return defer.fail(
+                    SynapseError(
+                        400,
+                        "Not signed by %s" % (verify_request.server_name,),
+                        Codes.UNAUTHORIZED,
+                    )
+                )
+
+            logger.debug(
+                "Verifying %s for %s with key_ids %s, min_validity %i",
+                verify_request.request_name,
+                verify_request.server_name,
+                verify_request.key_ids,
+                verify_request.minimum_valid_until_ts,
+            )
+
+            # add the key request to the queue, but don't start it off yet.
+            key_lookups.append(verify_request)
+
+            # now run _handle_key_deferred, which will wait for the key request
+            # to complete and then do the verification.
+            #
+            # We want _handle_key_deferred to log to the right context, so we
+            # wrap it with preserve_fn (aka run_in_background)
+            return handle(verify_request)
+
+        results = [process(r) for r in verify_requests]
+
+        if key_lookups:
+            run_in_background(self._start_key_lookups, key_lookups)
+
+        return results
 
     @defer.inlineCallbacks
     def _start_key_lookups(self, verify_requests):
         """Sets off the key fetches for each verify request
 
-        Once each fetch completes, verify_request.deferred will be resolved.
+        Once each fetch completes, verify_request.key_ready will be resolved.
 
         Args:
-            verify_requests (List[VerifyKeyRequest]):
+            verify_requests (List[VerifyJsonRequest]):
         """
 
         try:
@@ -164,16 +243,12 @@ class Keyring(object):
             # any other lookups until we have finished.
             # The deferreds are called with no logcontext.
             server_to_deferred = {
-                rq.server_name: defer.Deferred()
-                for rq in verify_requests
+                rq.server_name: defer.Deferred() for rq in verify_requests
             }
 
             # We want to wait for any previous lookups to complete before
             # proceeding.
-            yield self.wait_for_previous_lookups(
-                [rq.server_name for rq in verify_requests],
-                server_to_deferred,
-            )
+            yield self.wait_for_previous_lookups(server_to_deferred)
 
             # Actually start fetching keys.
             self._get_server_verify_keys(verify_requests)
@@ -201,19 +276,16 @@ class Keyring(object):
                 return res
 
             for verify_request in verify_requests:
-                verify_request.deferred.addBoth(
-                    remove_deferreds, verify_request,
-                )
+                verify_request.key_ready.addBoth(remove_deferreds, verify_request)
         except Exception:
             logger.exception("Error starting key lookups")
 
     @defer.inlineCallbacks
-    def wait_for_previous_lookups(self, server_names, server_to_deferred):
+    def wait_for_previous_lookups(self, server_to_deferred):
         """Waits for any previous key lookups for the given servers to finish.
 
         Args:
-            server_names (list): list of server_names we want to lookup
-            server_to_deferred (dict): server_name to deferred which gets
+            server_to_deferred (dict[str, Deferred]): server_name to deferred which gets
                 resolved once we've finished looking up keys for that server.
                 The Deferreds should be regular twisted ones which call their
                 callbacks with no logcontext.
@@ -226,14 +298,15 @@ class Keyring(object):
         while True:
             wait_on = [
                 (server_name, self.key_downloads[server_name])
-                for server_name in server_names
+                for server_name in server_to_deferred.keys()
                 if server_name in self.key_downloads
             ]
             if not wait_on:
                 break
             logger.info(
                 "Waiting for existing lookups for %s to complete [loop %i]",
-                [w[0] for w in wait_on], loop_count,
+                [w[0] for w in wait_on],
+                loop_count,
             )
             with PreserveLoggingContext():
                 yield defer.DeferredList((w[1] for w in wait_on))
@@ -256,144 +329,298 @@ class Keyring(object):
     def _get_server_verify_keys(self, verify_requests):
         """Tries to find at least one key for each verify request
 
-        For each verify_request, verify_request.deferred is called back with
+        For each verify_request, verify_request.key_ready is called back with
         params (server_name, key_id, VerifyKey) if a key is found, or errbacked
         with a SynapseError if none of the keys are found.
 
         Args:
-            verify_requests (list[VerifyKeyRequest]): list of verify requests
+            verify_requests (list[VerifyJsonRequest]): list of verify requests
         """
 
-        # These are functions that produce keys given a list of key ids
-        key_fetch_fns = (
-            self.get_keys_from_store,  # First try the local store
-            self.get_keys_from_perspectives,  # Then try via perspectives
-            self.get_keys_from_server,  # Then try directly
+        remaining_requests = set(
+            rq for rq in verify_requests if not rq.key_ready.called
+        )
 
         @defer.inlineCallbacks
         def do_iterations():
             with Measure(self.clock, "get_server_verify_keys"):
-                # dict[str, dict[str, VerifyKey]]: results so far.
-                # map server_name -> key_id -> VerifyKey
-                merged_results = {}
-
-                # dict[str, set(str)]: keys to fetch for each server
-                missing_keys = {}
-                for verify_request in verify_requests:
-                    missing_keys.setdefault(verify_request.server_name, set()).update(
-                        verify_request.key_ids
-                    )
-
-                for fn in key_fetch_fns:
-                    results = yield fn(missing_keys.items())
-                    merged_results.update(results)
-
-                    # We now need to figure out which verify requests we have keys
-                    # for and which we don't
-                    missing_keys = {}
-                    requests_missing_keys = []
-                    for verify_request in verify_requests:
-                        server_name = verify_request.server_name
-                        result_keys = merged_results[server_name]
-
-                        if verify_request.deferred.called:
-                            # We've already called this deferred, which probably
-                            # means that we've already found a key for it.
-                            continue
-
-                        for key_id in verify_request.key_ids:
-                            if key_id in result_keys:
-                                with PreserveLoggingContext():
-                                    verify_request.deferred.callback((
-                                        server_name,
-                                        key_id,
-                                        result_keys[key_id],
-                                    ))
-                                break
-                        else:
-                            # The else block is only reached if the loop above
-                            # doesn't break.
-                            missing_keys.setdefault(server_name, set()).update(
-                                verify_request.key_ids
-                            )
-                            requests_missing_keys.append(verify_request)
-
-                    if not missing_keys:
-                        break
+                for f in self._key_fetchers:
+                    if not remaining_requests:
+                        return
+                    yield self._attempt_key_fetches_with_fetcher(f, remaining_requests)
 
+                # look for any requests which weren't satisfied
                 with PreserveLoggingContext():
-                    for verify_request in requests_missing_keys:
-                        verify_request.deferred.errback(SynapseError(
-                            401,
-                            "No key for %s with id %s" % (
-                                verify_request.server_name, verify_request.key_ids,
-                            ),
-                            Codes.UNAUTHORIZED,
-                        ))
+                    for verify_request in remaining_requests:
+                        verify_request.key_ready.errback(
+                            SynapseError(
+                                401,
+                                "No key for %s with ids in %s (min_validity %i)"
+                                % (
+                                    verify_request.server_name,
+                                    verify_request.key_ids,
+                                    verify_request.minimum_valid_until_ts,
+                                ),
+                                Codes.UNAUTHORIZED,
+                            )
+                        )
 
         def on_err(err):
+            # we don't really expect to get here, because any errors should already
+            # have been caught and logged. But if we do, let's log the error and make
+            # sure that all of the deferreds are resolved.
+            logger.error("Unexpected error in _get_server_verify_keys: %s", err)
             with PreserveLoggingContext():
-                for verify_request in verify_requests:
-                    if not verify_request.deferred.called:
-                        verify_request.deferred.errback(err)
+                for verify_request in remaining_requests:
+                    if not verify_request.key_ready.called:
+                        verify_request.key_ready.errback(err)
 
         run_in_background(do_iterations).addErrback(on_err)
 
     @defer.inlineCallbacks
-    def get_keys_from_store(self, server_name_and_key_ids):
+    def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests):
+        """Use a key fetcher to attempt to satisfy some key requests
+
+        Args:
+            fetcher (KeyFetcher): fetcher to use to fetch the keys
+            remaining_requests (set[VerifyJsonRequest]): outstanding key requests.
+                Any successfully-completed requests will be removed from the list.
+        """
+        # dict[str, dict[str, int]]: keys to fetch.
+        # server_name -> key_id -> min_valid_ts
+        missing_keys = defaultdict(dict)
+
+        for verify_request in remaining_requests:
+            # any completed requests should already have been removed
+            assert not verify_request.key_ready.called
+            keys_for_server = missing_keys[verify_request.server_name]
+
+            for key_id in verify_request.key_ids:
+                # If we have several requests for the same key, then we only need to
+                # request that key once, but we should do so with the greatest
+                # min_valid_until_ts of the requests, so that we can satisfy all of
+                # the requests.
+                keys_for_server[key_id] = max(
+                    keys_for_server.get(key_id, -1),
+                    verify_request.minimum_valid_until_ts,
+                )
+
+        results = yield fetcher.get_keys(missing_keys)
+
+        completed = []
+        for verify_request in remaining_requests:
+            server_name = verify_request.server_name
+
+            # see if any of the keys we got this time are sufficient to
+            # complete this VerifyJsonRequest.
+            result_keys = results.get(server_name, {})
+            for key_id in verify_request.key_ids:
+                fetch_key_result = result_keys.get(key_id)
+                if not fetch_key_result:
+                    # we didn't get a result for this key
+                    continue
+
+                if (
+                    fetch_key_result.valid_until_ts
+                    < verify_request.minimum_valid_until_ts
+                ):
+                    # key was not valid at this point
+                    continue
+
+                with PreserveLoggingContext():
+                    verify_request.key_ready.callback(
+                        (server_name, key_id, fetch_key_result.verify_key)
+                    )
+                completed.append(verify_request)
+                break
+
+        remaining_requests.difference_update(completed)
+
+
+class KeyFetcher(object):
+    def get_keys(self, keys_to_fetch):
+        """
+        Args:
+            keys_to_fetch (dict[str, dict[str, int]]):
+                the keys to be fetched. server_name -> key_id -> min_valid_ts
+
+        Returns:
+            Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]:
+                map from server_name -> key_id -> FetchKeyResult
         """
+        raise NotImplementedError
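To illustrate the contract, a hypothetical in-memory implementation (not part of this change; all names invented) could look like:

    class InMemoryKeyFetcher(KeyFetcher):
        """Illustrative fetcher backed by a dict of already-known keys.

        `known` maps server_name -> key_id -> FetchKeyResult.
        """

        def __init__(self, known):
            self._known = known

        def get_keys(self, keys_to_fetch):
            res = {}
            for server_name, key_ids in keys_to_fetch.items():
                keys_for_server = self._known.get(server_name, {})
                res[server_name] = {
                    key_id: keys_for_server[key_id]
                    for key_id in key_ids
                    if key_id in keys_for_server
                }
            return defer.succeed(res)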
+
+
+class StoreKeyFetcher(KeyFetcher):
+    """KeyFetcher impl which fetches keys from our data store"""
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def get_keys(self, keys_to_fetch):
+        """see KeyFetcher.get_keys"""
+
+        keys_to_fetch = (
+            (server_name, key_id)
+            for server_name, keys_for_server in keys_to_fetch.items()
+            for key_id in keys_for_server.keys()
+        )
+
+        res = yield self.store.get_server_verify_keys(keys_to_fetch)
+        keys = {}
+        for (server_name, key_id), key in res.items():
+            keys.setdefault(server_name, {})[key_id] = key
+        defer.returnValue(keys)
+
+
+class BaseV2KeyFetcher(object):
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.config = hs.get_config()
+
+    @defer.inlineCallbacks
+    def process_v2_response(self, from_server, response_json, time_added_ms):
+        """Parse a 'Server Keys' structure from the result of a /key request
+
+        This is used to parse either the entirety of the response from
+        GET /_matrix/key/v2/server, or a single entry from the list returned by
+        POST /_matrix/key/v2/query.
+
+        Checks that each signature in the response that claims to come from the origin
+        server is valid, and that there is at least one such signature.
+
+        Stores the json in server_keys_json so that it can be used for future responses
+        to /_matrix/key/v2/query.
 
         Args:
-            server_name_and_key_ids (list[(str, iterable[str])]):
-                list of (server_name, iterable[key_id]) tuples to fetch keys for
+            from_server (str): the name of the server producing this result: either
+                the origin server for a /_matrix/key/v2/server request, or the notary
+                for a /_matrix/key/v2/query.
+
+            response_json (dict): the json-decoded Server Keys response object
+
+            time_added_ms (int): the timestamp to record in server_keys_json
 
         Returns:
-            Deferred: resolves to dict[str, dict[str, VerifyKey]]: map from
-                server_name -> key_id -> VerifyKey
+            Deferred[dict[str, FetchKeyResult]]: map from key_id to result object
         """
-        res = yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(
-                    self.store.get_server_verify_keys,
-                    server_name, key_ids,
-                ).addCallback(lambda ks, server: (server, ks), server_name)
-                for server_name, key_ids in server_name_and_key_ids
-            ],
-            consumeErrors=True,
-        ).addErrback(unwrapFirstError))
-
-        defer.returnValue(dict(res))
+        ts_valid_until_ms = response_json[u"valid_until_ts"]
+
+        # start by extracting the keys from the response, since they may be required
+        # to validate the signature on the response.
+        verify_keys = {}
+        for key_id, key_data in response_json["verify_keys"].items():
+            if is_signing_algorithm_supported(key_id):
+                key_base64 = key_data["key"]
+                key_bytes = decode_base64(key_base64)
+                verify_key = decode_verify_key_bytes(key_id, key_bytes)
+                verify_keys[key_id] = FetchKeyResult(
+                    verify_key=verify_key, valid_until_ts=ts_valid_until_ms
+                )
+
+        server_name = response_json["server_name"]
+        verified = False
+        for key_id in response_json["signatures"].get(server_name, {}):
+            # each of the keys used for the signature must be present in the response
+            # json.
+            key = verify_keys.get(key_id)
+            if not key:
+                raise KeyLookupError(
+                    "Key response is signed by key id %s:%s but that key is not "
+                    "present in the response" % (server_name, key_id)
+                )
+
+            verify_signed_json(response_json, server_name, key.verify_key)
+            verified = True
+
+        if not verified:
+            raise KeyLookupError(
+                "Key response for %s is not signed by the origin server"
+                % (server_name,)
+            )
+
+        for key_id, key_data in response_json["old_verify_keys"].items():
+            if is_signing_algorithm_supported(key_id):
+                key_base64 = key_data["key"]
+                key_bytes = decode_base64(key_base64)
+                verify_key = decode_verify_key_bytes(key_id, key_bytes)
+                verify_keys[key_id] = FetchKeyResult(
+                    verify_key=verify_key, valid_until_ts=key_data["expired_ts"]
+                )
+
+        # re-sign the json with our own key, so that it is ready if we are asked to
+        # give it out as a notary server
+        signed_key_json = sign_json(
+            response_json, self.config.server_name, self.config.signing_key[0]
+        )
+
+        signed_key_json_bytes = encode_canonical_json(signed_key_json)
+
+        yield logcontext.make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(
+                        self.store.store_server_keys_json,
+                        server_name=server_name,
+                        key_id=key_id,
+                        from_server=from_server,
+                        ts_now_ms=time_added_ms,
+                        ts_expires_ms=ts_valid_until_ms,
+                        key_json_bytes=signed_key_json_bytes,
+                    )
+                    for key_id in verify_keys
+                ],
+                consumeErrors=True,
+            ).addErrback(unwrapFirstError)
+        )
+
+        defer.returnValue(verify_keys)
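For reference, the Server Keys object this method parses has roughly the following shape (all values illustrative):

    response_json = {
        "server_name": "remote.example.com",
        "valid_until_ts": 1559000000000,
        "verify_keys": {
            "ed25519:abc123": {"key": "<unpadded-base64 public key>"}
        },
        "old_verify_keys": {
            "ed25519:old0": {
                "key": "<unpadded-base64 public key>",
                "expired_ts": 1558000000000,
            }
        },
        "signatures": {
            "remote.example.com": {"ed25519:abc123": "<unpadded-base64 signature>"}
        },
    }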
+
+
+class PerspectivesKeyFetcher(BaseV2KeyFetcher):
+    """KeyFetcher impl which fetches keys from the "perspectives" servers"""
+
+    def __init__(self, hs):
+        super(PerspectivesKeyFetcher, self).__init__(hs)
+        self.clock = hs.get_clock()
+        self.client = hs.get_http_client()
+        self.key_servers = self.config.key_servers
 
     @defer.inlineCallbacks
-    def get_keys_from_perspectives(self, server_name_and_key_ids):
+    def get_keys(self, keys_to_fetch):
+        """see KeyFetcher.get_keys"""
+
         @defer.inlineCallbacks
-        def get_key(perspective_name, perspective_keys):
+        def get_key(key_server):
             try:
                 result = yield self.get_server_verify_key_v2_indirect(
-                    server_name_and_key_ids, perspective_name, perspective_keys
+                    keys_to_fetch, key_server
                 )
                 defer.returnValue(result)
             except KeyLookupError as e:
                 logger.warning(
-                    "Key lookup failed from %r: %s", perspective_name, e,
+                    "Key lookup failed from %r: %s", key_server.server_name, e
                 )
             except Exception as e:
                 logger.exception(
                     "Unable to get key from %r: %s %s",
-                    perspective_name,
-                    type(e).__name__, str(e),
+                    key_server.server_name,
+                    type(e).__name__,
+                    str(e),
                 )
 
             defer.returnValue({})
 
-        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(get_key, p_name, p_keys)
-                for p_name, p_keys in self.perspective_servers.items()
-            ],
-            consumeErrors=True,
-        ).addErrback(unwrapFirstError))
+        results = yield logcontext.make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(get_key, server)
+                    for server in self.key_servers
+                ],
+                consumeErrors=True,
+            ).addErrback(unwrapFirstError)
+        )
 
         union_of_keys = {}
         for result in results:
@@ -403,36 +630,32 @@ class Keyring(object):
         defer.returnValue(union_of_keys)
 
     @defer.inlineCallbacks
-    def get_keys_from_server(self, server_name_and_key_ids):
-        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(
-                    self.get_server_verify_key_v2_direct,
-                    server_name,
-                    key_ids,
-                )
-                for server_name, key_ids in server_name_and_key_ids
-            ],
-            consumeErrors=True,
-        ).addErrback(unwrapFirstError))
+    def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server):
+        """Query the given notary server for keys on behalf of other servers
+
+        Args:
+            keys_to_fetch (dict[str, dict[str, int]]):
+                the keys to be fetched. server_name -> key_id -> min_valid_ts
 
-        merged = {}
-        for result in results:
-            merged.update(result)
+            key_server (synapse.config.key.TrustedKeyServer): notary server to query for
+                the keys
 
-        defer.returnValue({
-            server_name: keys
-            for server_name, keys in merged.items()
-            if keys
-        })
+        Returns:
+            Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map
+                from server_name -> key_id -> FetchKeyResult
+
+        Raises:
+            KeyLookupError if there was an error processing the entire response from
+                the server
+        """
+        perspective_name = key_server.server_name
+        logger.info(
+            "Requesting keys %s from notary server %s",
+            keys_to_fetch.items(),
+            perspective_name,
+        )
 
-    @defer.inlineCallbacks
-    def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
-                                          perspective_name,
-                                          perspective_keys):
-        # TODO(mark): Set the minimum_valid_until_ts to that needed by
-        # the events being validated or the current time if validating
-        # an incoming request.
         try:
             query_response = yield self.client.post_json(
                 destination=perspective_name,
@@ -440,241 +663,215 @@ class Keyring(object):
                 data={
                     u"server_keys": {
                         server_name: {
-                            key_id: {
-                                u"minimum_valid_until_ts": 0
-                            } for key_id in key_ids
+                            key_id: {u"minimum_valid_until_ts": min_valid_ts}
+                            for key_id, min_valid_ts in server_keys.items()
                         }
-                        for server_name, key_ids in server_names_and_key_ids
+                        for server_name, server_keys in keys_to_fetch.items()
                     }
                 },
-                long_retries=True,
             )
         except (NotRetryingDestination, RequestSendFailed) as e:
-            raise_from(
-                KeyLookupError("Failed to connect to remote server"), e,
-            )
+            raise_from(KeyLookupError("Failed to connect to remote server"), e)
         except HttpResponseException as e:
-            raise_from(
-                KeyLookupError("Remote server returned an error"), e,
-            )
+            raise_from(KeyLookupError("Remote server returned an error"), e)
 
         keys = {}
+        added_keys = []
 
-        responses = query_response["server_keys"]
+        time_now_ms = self.clock.time_msec()
 
-        for response in responses:
-            if (u"signatures" not in response
-                    or perspective_name not in response[u"signatures"]):
+        for response in query_response["server_keys"]:
+            # do this first, so that we can give useful errors thereafter
+            server_name = response.get("server_name")
+            if not isinstance(server_name, six.string_types):
                 raise KeyLookupError(
-                    "Key response not signed by perspective server"
-                    " %r" % (perspective_name,)
+                    "Malformed response from key notary server %s: invalid server_name"
+                    % (perspective_name,)
                 )
 
-            verified = False
-            for key_id in response[u"signatures"][perspective_name]:
-                if key_id in perspective_keys:
-                    verify_signed_json(
-                        response,
-                        perspective_name,
-                        perspective_keys[key_id]
-                    )
-                    verified = True
+            try:
+                self._validate_perspectives_response(key_server, response)
 
-            if not verified:
-                logging.info(
-                    "Response from perspective server %r not signed with a"
-                    " known key, signed with: %r, known keys: %r",
-                    perspective_name,
-                    list(response[u"signatures"][perspective_name]),
-                    list(perspective_keys)
+                processed_response = yield self.process_v2_response(
+                    perspective_name, response, time_added_ms=time_now_ms
                 )
-                raise KeyLookupError(
-                    "Response not signed with a known key for perspective"
-                    " server %r" % (perspective_name,)
+            except KeyLookupError as e:
+                logger.warning(
+                    "Error processing response from key notary server %s for origin "
+                    "server %s: %s",
+                    perspective_name,
+                    server_name,
+                    e,
                 )
+                # we continue to process the rest of the response
+                continue
 
-            processed_response = yield self.process_v2_response(
-                perspective_name, response, only_from_server=False
+            added_keys.extend(
+                (server_name, key_id, key) for key_id, key in processed_response.items()
             )
+            keys.setdefault(server_name, {}).update(processed_response)
 
-            for server_name, response_keys in processed_response.items():
-                keys.setdefault(server_name, {}).update(response_keys)
+        yield self.store.store_server_verify_keys(
+            perspective_name, time_now_ms, added_keys
+        )
+
+        defer.returnValue(keys)
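The request body assembled above therefore looks something like this (server name, key id and timestamp invented):

    {
        "server_keys": {
            "remote.example.com": {
                "ed25519:abc123": {"minimum_valid_until_ts": 1559000000000}
            }
        }
    }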
+
+    def _validate_perspectives_response(self, key_server, response):
+        """Optionally check the signature on the result of a /key/query request
 
-        yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(
-                    self.store_keys,
-                    server_name=server_name,
-                    from_server=perspective_name,
-                    verify_keys=response_keys,
+        Args:
+            key_server (synapse.config.key.TrustedKeyServer): the notary server that
+                produced this result
+
+            response (dict): the json-decoded Server Keys response object
+        """
+        perspective_name = key_server.server_name
+        perspective_keys = key_server.verify_keys
+
+        if perspective_keys is None:
+            # signature checking is disabled on this server
+            return
+
+        if (
+            u"signatures" not in response
+            or perspective_name not in response[u"signatures"]
+        ):
+            raise KeyLookupError("Response not signed by the notary server")
+
+        verified = False
+        for key_id in response[u"signatures"][perspective_name]:
+            if key_id in perspective_keys:
+                verify_signed_json(response, perspective_name, perspective_keys[key_id])
+                verified = True
+
+        if not verified:
+            raise KeyLookupError(
+                "Response not signed with a known key: signed with: %r, known keys: %r"
+                % (
+                    list(response[u"signatures"][perspective_name].keys()),
+                    list(perspective_keys.keys()),
                 )
-                for server_name, response_keys in keys.items()
-            ],
-            consumeErrors=True
-        ).addErrback(unwrapFirstError))
+            )
 
-        defer.returnValue(keys)
+
+class ServerKeyFetcher(BaseV2KeyFetcher):
+    """KeyFetcher impl which fetches keys from the origin servers"""
+
+    def __init__(self, hs):
+        super(ServerKeyFetcher, self).__init__(hs)
+        self.clock = hs.get_clock()
+        self.client = hs.get_http_client()
+
+    def get_keys(self, keys_to_fetch):
+        """
+        Args:
+            keys_to_fetch (dict[str, iterable[str]]):
+                the keys to be fetched. server_name -> key_ids
+
+        Returns:
+            Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]:
+                map from server_name -> key_id -> FetchKeyResult
+        """
+
+        results = {}
+
+        @defer.inlineCallbacks
+        def get_key(key_to_fetch_item):
+            server_name, key_ids = key_to_fetch_item
+            try:
+                keys = yield self.get_server_verify_key_v2_direct(server_name, key_ids)
+                results[server_name] = keys
+            except KeyLookupError as e:
+                logger.warning(
+                    "Error looking up keys %s from %s: %s", key_ids, server_name, e
+                )
+            except Exception:
+                logger.exception("Error getting keys %s from %s", key_ids, server_name)
+
+        return yieldable_gather_results(get_key, keys_to_fetch.items()).addCallback(
+            lambda _: results
+        )
 
     @defer.inlineCallbacks
     def get_server_verify_key_v2_direct(self, server_name, key_ids):
-        keys = {}
+        """
+
+        Args:
+            server_name (str):
+            key_ids (iterable[str]):
+
+        Returns:
+            Deferred[dict[str, FetchKeyResult]]: map from key ID to lookup result
+
+        Raises:
+            KeyLookupError if there was a problem making the lookup
+        """
+        keys = {}  # type: dict[str, FetchKeyResult]
 
         for requested_key_id in key_ids:
+            # we may have found this key as a side-effect of asking for another.
             if requested_key_id in keys:
                 continue
 
+            time_now_ms = self.clock.time_msec()
             try:
                 response = yield self.client.get_json(
                     destination=server_name,
-                    path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
+                    path="/_matrix/key/v2/server/"
+                    + urllib.parse.quote(requested_key_id),
                     ignore_backoff=True,
+
+                    # we only give the remote server 10s to respond. It should be an
+                    # easy request to handle, so if it doesn't reply within 10s, it's
+                    # probably not going to.
+                    #
+                    # Furthermore, when we are acting as a notary server, we cannot
+                    # wait all day for all of the origin servers, as the requesting
+                    # server will otherwise time out before we can respond.
+                    #
+                    # (Note that get_json may make 4 attempts, so this can still take
+                    # almost 45 seconds to fetch the headers, plus up to another 60s to
+                    # read the response).
+                    timeout=10000,
                 )
             except (NotRetryingDestination, RequestSendFailed) as e:
-                raise_from(
-                    KeyLookupError("Failed to connect to remote server"), e,
-                )
+                raise_from(KeyLookupError("Failed to connect to remote server"), e)
             except HttpResponseException as e:
-                raise_from(
-                    KeyLookupError("Remote server returned an error"), e,
-                )
+                raise_from(KeyLookupError("Remote server returned an error"), e)
 
-            if (u"signatures" not in response
-                    or server_name not in response[u"signatures"]):
-                raise KeyLookupError("Key response not signed by remote server")
+            if response["server_name"] != server_name:
+                raise KeyLookupError(
+                    "Expected a response for server %r not %r"
+                    % (server_name, response["server_name"])
+                )
 
             response_keys = yield self.process_v2_response(
                 from_server=server_name,
-                requested_ids=[requested_key_id],
                 response_json=response,
+                time_added_ms=time_now_ms,
+            )
+            yield self.store.store_server_verify_keys(
+                server_name,
+                time_now_ms,
+                ((server_name, key_id, key) for key_id, key in response_keys.items()),
             )
-
             keys.update(response_keys)
 
-        yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(
-                    self.store_keys,
-                    server_name=key_server_name,
-                    from_server=server_name,
-                    verify_keys=verify_keys,
-                )
-                for key_server_name, verify_keys in keys.items()
-            ],
-            consumeErrors=True
-        ).addErrback(unwrapFirstError))
-
         defer.returnValue(keys)
 
-    @defer.inlineCallbacks
-    def process_v2_response(self, from_server, response_json,
-                            requested_ids=[], only_from_server=True):
-        time_now_ms = self.clock.time_msec()
-        response_keys = {}
-        verify_keys = {}
-        for key_id, key_data in response_json["verify_keys"].items():
-            if is_signing_algorithm_supported(key_id):
-                key_base64 = key_data["key"]
-                key_bytes = decode_base64(key_base64)
-                verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                verify_key.time_added = time_now_ms
-                verify_keys[key_id] = verify_key
-
-        old_verify_keys = {}
-        for key_id, key_data in response_json["old_verify_keys"].items():
-            if is_signing_algorithm_supported(key_id):
-                key_base64 = key_data["key"]
-                key_bytes = decode_base64(key_base64)
-                verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                verify_key.expired = key_data["expired_ts"]
-                verify_key.time_added = time_now_ms
-                old_verify_keys[key_id] = verify_key
-
-        results = {}
-        server_name = response_json["server_name"]
-        if only_from_server:
-            if server_name != from_server:
-                raise KeyLookupError(
-                    "Expected a response for server %r not %r" % (
-                        from_server, server_name
-                    )
-                )
-        for key_id in response_json["signatures"].get(server_name, {}):
-            if key_id not in response_json["verify_keys"]:
-                raise KeyLookupError(
-                    "Key response must include verification keys for all"
-                    " signatures"
-                )
-            if key_id in verify_keys:
-                verify_signed_json(
-                    response_json,
-                    server_name,
-                    verify_keys[key_id]
-                )
-
-        signed_key_json = sign_json(
-            response_json,
-            self.config.server_name,
-            self.config.signing_key[0],
-        )
-
-        signed_key_json_bytes = encode_canonical_json(signed_key_json)
-        ts_valid_until_ms = signed_key_json[u"valid_until_ts"]
-
-        updated_key_ids = set(requested_ids)
-        updated_key_ids.update(verify_keys)
-        updated_key_ids.update(old_verify_keys)
-
-        response_keys.update(verify_keys)
-        response_keys.update(old_verify_keys)
-
-        yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(
-                    self.store.store_server_keys_json,
-                    server_name=server_name,
-                    key_id=key_id,
-                    from_server=server_name,
-                    ts_now_ms=time_now_ms,
-                    ts_expires_ms=ts_valid_until_ms,
-                    key_json_bytes=signed_key_json_bytes,
-                )
-                for key_id in updated_key_ids
-            ],
-            consumeErrors=True,
-        ).addErrback(unwrapFirstError))
-
-        results[server_name] = response_keys
-
-        defer.returnValue(results)
-
-    def store_keys(self, server_name, from_server, verify_keys):
-        """Store a collection of verify keys for a given server
-        Args:
-            server_name(str): The name of the server the keys are for.
-            from_server(str): The server the keys were downloaded from.
-            verify_keys(dict): A mapping of key_id to VerifyKey.
-        Returns:
-            A deferred that completes when the keys are stored.
-        """
-        # TODO(markjh): Store whether the keys have expired.
-        return logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(
-                    self.store.store_server_verify_key,
-                    server_name, server_name, key.time_added, key
-                )
-                for key_id, key in verify_keys.items()
-            ],
-            consumeErrors=True,
-        ).addErrback(unwrapFirstError))
-
 
 @defer.inlineCallbacks
 def _handle_key_deferred(verify_request):
     """Waits for the key to become available, and then performs a verification
 
     Args:
-        verify_request (VerifyKeyRequest):
+        verify_request (VerifyJsonRequest):
 
     Returns:
         Deferred[None]
@@ -683,48 +880,25 @@ def _handle_key_deferred(verify_request):
         SynapseError if there was a problem performing the verification
     """
     server_name = verify_request.server_name
-    try:
-        with PreserveLoggingContext():
-            _, key_id, verify_key = yield verify_request.deferred
-    except KeyLookupError as e:
-        logger.warn(
-            "Failed to download keys for %s: %s %s",
-            server_name, type(e).__name__, str(e),
-        )
-        raise SynapseError(
-            502,
-            "Error downloading keys for %s" % (server_name,),
-            Codes.UNAUTHORIZED,
-        )
-    except Exception as e:
-        logger.exception(
-            "Got Exception when downloading keys for %s: %s %s",
-            server_name, type(e).__name__, str(e),
-        )
-        raise SynapseError(
-            401,
-            "No key for %s with id %s" % (server_name, verify_request.key_ids),
-            Codes.UNAUTHORIZED,
-        )
+    with PreserveLoggingContext():
+        _, key_id, verify_key = yield verify_request.key_ready
 
     json_object = verify_request.json_object
 
-    logger.debug("Got key %s %s:%s for server %s, verifying" % (
-        key_id, verify_key.alg, verify_key.version, server_name,
-    ))
     try:
         verify_signed_json(json_object, server_name, verify_key)
     except SignatureVerifyException as e:
         logger.debug(
             "Error verifying signature for %s:%s:%s with key %s: %s",
-            server_name, verify_key.alg, verify_key.version,
+            server_name,
+            verify_key.alg,
+            verify_key.version,
             encode_verify_key_base64(verify_key),
             str(e),
         )
         raise SynapseError(
             401,
-            "Invalid signature for server %s with key %s:%s: %s" % (
-                server_name, verify_key.alg, verify_key.version, str(e),
-            ),
+            "Invalid signature for server %s with key %s:%s: %s"
+            % (server_name, verify_key.alg, verify_key.version, str(e)),
             Codes.UNAUTHORIZED,
         )
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 12056d5be2..1edd19cc13 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -21,6 +21,7 @@ import six
 
 from unpaddedbase64 import encode_base64
 
+from synapse.api.errors import UnsupportedRoomVersionError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze
@@ -335,13 +336,32 @@ class FrozenEventV2(EventBase):
         return self.__repr__()
 
     def __repr__(self):
-        return "<FrozenEventV2 event_id='%s', type='%s', state_key='%s'>" % (
+        return "<%s event_id='%s', type='%s', state_key='%s'>" % (
+            self.__class__.__name__,
             self.event_id,
             self.get("type", None),
             self.get("state_key", None),
         )
 
 
+class FrozenEventV3(FrozenEventV2):
+    """FrozenEventV3, which differs from FrozenEventV2 only in the event_id format"""
+    format_version = EventFormatVersions.V3  # All events of this type are V3
+
+    @property
+    def event_id(self):
+        # We have to import this here as otherwise we get an import loop which
+        # is hard to break.
+        from synapse.crypto.event_signing import compute_event_reference_hash
+
+        if self._event_id:
+            return self._event_id
+        self._event_id = "$" + encode_base64(
+            compute_event_reference_hash(self)[1], urlsafe=True
+        )
+        return self._event_id
+
+
 def room_version_to_event_format(room_version):
     """Converts a room version string to the event format
 
@@ -350,12 +370,15 @@ def room_version_to_event_format(room_version):
 
     Returns:
         int
+
+    Raises:
+        UnsupportedRoomVersionError if the room version is unknown
     """
     v = KNOWN_ROOM_VERSIONS.get(room_version)
 
     if not v:
-        # We should have already checked version, so this should not happen
-        raise RuntimeError("Unrecognized room version %s" % (room_version,))
+        # this can happen if support is withdrawn for a room version
+        raise UnsupportedRoomVersionError()
 
     return v.event_format
 
@@ -376,6 +399,8 @@ def event_type_from_format_version(format_version):
         return FrozenEvent
     elif format_version == EventFormatVersions.V2:
         return FrozenEventV2
+    elif format_version == EventFormatVersions.V3:
+        return FrozenEventV3
     else:
         raise Exception(
             "No event format %r" % (format_version,)
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index fba27177c7..546b6f4982 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -18,6 +18,7 @@ import attr
 from twisted.internet import defer
 
 from synapse.api.constants import MAX_DEPTH
+from synapse.api.errors import UnsupportedRoomVersionError
 from synapse.api.room_versions import (
     KNOWN_EVENT_FORMAT_VERSIONS,
     KNOWN_ROOM_VERSIONS,
@@ -75,6 +76,7 @@ class EventBuilder(object):
     # someone tries to get them when they don't exist.
     _state_key = attr.ib(default=None)
     _redacts = attr.ib(default=None)
+    _origin_server_ts = attr.ib(default=None)
 
     internal_metadata = attr.ib(default=attr.Factory(lambda: _EventInternalMetadata({})))
 
@@ -141,6 +143,9 @@ class EventBuilder(object):
         if self._redacts is not None:
             event_dict["redacts"] = self._redacts
 
+        if self._origin_server_ts is not None:
+            event_dict["origin_server_ts"] = self._origin_server_ts
+
         defer.returnValue(
             create_local_event_from_event_dict(
                 clock=self._clock,
@@ -178,9 +183,8 @@ class EventBuilderFactory(object):
         """
         v = KNOWN_ROOM_VERSIONS.get(room_version)
         if not v:
-            raise Exception(
-                "No event format defined for version %r" % (room_version,)
-            )
+            # this can happen if support is withdrawn for a room version
+            raise UnsupportedRoomVersionError()
         return self.for_room_version(v, key_values)
 
     def for_room_version(self, room_version, key_values):
@@ -209,6 +213,7 @@ class EventBuilderFactory(object):
             content=key_values.get("content", {}),
             unsigned=key_values.get("unsigned", {}),
             redacts=key_values.get("redacts", None),
+            origin_server_ts=key_values.get("origin_server_ts", None),
         )
 
 
@@ -245,7 +250,7 @@ def create_local_event_from_event_dict(clock, hostname, signing_key,
         event_dict["event_id"] = _create_event_id(clock, hostname)
 
     event_dict["origin"] = hostname
-    event_dict["origin_server_ts"] = time_now
+    event_dict.setdefault("origin_server_ts", time_now)
 
     event_dict.setdefault("unsigned", {})
     age = event_dict["unsigned"].pop("age", 0)
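With setdefault, a caller-supplied origin_server_ts now survives event building; when absent, the wall-clock time is used as before. A sketch of the effect, assuming a factory entry point that takes a key_values dict as shown above (names hypothetical):

    builder = event_builder_factory.new(
        room_version,
        {
            "type": "m.room.message",
            "room_id": "!room:example.com",
            "sender": "@alice:example.com",
            "content": {"msgtype": "m.text", "body": "hi"},
            "origin_server_ts": 1559000000000,  # optional override
        },
    )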
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 368b5f6ae4..fa09c132a0 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -187,7 +187,9 @@ class EventContext(object):
 
         Returns:
             Deferred[dict[(str, str), str]|None]: Returns None if state_group
-            is None, which happens when the associated event is an outlier.
+                is None, which happens when the associated event is an outlier.
+                Maps a (type, state_key) to the event ID of the state event matching
+                this tuple.
         """
 
         if not self._fetching_state_deferred:
@@ -205,7 +207,9 @@ class EventContext(object):
 
         Returns:
             Deferred[dict[(str, str), str]|None]: Returns None if state_group
-            is None, which happens when the associated event is an outlier.
+                is None, which happens when the associated event is an outlier.
+                Maps a (type, state_key) to the event ID of the state event matching
+                this tuple.
         """
 
         if not self._fetching_state_deferred:
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 633e068eb8..6058077f75 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2017 New Vector Ltd.
+# Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
new file mode 100644
index 0000000000..50ceeb1e8e
--- /dev/null
+++ b/synapse/events/third_party_rules.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+
+class ThirdPartyEventRules(object):
+    """Allows server admins to provide a Python module implementing an extra
+    set of rules to apply when processing events.
+
+    This is designed to help admins of closed federations with enforcing custom
+    behaviours.
+    """
+
+    def __init__(self, hs):
+        self.third_party_rules = None
+
+        self.store = hs.get_datastore()
+
+        module = None
+        config = None
+        if hs.config.third_party_event_rules:
+            module, config = hs.config.third_party_event_rules
+
+        if module is not None:
+            self.third_party_rules = module(
+                config=config,
+                http_client=hs.get_simple_http_client(),
+            )
+
+    @defer.inlineCallbacks
+    def check_event_allowed(self, event, context):
+        """Check if a provided event should be allowed in the given context.
+
+        Args:
+            event (synapse.events.EventBase): The event to be checked.
+            context (synapse.events.snapshot.EventContext): The context of the event.
+
+        Returns:
+            defer.Deferred[bool]: True if the event should be allowed, False if not.
+        """
+        if self.third_party_rules is None:
+            defer.returnValue(True)
+
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
+
+        # Retrieve the state events from the database.
+        state_events = {}
+        for key, event_id in prev_state_ids.items():
+            state_events[key] = yield self.store.get_event(event_id, allow_none=True)
+
+        ret = yield self.third_party_rules.check_event_allowed(event, state_events)
+        defer.returnValue(ret)
+
+    @defer.inlineCallbacks
+    def on_create_room(self, requester, config, is_requester_admin):
+        """Intercept requests to create room to allow, deny or update the
+        request config.
+
+        Args:
+            requester (Requester)
+            config (dict): The creation config from the client.
+            is_requester_admin (bool): If the requester is an admin
+
+        Returns:
+            defer.Deferred
+        """
+
+        if self.third_party_rules is None:
+            return
+
+        yield self.third_party_rules.on_create_room(
+            requester, config, is_requester_admin
+        )
+
+    @defer.inlineCallbacks
+    def check_threepid_can_be_invited(self, medium, address, room_id):
+        """Check if a provided 3PID can be invited in the given room.
+
+        Args:
+            medium (str): The 3PID's medium.
+            address (str): The 3PID's address.
+            room_id (str): The room we want to invite the threepid to.
+
+        Returns:
+            defer.Deferred[bool]: True if the 3PID can be invited, False if not.
+        """
+
+        if self.third_party_rules is None:
+            defer.returnValue(True)
+
+        state_ids = yield self.store.get_filtered_current_state_ids(room_id)
+        room_state_events = yield self.store.get_events(state_ids.values())
+
+        state_events = {}
+        for key, event_id in state_ids.items():
+            state_events[key] = room_state_events[event_id]
+
+        ret = yield self.third_party_rules.check_threepid_can_be_invited(
+            medium, address, state_events,
+        )
+        defer.returnValue(ret)
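A hypothetical module satisfying this interface might look like the following (all names illustrative; the module and its config are wired up via the third_party_event_rules homeserver config):

    class ExampleThirdPartyRules(object):
        def __init__(self, config, http_client):
            self.config = config
            self.http_client = http_client

        def check_event_allowed(self, event, state_events):
            # plain (non-Deferred) return values are fine here: inlineCallbacks
            # passes them straight back to the yield point in the wrapper
            return event.type != "com.example.forbidden"

        def on_create_room(self, requester, config, is_requester_admin):
            if not is_requester_admin:
                config.setdefault("preset", "private_chat")

        def check_threepid_can_be_invited(self, medium, address, state_events):
            return medium == "email"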
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 07fccdd8f9..e2d4384de1 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -19,7 +19,10 @@ from six import string_types
 
 from frozendict import frozendict
 
-from synapse.api.constants import EventTypes
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, RelationTypes
+from synapse.util.async_helpers import yieldable_gather_results
 
 from . import EventBase
 
@@ -311,3 +314,93 @@ def serialize_event(e, time_now_ms, as_client_event=True,
         d = only_fields(d, only_event_fields)
 
     return d
+
+
+class EventClientSerializer(object):
+    """Serializes events that are to be sent to clients.
+
+    This is used for bundling extra information with any events to be sent to
+    clients.
+    """
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.experimental_msc1849_support_enabled = (
+            hs.config.experimental_msc1849_support_enabled
+        )
+
+    @defer.inlineCallbacks
+    def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs):
+        """Serializes a single event.
+
+        Args:
+            event (EventBase)
+            time_now (int): The current time in milliseconds
+            bundle_aggregations (bool): Whether to bundle in related events
+            **kwargs: Arguments to pass to `serialize_event`
+
+        Returns:
+            Deferred[dict]: The serialized event
+        """
+        # To handle the case of presence events and the like
+        if not isinstance(event, EventBase):
+            defer.returnValue(event)
+
+        event_id = event.event_id
+        serialized_event = serialize_event(event, time_now, **kwargs)
+
+        # If MSC1849 is enabled then we need to check if there are any relations
+        # we need to bundle in with the event
+        if self.experimental_msc1849_support_enabled and bundle_aggregations:
+            annotations = yield self.store.get_aggregation_groups_for_event(
+                event_id,
+            )
+            references = yield self.store.get_relations_for_event(
+                event_id, RelationTypes.REFERENCE, direction="f",
+            )
+
+            if annotations.chunk:
+                r = serialized_event["unsigned"].setdefault("m.relations", {})
+                r[RelationTypes.ANNOTATION] = annotations.to_dict()
+
+            if references.chunk:
+                r = serialized_event["unsigned"].setdefault("m.relations", {})
+                r[RelationTypes.REFERENCE] = references.to_dict()
+
+            edit = None
+            if event.type == EventTypes.Message:
+                edit = yield self.store.get_applicable_edit(event_id)
+
+            if edit:
+                # If there is an edit replace the content, preserving existing
+                # relations.
+
+                relations = event.content.get("m.relates_to")
+                serialized_event["content"] = edit.content.get("m.new_content", {})
+                if relations:
+                    serialized_event["content"]["m.relates_to"] = relations
+                else:
+                    serialized_event["content"].pop("m.relates_to", None)
+
+                r = serialized_event["unsigned"].setdefault("m.relations", {})
+                r[RelationTypes.REPLACE] = {
+                    "event_id": edit.event_id,
+                }
+
+        defer.returnValue(serialized_event)
+
+    def serialize_events(self, events, time_now, **kwargs):
+        """Serializes multiple events.
+
+        Args:
+            events (iter[EventBase])
+            time_now (int): The current time in milliseconds
+            **kwargs: Arguments to pass to `serialize_event`
+
+        Returns:
+            Deferred[list[dict]]: The list of serialized events
+        """
+        return yieldable_gather_results(
+            self.serialize_event, events,
+            time_now=time_now, **kwargs
+        )
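
To make the shape of the bundled data concrete, here is a minimal standalone sketch of how unsigned["m.relations"] gets populated, with the store's aggregation results stubbed as plain dicts shaped like {"chunk": [...]}; bundle_relations_sketch and its inputs are hypothetical.

# Illustrative sketch: populating unsigned["m.relations"], with the store's
# PaginationChunk results stubbed as plain dicts.
def bundle_relations_sketch(serialized_event, annotations, references):
    if annotations.get("chunk"):
        r = serialized_event["unsigned"].setdefault("m.relations", {})
        r["m.annotation"] = annotations
    if references.get("chunk"):
        r = serialized_event["unsigned"].setdefault("m.relations", {})
        r["m.reference"] = references
    return serialized_event

event = {"content": {"body": "hi"}, "unsigned": {}}
bundle_relations_sketch(
    event,
    annotations={"chunk": [{"type": "m.reaction", "key": "+1", "count": 2}]},
    references={"chunk": []},
)
# event["unsigned"]["m.relations"]["m.annotation"]["chunk"] now carries the
# aggregated annotations, so clients need no extra round trips to render them.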
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index 514273c792..711af512b2 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -15,8 +15,8 @@
 
 from six import string_types
 
-from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import SynapseError
+from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
+from synapse.api.errors import Codes, SynapseError
 from synapse.api.room_versions import EventFormatVersions
 from synapse.types import EventID, RoomID, UserID
 
@@ -56,6 +56,17 @@ class EventValidator(object):
             if not isinstance(getattr(event, s), string_types):
                 raise SynapseError(400, "'%s' not a string type" % (s,))
 
+        if event.type == EventTypes.Aliases:
+            if "aliases" in event.content:
+                for alias in event.content["aliases"]:
+                    if len(alias) > MAX_ALIAS_LENGTH:
+                        raise SynapseError(
+                            400,
+                            ("Can't create aliases longer than"
+                             " %d characters" % (MAX_ALIAS_LENGTH,)),
+                            Codes.INVALID_PARAM,
+                        )
+
     def validate_builder(self, event):
         """Validates that the builder/event has roughly the right format. Only
         checks values that we expect a proto event to have, rather than all the
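
The new alias length check can be exercised in isolation; a rough sketch, assuming MAX_ALIAS_LENGTH is 255 as in synapse.api.constants at this point (the standalone helper is illustrative):

# Illustrative sketch: the same length check outside the EventValidator.
MAX_ALIAS_LENGTH = 255  # assumed to match synapse.api.constants here

def check_aliases_content(content):
    for alias in content.get("aliases", []):
        if len(alias) > MAX_ALIAS_LENGTH:
            raise ValueError(
                "Can't create aliases longer than %d characters"
                % (MAX_ALIAS_LENGTH,)
            )

check_aliases_content({"aliases": ["#ok:example.org"]})          # passes
# check_aliases_content({"aliases": ["#" + "a" * 300 + ":ex"]})  # raises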
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index dfe6b4aa5c..fc5cfb7d83 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -223,9 +223,6 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
            the signatures are valid, or fail (with a SynapseError) if not.
     """
 
-    # (currently this is written assuming the v1 room structure; we'll probably want a
-    # separate function for checking v2 rooms)
-
     # we want to check that the event is signed by:
     #
     # (a) the sender's server
@@ -257,6 +254,10 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
         for p in pdus
     ]
 
+    v = KNOWN_ROOM_VERSIONS.get(room_version)
+    if not v:
+        raise RuntimeError("Unrecognized room version %s" % (room_version,))
+
     # First we check that the sender event is signed by the sender's domain
     # (except if it's a 3pid invite, in which case it may be sent by any server)
     pdus_to_check_sender = [
@@ -264,34 +265,66 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
         if not _is_invite_via_3pid(p.pdu)
     ]
 
-    more_deferreds = keyring.verify_json_objects_for_server([
-        (p.sender_domain, p.redacted_pdu_json)
-        for p in pdus_to_check_sender
-    ])
+    more_deferreds = keyring.verify_json_objects_for_server(
+        [
+            (
+                p.sender_domain,
+                p.redacted_pdu_json,
+                p.pdu.origin_server_ts if v.enforce_key_validity else 0,
+                p.pdu.event_id,
+            )
+            for p in pdus_to_check_sender
+        ]
+    )
+
+    def sender_err(e, pdu_to_check):
+        errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
+            pdu_to_check.pdu.event_id,
+            pdu_to_check.sender_domain,
+            e.getErrorMessage(),
+        )
+        # XX not really sure if these are the right codes, but they are what
+        # we've done for ages
+        raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)
 
     for p, d in zip(pdus_to_check_sender, more_deferreds):
+        d.addErrback(sender_err, p)
         p.deferreds.append(d)
 
     # now let's look for events where the sender's domain is different to the
     # event id's domain (normally only the case for joins/leaves), and add additional
     # checks. Only do this if the room version has a concept of event ID domain
     # (ie, the room version uses old-style non-hash event IDs).
-    v = KNOWN_ROOM_VERSIONS.get(room_version)
-    if not v:
-        raise RuntimeError("Unrecognized room version %s" % (room_version,))
-
     if v.event_format == EventFormatVersions.V1:
         pdus_to_check_event_id = [
             p for p in pdus_to_check
             if p.sender_domain != get_domain_from_id(p.pdu.event_id)
         ]
 
-        more_deferreds = keyring.verify_json_objects_for_server([
-            (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json)
-            for p in pdus_to_check_event_id
-        ])
+        more_deferreds = keyring.verify_json_objects_for_server(
+            [
+                (
+                    get_domain_from_id(p.pdu.event_id),
+                    p.redacted_pdu_json,
+                    p.pdu.origin_server_ts if v.enforce_key_validity else 0,
+                    p.pdu.event_id,
+                )
+                for p in pdus_to_check_event_id
+            ]
+        )
+
+        def event_err(e, pdu_to_check):
+            errmsg = (
+                "event id %s: unable to verify signature for event id domain: %s" % (
+                    pdu_to_check.pdu.event_id,
+                    e.getErrorMessage(),
+                )
+            )
+            # XX as above: not really sure if these are the right codes
+            raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)
 
         for p, d in zip(pdus_to_check_event_id, more_deferreds):
+            d.addErrback(event_err, p)
             p.deferreds.append(d)
 
     # replace lists of deferreds with single Deferreds
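
The per-PDU errback wiring above follows a standard Twisted pattern: attach an errback carrying the item it belongs to, so failures get rewrapped with context. A self-contained sketch, with verify_one as a hypothetical stand-in for the keyring:

# Illustrative sketch: rewrap each verification failure with the event id it
# belongs to, as _check_sigs_on_pdus does above.
from twisted.internet import defer

def verify_err(failure, event_id):
    raise RuntimeError(
        "event id %s: unable to verify signature: %s"
        % (event_id, failure.getErrorMessage())
    )

def check_all_sketch(event_ids, verify_one):
    # verify_one(event_id) -> Deferred
    deferreds = [verify_one(e) for e in event_ids]
    for event_id, d in zip(event_ids, deferreds):
        d.addErrback(verify_err, event_id)  # extra args pass through to the errback
    return defer.DeferredList(deferreds, consumeErrors=True)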
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index f3fc897a0a..70573746d6 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -17,7 +17,6 @@
 import copy
 import itertools
 import logging
-import random
 
 from six.moves import range
 
@@ -233,7 +232,8 @@ class FederationClient(FederationBase):
                 moving to the next destination. None indicates no timeout.
 
         Returns:
-            Deferred: Results in the requested PDU.
+            Deferred: Results in the requested PDU, or None if we were unable to find
+               it.
         """
 
         # TODO: Rate limit the number of times we try and get the same event.
@@ -258,7 +258,12 @@ class FederationClient(FederationBase):
                     destination, event_id, timeout=timeout,
                 )
 
-                logger.debug("transaction_data %r", transaction_data)
+                logger.debug(
+                    "retrieved event id %s from %s: %r",
+                    event_id,
+                    destination,
+                    transaction_data,
+                )
 
                 pdu_list = [
                     event_from_pdu_json(p, format_ver, outlier=outlier)
@@ -280,6 +285,7 @@ class FederationClient(FederationBase):
                     "Failed to get PDU %s from %s because %s",
                     event_id, destination, e,
                 )
+                continue
             except NotRetryingDestination as e:
                 logger.info(str(e))
                 continue
@@ -326,12 +332,16 @@ class FederationClient(FederationBase):
             state_event_ids = result["pdu_ids"]
             auth_event_ids = result.get("auth_chain_ids", [])
 
-            fetched_events, failed_to_fetch = yield self.get_events(
-                [destination], room_id, set(state_event_ids + auth_event_ids)
+            fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest(
+                destination, room_id, set(state_event_ids + auth_event_ids)
             )
 
             if failed_to_fetch:
-                logger.warn("Failed to get %r", failed_to_fetch)
+                logger.warning(
+                    "Failed to fetch missing state/auth events for %s: %s",
+                    room_id,
+                    failed_to_fetch
+                )
 
             event_map = {
                 ev.event_id: ev for ev in fetched_events
@@ -397,27 +407,20 @@ class FederationClient(FederationBase):
         defer.returnValue((signed_pdus, signed_auth))
 
     @defer.inlineCallbacks
-    def get_events(self, destinations, room_id, event_ids, return_local=True):
-        """Fetch events from some remote destinations, checking if we already
-        have them.
+    def get_events_from_store_or_dest(self, destination, room_id, event_ids):
+        """Fetch events from a remote destination, checking if we already have them.
 
         Args:
-            destinations (list)
+            destination (str)
             room_id (str)
             event_ids (list)
-            return_local (bool): Whether to include events we already have in
-                the DB in the returned list of events
 
         Returns:
             Deferred: A deferred resolving to a 2-tuple where the first is a list of
             events and the second is a list of event ids that we failed to fetch.
         """
-        if return_local:
-            seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
-            signed_events = list(seen_events.values())
-        else:
-            seen_events = yield self.store.have_seen_events(event_ids)
-            signed_events = []
+        seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
+        signed_events = list(seen_events.values())
 
         failed_to_fetch = set()
 
@@ -428,10 +431,11 @@ class FederationClient(FederationBase):
         if not missing_events:
             defer.returnValue((signed_events, failed_to_fetch))
 
-        def random_server_list():
-            srvs = list(destinations)
-            random.shuffle(srvs)
-            return srvs
+        logger.debug(
+            "Fetching unknown state/auth events %s for room %s",
+            missing_events,
+            room_id,
+        )
 
         room_version = yield self.store.get_room_version(room_id)
 
@@ -443,7 +447,7 @@ class FederationClient(FederationBase):
             deferreds = [
                 run_in_background(
                     self.get_pdu,
-                    destinations=random_server_list(),
+                    destinations=[destination],
                     event_id=e_id,
                     room_version=room_version,
                 )
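
Reduced to its essentials, the store-first fetch looks like this sketch (synchronous, with hypothetical stand-ins for the store and the federation call):

# Illustrative sketch: serve events we already have locally, then fetch only
# the missing ids from the single destination we are already talking to.
def get_events_from_store_or_dest_sketch(store, destination, event_ids):
    seen = store.get_events(event_ids)         # stand-in; event_id -> event
    fetched = list(seen.values())
    failed_to_fetch = set()
    for event_id in event_ids:
        if event_id in seen:
            continue
        event = destination.get_pdu(event_id)  # stand-in federation call
        if event is None:
            failed_to_fetch.add(event_id)
        else:
            fetched.append(event)
    return fetched, failed_to_fetch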
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index df60828dba..4c28c1dc3c 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -33,6 +33,7 @@ from synapse.api.errors import (
     IncompatibleRoomVersionError,
     NotFoundError,
     SynapseError,
+    UnsupportedRoomVersionError,
 )
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.crypto.event_signing import compute_event_signature
@@ -198,11 +199,22 @@ class FederationServer(FederationBase):
 
             try:
                 room_version = yield self.store.get_room_version(room_id)
-                format_ver = room_version_to_event_format(room_version)
             except NotFoundError:
                 logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                 continue
 
+            try:
+                format_ver = room_version_to_event_format(room_version)
+            except UnsupportedRoomVersionError:
+                # this can happen if support for a given room version is withdrawn,
+                # in which case we may still receive events for said room.
+                logger.info(
+                    "Ignoring PDU for room %s with unknown version %s",
+                    room_id,
+                    room_version,
+                )
+                continue
+
             event = event_from_pdu_json(p, format_ver)
             pdus_by_room.setdefault(room_id, []).append(event)
 
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index be99211003..22a2735405 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -33,12 +33,14 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage import UserPresenceState
 from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
 
+# This is defined in the Matrix spec and enforced by the receiver.
+MAX_EDUS_PER_TRANSACTION = 100
+
 logger = logging.getLogger(__name__)
 
 
 sent_edus_counter = Counter(
-    "synapse_federation_client_sent_edus",
-    "Total number of EDUs successfully sent",
+    "synapse_federation_client_sent_edus", "Total number of EDUs successfully sent"
 )
 
 sent_edus_by_type = Counter(
@@ -58,6 +60,7 @@ class PerDestinationQueue(object):
         destination (str): the server_name of the destination that we are managing
             transmission for.
     """
+
     def __init__(self, hs, transaction_manager, destination):
         self._server_name = hs.hostname
         self._clock = hs.get_clock()
@@ -68,17 +71,17 @@ class PerDestinationQueue(object):
         self.transmission_loop_running = False
 
         # a list of tuples of (pending pdu, order)
-        self._pending_pdus = []    # type: list[tuple[EventBase, int]]
-        self._pending_edus = []    # type: list[Edu]
+        self._pending_pdus = []  # type: list[tuple[EventBase, int]]
+        self._pending_edus = []  # type: list[Edu]
 
         # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
         # based on their key (e.g. typing events by room_id)
         # Map of (edu_type, key) -> Edu
-        self._pending_edus_keyed = {}   # type: dict[tuple[str, str], Edu]
+        self._pending_edus_keyed = {}  # type: dict[tuple[str, str], Edu]
 
         # Map of user_id -> UserPresenceState of pending presence to be sent to this
         # destination
-        self._pending_presence = {}   # type: dict[str, UserPresenceState]
+        self._pending_presence = {}  # type: dict[str, UserPresenceState]
 
         # room_id -> receipt_type -> user_id -> receipt_dict
         self._pending_rrs = {}
@@ -120,9 +123,7 @@ class PerDestinationQueue(object):
         Args:
             states (iterable[UserPresenceState]): presence to send
         """
-        self._pending_presence.update({
-            state.user_id: state for state in states
-        })
+        self._pending_presence.update({state.user_id: state for state in states})
         self.attempt_new_transaction()
 
     def queue_read_receipt(self, receipt):
@@ -132,14 +133,9 @@ class PerDestinationQueue(object):
         Args:
             receipt (synapse.api.receipt_info.ReceiptInfo): receipt to be queued
         """
-        self._pending_rrs.setdefault(
-            receipt.room_id, {},
-        ).setdefault(
+        self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
             receipt.receipt_type, {}
-        )[receipt.user_id] = {
-            "event_ids": receipt.event_ids,
-            "data": receipt.data,
-        }
+        )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}
 
     def flush_read_receipts_for_room(self, room_id):
         # if we don't have any read-receipts for this room, it may be that we've already
@@ -170,10 +166,7 @@ class PerDestinationQueue(object):
             # request at which point pending_pdus just keeps growing.
             # we need application-layer timeouts of some flavour of these
             # requests
-            logger.debug(
-                "TX [%s] Transaction already in progress",
-                self._destination
-            )
+            logger.debug("TX [%s] Transaction already in progress", self._destination)
             return
 
         logger.debug("TX [%s] Starting transaction loop", self._destination)
@@ -196,10 +189,21 @@ class PerDestinationQueue(object):
 
             pending_pdus = []
             while True:
-                device_message_edus, device_stream_id, dev_list_id = (
-                    yield self._get_new_device_messages()
+                # We have to keep 2 free slots for presence and rr_edus
+                limit = MAX_EDUS_PER_TRANSACTION - 2
+
+                device_update_edus, dev_list_id = (
+                    yield self._get_device_update_edus(limit)
+                )
+
+                limit -= len(device_update_edus)
+
+                to_device_edus, device_stream_id = (
+                    yield self._get_to_device_message_edus(limit)
                 )
 
+                pending_edus = device_update_edus + to_device_edus
+
                 # BEGIN CRITICAL SECTION
                 #
                 # In order to avoid a race condition, we need to make sure that
@@ -214,21 +218,7 @@ class PerDestinationQueue(object):
                 # We can only include at most 50 PDUs per transaction
                 pending_pdus, self._pending_pdus = pending_pdus[:50], pending_pdus[50:]
 
-                pending_edus = []
-
                 pending_edus.extend(self._get_rr_edus(force_flush=False))
-
-                # We can only include at most 100 EDUs per transactions
-                pending_edus.extend(self._pop_pending_edus(100 - len(pending_edus)))
-
-                pending_edus.extend(
-                    self._pending_edus_keyed.values()
-                )
-
-                self._pending_edus_keyed = {}
-
-                pending_edus.extend(device_message_edus)
-
                 pending_presence = self._pending_presence
                 self._pending_presence = {}
                 if pending_presence:
@@ -248,9 +238,22 @@ class PerDestinationQueue(object):
                         )
                     )
 
+                pending_edus.extend(
+                    self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
+                )
+                while (
+                    len(pending_edus) < MAX_EDUS_PER_TRANSACTION
+                    and self._pending_edus_keyed
+                ):
+                    _, val = self._pending_edus_keyed.popitem()
+                    pending_edus.append(val)
+
                 if pending_pdus:
-                    logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
-                                 self._destination, len(pending_pdus))
+                    logger.debug(
+                        "TX [%s] len(pending_pdus_by_dest[dest]) = %d",
+                        self._destination,
+                        len(pending_pdus),
+                    )
 
                 if not pending_pdus and not pending_edus:
                     logger.debug("TX [%s] Nothing to send", self._destination)
@@ -259,7 +262,7 @@ class PerDestinationQueue(object):
 
                 # if we've decided to send a transaction anyway, and we have room, we
                 # may as well send any pending RRs
-                if len(pending_edus) < 100:
+                if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
                     pending_edus.extend(self._get_rr_edus(force_flush=True))
 
                 # END CRITICAL SECTION
@@ -274,10 +277,13 @@ class PerDestinationQueue(object):
                         sent_edus_by_type.labels(edu.edu_type).inc()
                     # Remove the acknowledged device messages from the database
                     # Only bother if we actually sent some device messages
-                    if device_message_edus:
+                    if to_device_edus:
                         yield self._store.delete_device_msgs_for_remote(
                             self._destination, device_stream_id
                         )
+
+                    # also mark the device updates as sent
+                    if device_update_edus:
                         logger.info(
                             "Marking as sent %r %r", self._destination, dev_list_id
                         )
@@ -303,22 +309,25 @@ class PerDestinationQueue(object):
         except HttpResponseException as e:
             logger.warning(
                 "TX [%s] Received %d response to transaction: %s",
-                self._destination, e.code, e,
+                self._destination,
+                e.code,
+                e,
             )
         except RequestSendFailed as e:
-            logger.warning("TX [%s] Failed to send transaction: %s", self._destination, e)
+            logger.warning(
+                "TX [%s] Failed to send transaction: %s", self._destination, e
+            )
 
             for p, _ in pending_pdus:
-                logger.info("Failed to send event %s to %s", p.event_id,
-                            self._destination)
+                logger.info(
+                    "Failed to send event %s to %s", p.event_id, self._destination
+                )
         except Exception:
-            logger.exception(
-                "TX [%s] Failed to send transaction",
-                self._destination,
-            )
+            logger.exception("TX [%s] Failed to send transaction", self._destination)
             for p, _ in pending_pdus:
-                logger.info("Failed to send event %s to %s", p.event_id,
-                            self._destination)
+                logger.info(
+                    "Failed to send event %s to %s", p.event_id, self._destination
+                )
         finally:
             # We want to be *very* sure we clear this after we stop processing
             self.transmission_loop_running = False
@@ -346,33 +355,45 @@ class PerDestinationQueue(object):
         return pending_edus
 
     @defer.inlineCallbacks
-    def _get_new_device_messages(self):
-        last_device_stream_id = self._last_device_stream_id
-        to_device_stream_id = self._store.get_to_device_stream_token()
-        contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
-            self._destination, last_device_stream_id, to_device_stream_id
+    def _get_device_update_edus(self, limit):
+        last_device_list = self._last_device_list_stream_id
+
+        # Retrieve list of new device updates to send to the destination
+        now_stream_id, results = yield self._store.get_devices_by_remote(
+            self._destination, last_device_list, limit=limit,
         )
         edus = [
             Edu(
                 origin=self._server_name,
                 destination=self._destination,
-                edu_type="m.direct_to_device",
+                edu_type="m.device_list_update",
                 content=content,
             )
-            for content in contents
+            for content in results
         ]
 
-        last_device_list = self._last_device_list_stream_id
-        now_stream_id, results = yield self._store.get_devices_by_remote(
-            self._destination, last_device_list
+        assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"
+
+        defer.returnValue((edus, now_stream_id))
+
+    @defer.inlineCallbacks
+    def _get_to_device_message_edus(self, limit):
+        last_device_stream_id = self._last_device_stream_id
+        to_device_stream_id = self._store.get_to_device_stream_token()
+        contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
+            self._destination,
+            last_device_stream_id,
+            to_device_stream_id,
+            limit,
         )
-        edus.extend(
+        edus = [
             Edu(
                 origin=self._server_name,
                 destination=self._destination,
-                edu_type="m.device_list_update",
+                edu_type="m.direct_to_device",
                 content=content,
             )
-            for content in results
-        )
-        defer.returnValue((edus, stream_id, now_stream_id))
+            for content in contents
+        ]
+
+        defer.returnValue((edus, stream_id))
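
The EDU budget arithmetic from the transaction loop, isolated into a sketch (presence and read receipts omitted; the fetch callables are hypothetical stand-ins):

# Illustrative sketch: carving up the 100-EDU budget for one transaction.
MAX_EDUS_PER_TRANSACTION = 100

def build_edu_list_sketch(get_device_updates, get_to_device, pending_edus_keyed):
    limit = MAX_EDUS_PER_TRANSACTION - 2   # keep 2 slots for presence + RRs
    device_update_edus = get_device_updates(limit)
    limit -= len(device_update_edus)
    to_device_edus = get_to_device(limit)
    pending_edus = device_update_edus + to_device_edus
    # keyed EDUs (e.g. typing notifications) fill whatever room remains
    while len(pending_edus) < MAX_EDUS_PER_TRANSACTION and pending_edus_keyed:
        _, edu = pending_edus_keyed.popitem()
        pending_edus.append(edu)
    return pending_edus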
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 452599e1a1..949a5fb2aa 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -23,7 +23,11 @@ from twisted.internet import defer
 import synapse
 from synapse.api.errors import Codes, FederationDeniedError, SynapseError
 from synapse.api.room_versions import RoomVersions
-from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
+from synapse.api.urls import (
+    FEDERATION_UNSTABLE_PREFIX,
+    FEDERATION_V1_PREFIX,
+    FEDERATION_V2_PREFIX,
+)
 from synapse.http.endpoint import parse_and_validate_server_name
 from synapse.http.server import JsonResource
 from synapse.http.servlet import (
@@ -63,11 +67,7 @@ class TransportLayerServer(JsonResource):
         self.authenticator = Authenticator(hs)
         self.ratelimiter = FederationRateLimiter(
             self.clock,
-            window_size=hs.config.federation_rc_window_size,
-            sleep_limit=hs.config.federation_rc_sleep_limit,
-            sleep_msec=hs.config.federation_rc_sleep_delay,
-            reject_limit=hs.config.federation_rc_reject_limit,
-            concurrent_requests=hs.config.federation_rc_concurrent,
+            config=hs.config.rc_federation,
         )
 
         self.register_servlets()
@@ -94,6 +94,7 @@ class NoAuthenticationError(AuthenticationError):
 
 class Authenticator(object):
     def __init__(self, hs):
+        self._clock = hs.get_clock()
         self.keyring = hs.get_keyring()
         self.server_name = hs.hostname
         self.store = hs.get_datastore()
@@ -102,6 +103,7 @@ class Authenticator(object):
     # A method just so we can pass 'self' as the authenticator to the Servlets
     @defer.inlineCallbacks
     def authenticate_request(self, request, content):
+        now = self._clock.time_msec()
         json_request = {
             "method": request.method.decode('ascii'),
             "uri": request.uri.decode('ascii'),
@@ -138,7 +140,9 @@ class Authenticator(object):
                 401, "Missing Authorization headers", Codes.UNAUTHORIZED,
             )
 
-        yield self.keyring.verify_json_for_server(origin, json_request)
+        yield self.keyring.verify_json_for_server(
+            origin, json_request, now, "Incoming request"
+        )
 
         logger.info("Request from %s", origin)
         request.authenticated_entity = origin
@@ -716,8 +720,17 @@ class PublicRoomList(BaseFederationServlet):
 
     PATH = "/publicRooms"
 
+    def __init__(self, handler, authenticator, ratelimiter, server_name, deny_access):
+        super(PublicRoomList, self).__init__(
+            handler, authenticator, ratelimiter, server_name,
+        )
+        self.deny_access = deny_access
+
     @defer.inlineCallbacks
     def on_GET(self, origin, content, query):
+        if self.deny_access:
+            raise FederationDeniedError(origin)
+
         limit = parse_integer_from_args(query, "limit", 0)
         since_token = parse_string_from_args(query, "since", None)
         include_all_networks = parse_boolean_from_args(
@@ -1299,6 +1312,30 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
         defer.returnValue((200, new_content))
 
 
+class RoomComplexityServlet(BaseFederationServlet):
+    """
+    Indicates to other servers how complex (and therefore likely
+    resource-intensive) a public room known to this server is.
+    """
+    PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
+    PREFIX = FEDERATION_UNSTABLE_PREFIX
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, room_id):
+
+        store = self.handler.hs.get_datastore()
+
+        is_public = yield store.is_room_world_readable_or_publicly_joinable(
+            room_id
+        )
+
+        if not is_public:
+            raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)
+
+        complexity = yield store.get_room_complexity(room_id)
+        defer.returnValue((200, complexity))
+
+
 FEDERATION_SERVLET_CLASSES = (
     FederationSendServlet,
     FederationEventServlet,
@@ -1322,6 +1359,7 @@ FEDERATION_SERVLET_CLASSES = (
     FederationThirdPartyInviteExchangeServlet,
     On3pidBindServlet,
     FederationVersionServlet,
+    RoomComplexityServlet,
 )
 
 OPENID_SERVLET_CLASSES = (
@@ -1417,6 +1455,7 @@ def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=N
                 authenticator=authenticator,
                 ratelimiter=ratelimiter,
                 server_name=hs.hostname,
+                deny_access=hs.config.restrict_public_rooms_to_local_users,
             ).register(resource)
 
     if "group_server" in servlet_groups:
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index 786149be65..3ba74001d8 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -42,7 +42,7 @@ from signedjson.sign import sign_json
 
 from twisted.internet import defer
 
-from synapse.api.errors import RequestSendFailed, SynapseError
+from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import get_domain_from_id
 from synapse.util.logcontext import run_in_background
@@ -97,10 +97,13 @@ class GroupAttestationSigning(object):
 
         # TODO: We also want to check that *new* attestations that people give
         # us to store are valid for at least a little while.
-        if valid_until_ms < self.clock.time_msec():
+        now = self.clock.time_msec()
+        if valid_until_ms < now:
             raise SynapseError(400, "Attestation expired")
 
-        yield self.keyring.verify_json_for_server(server_name, attestation)
+        yield self.keyring.verify_json_for_server(
+            server_name, attestation, now, "Group attestation"
+        )
 
     def create_attestation(self, group_id, user_id):
         """Create an attestation for the group_id and user_id with default
@@ -129,9 +132,10 @@ class GroupAttestionRenewer(object):
         self.is_mine_id = hs.is_mine_id
         self.attestations = hs.get_groups_attestation_signing()
 
-        self._renew_attestations_loop = self.clock.looping_call(
-            self._start_renew_attestations, 30 * 60 * 1000,
-        )
+        if not hs.config.worker_app:
+            self._renew_attestations_loop = self.clock.looping_call(
+                self._start_renew_attestations, 30 * 60 * 1000,
+            )
 
     @defer.inlineCallbacks
     def on_renew_attestation(self, group_id, user_id, content):
@@ -191,7 +195,7 @@ class GroupAttestionRenewer(object):
                 yield self.store.update_attestation_renewal(
                     group_id, user_id, attestation
                 )
-            except RequestSendFailed as e:
+            except (RequestSendFailed, HttpResponseException) as e:
                 logger.warning(
                     "Failed to renew attestation of %r in %r: %s",
                     user_id, group_id, e,
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index ac09d03ba9..dca337ec61 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -90,8 +90,8 @@ class BaseHandler(object):
             messages_per_second = override.messages_per_second
             burst_count = override.burst_count
         else:
-            messages_per_second = self.hs.config.rc_messages_per_second
-            burst_count = self.hs.config.rc_message_burst_count
+            messages_per_second = self.hs.config.rc_message.per_second
+            burst_count = self.hs.config.rc_message.burst_count
 
         allowed, time_allowed = self.ratelimiter.can_do_action(
             user_id, time_now,
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
new file mode 100644
index 0000000000..5e0b92eb1c
--- /dev/null
+++ b/synapse/handlers/account_validity.py
@@ -0,0 +1,256 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import email.mime.multipart
+import email.utils
+import logging
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+
+from twisted.internet import defer
+
+from synapse.api.errors import StoreError
+from synapse.types import UserID
+from synapse.util import stringutils
+from synapse.util.logcontext import make_deferred_yieldable
+
+try:
+    from synapse.push.mailer import load_jinja2_templates
+except ImportError:
+    load_jinja2_templates = None
+
+logger = logging.getLogger(__name__)
+
+
+class AccountValidityHandler(object):
+    def __init__(self, hs):
+        self.hs = hs
+        self.store = self.hs.get_datastore()
+        self.sendmail = self.hs.get_sendmail()
+        self.clock = self.hs.get_clock()
+
+        self._account_validity = self.hs.config.account_validity
+
+        if self._account_validity.renew_by_email_enabled and load_jinja2_templates:
+            # Don't do email-specific configuration if renewal by email is disabled.
+            try:
+                app_name = self.hs.config.email_app_name
+
+                self._subject = self._account_validity.renew_email_subject % {
+                    "app": app_name,
+                }
+
+                self._from_string = self.hs.config.email_notif_from % {
+                    "app": app_name,
+                }
+            except Exception:
+                # If substitution failed, fall back to the bare strings.
+                self._subject = self._account_validity.renew_email_subject
+                self._from_string = self.hs.config.email_notif_from
+
+            self._raw_from = email.utils.parseaddr(self._from_string)[1]
+
+            self._template_html, self._template_text = load_jinja2_templates(
+                config=self.hs.config,
+                template_html_name=self.hs.config.email_expiry_template_html,
+                template_text_name=self.hs.config.email_expiry_template_text,
+            )
+
+            # Check the renewal emails to send and send them every 30min.
+            self.clock.looping_call(
+                self.send_renewal_emails,
+                30 * 60 * 1000,
+            )
+
+    @defer.inlineCallbacks
+    def send_renewal_emails(self):
+        """Gets the list of users whose account is expiring in the amount of time
+        configured in the ``renew_at`` parameter from the ``account_validity``
+        configuration, and sends renewal emails to all of these users as long as they
+        have an email 3PID attached to their account.
+        """
+        expiring_users = yield self.store.get_users_expiring_soon()
+
+        if expiring_users:
+            for user in expiring_users:
+                yield self._send_renewal_email(
+                    user_id=user["user_id"],
+                    expiration_ts=user["expiration_ts_ms"],
+                )
+
+    @defer.inlineCallbacks
+    def send_renewal_email_to_user(self, user_id):
+        expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
+        yield self._send_renewal_email(user_id, expiration_ts)
+
+    @defer.inlineCallbacks
+    def _send_renewal_email(self, user_id, expiration_ts):
+        """Sends out a renewal email to every email address attached to the given user
+        with a unique link allowing them to renew their account.
+
+        Args:
+            user_id (str): ID of the user to send email(s) to.
+            expiration_ts (int): Timestamp in milliseconds for the expiration date of
+                this user's account (used in the email templates).
+        """
+        addresses = yield self._get_email_addresses_for_user(user_id)
+
+        # Stop right here if the user doesn't have at least one email address.
+        # In this case, they will have to ask their server admin to renew their
+        # account manually.
+        # We don't need to do a specific check to make sure the account isn't
+        # deactivated, as a deactivated account isn't supposed to have any
+        # email address attached to it.
+        if not addresses:
+            return
+
+        try:
+            user_display_name = yield self.store.get_profile_displayname(
+                UserID.from_string(user_id).localpart
+            )
+            if user_display_name is None:
+                user_display_name = user_id
+        except StoreError:
+            user_display_name = user_id
+
+        renewal_token = yield self._get_renewal_token(user_id)
+        url = "%s_matrix/client/unstable/account_validity/renew?token=%s" % (
+            self.hs.config.public_baseurl,
+            renewal_token,
+        )
+
+        template_vars = {
+            "display_name": user_display_name,
+            "expiration_ts": expiration_ts,
+            "url": url,
+        }
+
+        html_text = self._template_html.render(**template_vars)
+        html_part = MIMEText(html_text, "html", "utf8")
+
+        plain_text = self._template_text.render(**template_vars)
+        text_part = MIMEText(plain_text, "plain", "utf8")
+
+        for address in addresses:
+            raw_to = email.utils.parseaddr(address)[1]
+
+            multipart_msg = MIMEMultipart('alternative')
+            multipart_msg['Subject'] = self._subject
+            multipart_msg['From'] = self._from_string
+            multipart_msg['To'] = address
+            multipart_msg['Date'] = email.utils.formatdate()
+            multipart_msg['Message-ID'] = email.utils.make_msgid()
+            multipart_msg.attach(text_part)
+            multipart_msg.attach(html_part)
+
+            logger.info("Sending renewal email to %s", address)
+
+            yield make_deferred_yieldable(self.sendmail(
+                self.hs.config.email_smtp_host,
+                self._raw_from, raw_to, multipart_msg.as_string().encode('utf8'),
+                reactor=self.hs.get_reactor(),
+                port=self.hs.config.email_smtp_port,
+                requireAuthentication=self.hs.config.email_smtp_user is not None,
+                username=self.hs.config.email_smtp_user,
+                password=self.hs.config.email_smtp_pass,
+                requireTransportSecurity=self.hs.config.require_transport_security
+            ))
+
+        yield self.store.set_renewal_mail_status(
+            user_id=user_id,
+            email_sent=True,
+        )
+
+    @defer.inlineCallbacks
+    def _get_email_addresses_for_user(self, user_id):
+        """Retrieve the list of email addresses attached to a user's account.
+
+        Args:
+            user_id (str): ID of the user to lookup email addresses for.
+
+        Returns:
+            defer.Deferred[list[str]]: Email addresses for this account.
+        """
+        threepids = yield self.store.user_get_threepids(user_id)
+
+        addresses = []
+        for threepid in threepids:
+            if threepid["medium"] == "email":
+                addresses.append(threepid["address"])
+
+        defer.returnValue(addresses)
+
+    @defer.inlineCallbacks
+    def _get_renewal_token(self, user_id):
+        """Generates a 32-byte long random string that will be inserted into the
+        user's renewal email's unique link, then saves it into the database.
+
+        Args:
+            user_id (str): ID of the user to generate a string for.
+
+        Returns:
+            defer.Deferred[str]: The generated string.
+
+        Raises:
+            StoreError(500): Couldn't generate a unique string after 5 attempts.
+        """
+        attempts = 0
+        while attempts < 5:
+            try:
+                renewal_token = stringutils.random_string(32)
+                yield self.store.set_renewal_token_for_user(user_id, renewal_token)
+                defer.returnValue(renewal_token)
+            except StoreError:
+                attempts += 1
+        raise StoreError(500, "Couldn't generate a unique renewal token after 5 attempts.")
+
+    @defer.inlineCallbacks
+    def renew_account(self, renewal_token):
+        """Renews the account attached to a given renewal token by pushing back the
+        expiration date by the current validity period in the server's configuration.
+
+        Args:
+            renewal_token (str): Token sent with the renewal request.
+        """
+        user_id = yield self.store.get_user_from_renewal_token(renewal_token)
+        logger.debug("Renewing an account for user %s", user_id)
+        yield self.renew_account_for_user(user_id)
+
+    @defer.inlineCallbacks
+    def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False):
+        """Renews the account attached to a given user by pushing back the
+        expiration date by the current validity period in the server's
+        configuration.
+
+        Args:
+            user_id (str): ID of the user whose account to renew.
+            expiration_ts (int): New expiration date. Defaults to now + validity period.
+            email_sent (bool): Whether an email has been sent for this validity period.
+                Defaults to False.
+
+        Returns:
+            defer.Deferred[int]: New expiration date for this account, as a timestamp
+                in milliseconds since epoch.
+        """
+        if expiration_ts is None:
+            expiration_ts = self.clock.time_msec() + self._account_validity.period
+
+        yield self.store.set_account_validity_for_user(
+            user_id=user_id,
+            expiration_ts=expiration_ts,
+            email_sent=email_sent,
+        )
+
+        defer.returnValue(expiration_ts)
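
End to end, the renewal flow is: mint a token, mail a link embedding it, then look the token up when the link is visited. A compressed sketch with an in-memory store (all names hypothetical; the real handler persists tokens and uses stringutils.random_string):

# Illustrative sketch: the renewal-token round trip with an in-memory store.
import random
import string
import time

TOKENS = {}  # renewal_token -> user_id
VALIDITY_PERIOD_MS = 30 * 24 * 60 * 60 * 1000  # e.g. a 30-day period

def issue_renewal_link(public_baseurl, user_id):
    token = "".join(random.choice(string.ascii_letters) for _ in range(32))
    TOKENS[token] = user_id
    # public_baseurl is assumed to end with a trailing slash, as above
    return "%s_matrix/client/unstable/account_validity/renew?token=%s" % (
        public_baseurl,
        token,
    )

def renew_account_sketch(token):
    user_id = TOKENS.pop(token)  # unknown token -> KeyError
    new_expiration_ts = int(time.time() * 1000) + VALIDITY_PERIOD_MS
    return user_id, new_expiration_ts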
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index aa5d89a9ac..a0cf37a9f9 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -162,7 +162,7 @@ class AuthHandler(BaseHandler):
         defer.returnValue(params)
 
     @defer.inlineCallbacks
-    def check_auth(self, flows, clientdict, clientip):
+    def check_auth(self, flows, clientdict, clientip, password_servlet=False):
         """
         Takes a dictionary sent by the client in the login / registration
         protocol and handles the User-Interactive Auth flow.
@@ -186,6 +186,16 @@ class AuthHandler(BaseHandler):
 
             clientip (str): The IP address of the client.
 
+            password_servlet (bool): Whether the request originated from
+                PasswordRestServlet.
+                XXX: This is a temporary hack to distinguish between checking
+                for threepid validations locally (in the case of password
+                resets) and using the identity server (in the case of binding
+                a 3PID during registration). Once we start using the
+                homeserver for both tasks, this distinction will no longer be
+                necessary.
+
         Returns:
             defer.Deferred[dict, dict, str]: a deferred tuple of
                 (creds, params, session_id).
@@ -241,7 +251,9 @@ class AuthHandler(BaseHandler):
         if 'type' in authdict:
             login_type = authdict['type']
             try:
-                result = yield self._check_auth_dict(authdict, clientip)
+                result = yield self._check_auth_dict(
+                    authdict, clientip, password_servlet=password_servlet,
+                )
                 if result:
                     creds[login_type] = result
                     self._save_session(session)
@@ -351,7 +363,7 @@ class AuthHandler(BaseHandler):
         return sess.setdefault('serverdict', {}).get(key, default)
 
     @defer.inlineCallbacks
-    def _check_auth_dict(self, authdict, clientip):
+    def _check_auth_dict(self, authdict, clientip, password_servlet=False):
         """Attempt to validate the auth dict provided by a client
 
         Args:
@@ -369,7 +381,13 @@ class AuthHandler(BaseHandler):
         login_type = authdict['type']
         checker = self.checkers.get(login_type)
         if checker is not None:
-            res = yield checker(authdict, clientip)
+            # XXX: Temporary workaround for having Synapse handle password resets
+            # See AuthHandler.check_auth for further details
+            res = yield checker(
+                authdict,
+                clientip=clientip,
+                password_servlet=password_servlet,
+            )
             defer.returnValue(res)
 
         # build a v1-login-style dict out of the authdict and fall back to the
@@ -383,7 +401,7 @@ class AuthHandler(BaseHandler):
         defer.returnValue(canonical_id)
 
     @defer.inlineCallbacks
-    def _check_recaptcha(self, authdict, clientip):
+    def _check_recaptcha(self, authdict, clientip, **kwargs):
         try:
             user_response = authdict["response"]
         except KeyError:
@@ -429,20 +447,20 @@ class AuthHandler(BaseHandler):
                 defer.returnValue(True)
         raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
 
-    def _check_email_identity(self, authdict, _):
-        return self._check_threepid('email', authdict)
+    def _check_email_identity(self, authdict, **kwargs):
+        return self._check_threepid('email', authdict, **kwargs)
 
-    def _check_msisdn(self, authdict, _):
+    def _check_msisdn(self, authdict, **kwargs):
         return self._check_threepid('msisdn', authdict)
 
-    def _check_dummy_auth(self, authdict, _):
+    def _check_dummy_auth(self, authdict, **kwargs):
         return defer.succeed(True)
 
-    def _check_terms_auth(self, authdict, _):
+    def _check_terms_auth(self, authdict, **kwargs):
         return defer.succeed(True)
 
     @defer.inlineCallbacks
-    def _check_threepid(self, medium, authdict):
+    def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs):
         if 'threepid_creds' not in authdict:
             raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
 
@@ -451,7 +469,30 @@ class AuthHandler(BaseHandler):
         identity_handler = self.hs.get_handlers().identity_handler
 
         logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,))
-        threepid = yield identity_handler.threepid_from_creds(threepid_creds)
+        if (
+            not password_servlet
+            or self.hs.config.email_password_reset_behaviour == "remote"
+        ):
+            threepid = yield identity_handler.threepid_from_creds(threepid_creds)
+        elif self.hs.config.email_password_reset_behaviour == "local":
+            row = yield self.store.get_threepid_validation_session(
+                medium,
+                threepid_creds["client_secret"],
+                sid=threepid_creds["sid"],
+                validated=True,
+            )
+
+            threepid = {
+                "medium": row["medium"],
+                "address": row["address"],
+                "validated_at": row["validated_at"],
+            } if row else None
+
+            if row:
+                # Valid threepid returned, delete from the db
+                yield self.store.delete_threepid_session(threepid_creds["sid"])
+        else:
+            raise SynapseError(400, "Password resets are not enabled on this homeserver")
 
         if not threepid:
             raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
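
The behaviour switch added to _check_threepid, in isolation (the config value and validation sessions are stubbed; local_sessions is a hypothetical stand-in for the threepid validation session table):

# Illustrative sketch: remote vs. local threepid validation for password
# resets, mirroring the branching above.
def validate_threepid_sketch(behaviour, creds, identity_server, local_sessions):
    if behaviour == "remote":
        return identity_server.threepid_from_creds(creds)
    elif behaviour == "local":
        key = (creds["client_secret"], creds["sid"])
        row = local_sessions.get(key)
        if row and row.get("validated_at"):
            del local_sessions[key]  # one-shot: delete once consumed
            return {
                "medium": row["medium"],
                "address": row["address"],
                "validated_at": row["validated_at"],
            }
        return None
    raise ValueError("Password resets are not enabled on this homeserver")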
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 6a91f7698e..7378b56c1d 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2017, 2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -42,6 +43,8 @@ class DeactivateAccountHandler(BaseHandler):
         # it left off (if it has work left to do).
         hs.get_reactor().callWhenRunning(self._start_user_parting)
 
+        self._account_validity_enabled = hs.config.account_validity.enabled
+
     @defer.inlineCallbacks
     def deactivate_account(self, user_id, erase_data, id_server=None):
         """Deactivate a user's account
@@ -114,6 +117,13 @@ class DeactivateAccountHandler(BaseHandler):
         # parts users from rooms (if it isn't already running)
         self._start_user_parting()
 
+        # Remove all information on the user from the account_validity table.
+        if self._account_validity_enabled:
+            yield self.store.delete_account_validity_for_user(user_id)
+
+        # Mark the user as deactivated.
+        yield self.store.set_user_deactivated_status(user_id, True)
+
         defer.returnValue(identity_server_supports_unbinding)
 
     def _start_user_parting(self):
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 27bd06df5d..a12f9508d8 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -19,7 +19,7 @@ import string
 
 from twisted.internet import defer
 
-from synapse.api.constants import EventTypes
+from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes
 from synapse.api.errors import (
     AuthError,
     CodeMessageException,
@@ -43,8 +43,10 @@ class DirectoryHandler(BaseHandler):
         self.state = hs.get_state_handler()
         self.appservice_handler = hs.get_application_service_handler()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.store = hs.get_datastore()
         self.config = hs.config
         self.enable_room_list_search = hs.config.enable_room_list_search
+        self.require_membership = hs.config.require_membership_for_aliases
 
         self.federation = hs.get_federation_client()
         hs.get_federation_registry().register_query_handler(
@@ -83,7 +85,7 @@ class DirectoryHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def create_association(self, requester, room_alias, room_id, servers=None,
-                           send_event=True):
+                           send_event=True, check_membership=True):
         """Attempt to create a new alias
 
         Args:
@@ -93,6 +95,8 @@ class DirectoryHandler(BaseHandler):
             servers (list[str]|None): List of servers that others servers
                 should try and join via
             send_event (bool): Whether to send an updated m.room.aliases event
+            check_membership (bool): Whether to check if the user is in the room
+                before the alias can be set (if the server's config requires it).
 
         Returns:
             Deferred
@@ -100,6 +104,13 @@ class DirectoryHandler(BaseHandler):
 
         user_id = requester.user.to_string()
 
+        if len(room_alias.to_string()) > MAX_ALIAS_LENGTH:
+            raise SynapseError(
+                400,
+                "Can't create aliases longer than %s characters" % MAX_ALIAS_LENGTH,
+                Codes.INVALID_PARAM,
+            )
+
         service = requester.app_service
         if service:
             if not service.is_interested_in_alias(room_alias.to_string()):
@@ -108,6 +119,14 @@ class DirectoryHandler(BaseHandler):
                     " this kind of alias.", errcode=Codes.EXCLUSIVE
                 )
         else:
+            if self.require_membership and check_membership:
+                rooms_for_user = yield self.store.get_rooms_for_user(user_id)
+                if room_id not in rooms_for_user:
+                    raise AuthError(
+                        403,
+                        "You must be in the room to create an alias for it",
+                    )
+
             if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
                 raise AuthError(
                     403, "This user is not permitted to create this alias",
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 1b4d8c74ae..eb525070cf 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -21,7 +21,6 @@ from twisted.internet import defer
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError, SynapseError
 from synapse.events import EventBase
-from synapse.events.utils import serialize_event
 from synapse.types import UserID
 from synapse.util.logutils import log_function
 from synapse.visibility import filter_events_for_client
@@ -50,6 +49,7 @@ class EventStreamHandler(BaseHandler):
         self.notifier = hs.get_notifier()
         self.state = hs.get_state_handler()
         self._server_notices_sender = hs.get_server_notices_sender()
+        self._event_serializer = hs.get_event_client_serializer()
 
     @defer.inlineCallbacks
     @log_function
@@ -120,9 +120,12 @@ class EventStreamHandler(BaseHandler):
 
             time_now = self.clock.time_msec()
 
-            chunks = [
-                serialize_event(e, time_now, as_client_event) for e in events
-            ]
+            chunks = yield self._event_serializer.serialize_events(
+                events, time_now, as_client_event=as_client_event,
+                # We don't bundle "live" events, as otherwise clients
+                # will end up double counting annotations.
+                bundle_aggregations=False,
+            )
 
             chunk = {
                 "chunk": chunks,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index ade47710e7..51d7eb274b 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018, 2019 New Vector Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -33,8 +34,10 @@ from synapse.api.constants import EventTypes, Membership, RejectedReason
 from synapse.api.errors import (
     AuthError,
     CodeMessageException,
+    Codes,
     FederationDeniedError,
     FederationError,
+    RequestSendFailed,
     StoreError,
     SynapseError,
 )
@@ -126,6 +129,8 @@ class FederationHandler(BaseHandler):
         self.room_queues = {}
         self._room_pdu_linearizer = Linearizer("fed_room_pdu")
 
+        self.third_party_event_rules = hs.get_third_party_event_rules()
+
     @defer.inlineCallbacks
     def on_receive_pdu(
             self, origin, pdu, sent_to_us_directly=False,
@@ -1257,6 +1262,15 @@ class FederationHandler(BaseHandler):
             logger.warn("Failed to create join %r because %s", event, e)
             raise e
 
+        event_allowed = yield self.third_party_event_rules.check_event_allowed(
+            event, context,
+        )
+        if not event_allowed:
+            logger.info("Creation of join %s forbidden by third-party rules", event)
+            raise SynapseError(
+                403, "This event is not allowed in this context", Codes.FORBIDDEN,
+            )
+
         # The remote hasn't signed it yet, obviously. We'll do the full checks
         # when we get the event back in `on_send_join_request`
         yield self.auth.check_from_context(
@@ -1299,6 +1313,15 @@ class FederationHandler(BaseHandler):
             origin, event
         )
 
+        event_allowed = yield self.third_party_event_rules.check_event_allowed(
+            event, context,
+        )
+        if not event_allowed:
+            logger.info("Sending of join %s forbidden by third-party rules", event)
+            raise SynapseError(
+                403, "This event is not allowed in this context", Codes.FORBIDDEN,
+            )
+
         logger.debug(
             "on_send_join_request: After _handle_new_event: %s, sigs: %s",
             event.event_id,
@@ -1457,6 +1480,15 @@ class FederationHandler(BaseHandler):
             builder=builder,
         )
 
+        event_allowed = yield self.third_party_event_rules.check_event_allowed(
+            event, context,
+        )
+        if not event_allowed:
+            logger.warning("Creation of leave %s forbidden by third-party rules", event)
+            raise SynapseError(
+                403, "This event is not allowed in this context", Codes.FORBIDDEN,
+            )
+
         try:
             # The remote hasn't signed it yet, obviously. We'll do the full checks
             # when we get the event back in `on_send_leave_request`
@@ -1483,10 +1515,19 @@ class FederationHandler(BaseHandler):
 
         event.internal_metadata.outlier = False
 
-        yield self._handle_new_event(
+        context = yield self._handle_new_event(
             origin, event
         )
 
+        event_allowed = yield self.third_party_event_rules.check_event_allowed(
+            event, context,
+        )
+        if not event_allowed:
+            logger.info("Sending of leave %s forbidden by third-party rules", event)
+            raise SynapseError(
+                403, "This event is not allowed in this context", Codes.FORBIDDEN,
+            )
+
         logger.debug(
             "on_send_leave_request: After _handle_new_event: %s, sigs: %s",
             event.event_id,
@@ -1916,6 +1957,11 @@ class FederationHandler(BaseHandler):
                     event.room_id, latest_event_ids=extrem_ids,
                 )
 
+            logger.debug(
+                "Doing soft-fail check for %s: state %s",
+                event.event_id, current_state_ids,
+            )
+
             # Now check if event pass auth against said current state
             auth_types = auth_types_for_event(event)
             current_state_ids = [
@@ -1932,7 +1978,7 @@ class FederationHandler(BaseHandler):
                 self.auth.check(room_version, event, auth_events=current_auth_events)
             except AuthError as e:
                 logger.warn(
-                    "Failed current state auth resolution for %r because %s",
+                    "Soft-failing %r because %s",
                     event, e,
                 )
                 event.internal_metadata.soft_failed = True
@@ -2008,15 +2054,65 @@ class FederationHandler(BaseHandler):
 
         Args:
             origin (str):
-            event (synapse.events.FrozenEvent):
+            event (synapse.events.EventBase):
+            context (synapse.events.snapshot.EventContext):
+            auth_events (dict[(str, str)->synapse.events.EventBase]):
+                Map from (event_type, state_key) to event
+
+                What we expect the event's auth_events to be, based on the event's
+                position in the DAG. I think? maybe??
+
+                Also NB that this function adds entries to it.
+        Returns:
+            defer.Deferred[None]
+        """
+        room_version = yield self.store.get_room_version(event.room_id)
+
+        try:
+            yield self._update_auth_events_and_context_for_auth(
+                origin, event, context, auth_events
+            )
+        except Exception:
+            # We don't really mind if the above fails, so let's not fail
+            # processing if it does. However, it really shouldn't fail, so
+            # let's still log as an exception since we'll still want to fix
+            # any bugs.
+            logger.exception(
+                "Failed to double check auth events for %s with remote. "
+                "Ignoring failure and continuing processing of event.",
+                event.event_id,
+            )
+
+        try:
+            self.auth.check(room_version, event, auth_events=auth_events)
+        except AuthError as e:
+            logger.warn("Failed auth resolution for %r because %s", event, e)
+            raise e
+
+    @defer.inlineCallbacks
+    def _update_auth_events_and_context_for_auth(
+        self, origin, event, context, auth_events
+    ):
+        """Helper for do_auth. See there for docs.
+
+        Checks whether a given event has the expected auth events. If it
+        doesn't then we talk to the remote server to compare state to see if
+        we can come to a consensus (e.g. if one server missed some valid
+        state).
+
+        This attempts to resolve any potential divergence of state between
+        servers, but is not essential and so failures should not block further
+        processing of the event.
+
+        Args:
+            origin (str):
+            event (synapse.events.EventBase):
             context (synapse.events.snapshot.EventContext):
-            auth_events (dict[(str, str)->str]):
+            auth_events (dict[(str, str)->synapse.events.EventBase]):
 
         Returns:
             defer.Deferred[None]
         """
-        # Check if we have all the auth events.
-        current_state = set(e.event_id for e in auth_events.values())
         event_auth_events = set(event.auth_event_ids())
 
         if event.is_state():
@@ -2024,11 +2120,21 @@ class FederationHandler(BaseHandler):
         else:
             event_key = None
 
-        if event_auth_events - current_state:
+        # if the event's auth_events refers to events which are not in our
+        # calculated auth_events, we need to fetch those events from somewhere.
+        #
+        # we start by fetching them from the store, and then try calling /event_auth/.
+        missing_auth = event_auth_events.difference(
+            e.event_id for e in auth_events.values()
+        )
+
+        if missing_auth:
             # TODO: can we use store.have_seen_events here instead?
             have_events = yield self.store.get_seen_events_with_rejections(
-                event_auth_events - current_state
+                missing_auth
             )
+            logger.debug("Got events %s from store", have_events)
+            missing_auth.difference_update(have_events.keys())
         else:
             have_events = {}
 
@@ -2037,17 +2143,22 @@ class FederationHandler(BaseHandler):
             for e in auth_events.values()
         })
 
-        seen_events = set(have_events.keys())
-
-        missing_auth = event_auth_events - seen_events - current_state
-
         if missing_auth:
-            logger.info("Missing auth: %s", missing_auth)
             # If we don't have all the auth events, we need to get them.
+            logger.info(
+                "auth_events contains unknown events: %s",
+                missing_auth,
+            )
             try:
-                remote_auth_chain = yield self.federation_client.get_event_auth(
-                    origin, event.room_id, event.event_id
-                )
+                try:
+                    remote_auth_chain = yield self.federation_client.get_event_auth(
+                        origin, event.room_id, event.event_id
+                    )
+                except RequestSendFailed as e:
+                    # The other side isn't around or doesn't implement the
+                    # endpoint, so lets just bail out.
+                    logger.info("Failed to get event auth from remote: %s", e)
+                    return
 
                 seen_remotes = yield self.store.have_seen_events(
                     [e.event_id for e in remote_auth_chain]
@@ -2084,145 +2195,174 @@ class FederationHandler(BaseHandler):
                 have_events = yield self.store.get_seen_events_with_rejections(
                     event.auth_event_ids()
                 )
-                seen_events = set(have_events.keys())
             except Exception:
                 # FIXME:
                 logger.exception("Failed to get auth chain")
 
+        if event.internal_metadata.is_outlier():
+            logger.info("Skipping auth_event fetch for outlier")
+            return
+
         # FIXME: Assumes we have and stored all the state for all the
         # prev_events
-        current_state = set(e.event_id for e in auth_events.values())
-        different_auth = event_auth_events - current_state
+        different_auth = event_auth_events.difference(
+            e.event_id for e in auth_events.values()
+        )
 
-        room_version = yield self.store.get_room_version(event.room_id)
+        if not different_auth:
+            return
 
-        if different_auth and not event.internal_metadata.is_outlier():
-            # Do auth conflict res.
-            logger.info("Different auth: %s", different_auth)
-
-            different_events = yield logcontext.make_deferred_yieldable(
-                defer.gatherResults([
-                    logcontext.run_in_background(
-                        self.store.get_event,
-                        d,
-                        allow_none=True,
-                        allow_rejected=False,
-                    )
-                    for d in different_auth
-                    if d in have_events and not have_events[d]
-                ], consumeErrors=True)
-            ).addErrback(unwrapFirstError)
-
-            if different_events:
-                local_view = dict(auth_events)
-                remote_view = dict(auth_events)
-                remote_view.update({
-                    (d.type, d.state_key): d for d in different_events if d
-                })
+        logger.info(
+            "auth_events refers to events which are not in our calculated auth "
+            "chain: %s",
+            different_auth,
+        )
+
+        room_version = yield self.store.get_room_version(event.room_id)
 
-                new_state = yield self.state_handler.resolve_events(
-                    room_version,
-                    [list(local_view.values()), list(remote_view.values())],
-                    event
+        different_events = yield logcontext.make_deferred_yieldable(
+            defer.gatherResults([
+                logcontext.run_in_background(
+                    self.store.get_event,
+                    d,
+                    allow_none=True,
+                    allow_rejected=False,
                 )
+                for d in different_auth
+                if d in have_events and not have_events[d]
+            ], consumeErrors=True)
+        ).addErrback(unwrapFirstError)
+
+        if different_events:
+            local_view = dict(auth_events)
+            remote_view = dict(auth_events)
+            remote_view.update({
+                (d.type, d.state_key): d for d in different_events if d
+            })
 
-                auth_events.update(new_state)
+            new_state = yield self.state_handler.resolve_events(
+                room_version,
+                [list(local_view.values()), list(remote_view.values())],
+                event
+            )
 
-                current_state = set(e.event_id for e in auth_events.values())
-                different_auth = event_auth_events - current_state
+            logger.info(
+                "After state res: updating auth_events with new state %s",
+                {
+                    (d.type, d.state_key): d.event_id for d in new_state.values()
+                    if auth_events.get((d.type, d.state_key)) != d
+                },
+            )
 
-                yield self._update_context_for_auth_events(
-                    event, context, auth_events, event_key,
-                )
+            auth_events.update(new_state)
 
-        if different_auth and not event.internal_metadata.is_outlier():
-            logger.info("Different auth after resolution: %s", different_auth)
+            different_auth = event_auth_events.difference(
+                e.event_id for e in auth_events.values()
+            )
 
-            # Only do auth resolution if we have something new to say.
-            # We can't rove an auth failure.
-            do_resolution = False
+            yield self._update_context_for_auth_events(
+                event, context, auth_events, event_key,
+            )
 
-            provable = [
-                RejectedReason.NOT_ANCESTOR, RejectedReason.NOT_ANCESTOR,
-            ]
+        if not different_auth:
+            # we're done
+            return
 
-            for e_id in different_auth:
-                if e_id in have_events:
-                    if have_events[e_id] in provable:
-                        do_resolution = True
-                        break
+        logger.info(
+            "auth_events still refers to events which are not in the calculated auth "
+            "chain after state resolution: %s",
+            different_auth,
+        )
 
-            if do_resolution:
-                prev_state_ids = yield context.get_prev_state_ids(self.store)
-                # 1. Get what we think is the auth chain.
-                auth_ids = yield self.auth.compute_auth_events(
-                    event, prev_state_ids
-                )
-                local_auth_chain = yield self.store.get_auth_chain(
-                    auth_ids, include_given=True
-                )
+        # Only do auth resolution if we have something new to say.
+        # We can't prove an auth failure.
+        do_resolution = False
 
-                try:
-                    # 2. Get remote difference.
-                    result = yield self.federation_client.query_auth(
-                        origin,
-                        event.room_id,
-                        event.event_id,
-                        local_auth_chain,
-                    )
+        for e_id in different_auth:
+            if e_id in have_events:
+                if have_events[e_id] == RejectedReason.NOT_ANCESTOR:
+                    do_resolution = True
+                    break
 
-                    seen_remotes = yield self.store.have_seen_events(
-                        [e.event_id for e in result["auth_chain"]]
-                    )
+        if not do_resolution:
+            logger.info(
+                "Skipping auth resolution due to lack of provable rejection reasons"
+            )
+            return
 
-                    # 3. Process any remote auth chain events we haven't seen.
-                    for ev in result["auth_chain"]:
-                        if ev.event_id in seen_remotes:
-                            continue
+        logger.info("Doing auth resolution")
 
-                        if ev.event_id == event.event_id:
-                            continue
+        prev_state_ids = yield context.get_prev_state_ids(self.store)
 
-                        try:
-                            auth_ids = ev.auth_event_ids()
-                            auth = {
-                                (e.type, e.state_key): e
-                                for e in result["auth_chain"]
-                                if e.event_id in auth_ids
-                                or event.type == EventTypes.Create
-                            }
-                            ev.internal_metadata.outlier = True
+        # 1. Get what we think is the auth chain.
+        auth_ids = yield self.auth.compute_auth_events(
+            event, prev_state_ids
+        )
+        local_auth_chain = yield self.store.get_auth_chain(
+            auth_ids, include_given=True
+        )
 
-                            logger.debug(
-                                "do_auth %s different_auth: %s",
-                                event.event_id, e.event_id
-                            )
+        try:
+            # 2. Get remote difference.
+            try:
+                result = yield self.federation_client.query_auth(
+                    origin,
+                    event.room_id,
+                    event.event_id,
+                    local_auth_chain,
+                )
+            except RequestSendFailed as e:
+                # The other side isn't around or doesn't implement the
+                # endpoint, so let's just bail out.
+                logger.info("Failed to query auth from remote: %s", e)
+                return
+
+            seen_remotes = yield self.store.have_seen_events(
+                [e.event_id for e in result["auth_chain"]]
+            )
 
-                            yield self._handle_new_event(
-                                origin, ev, auth_events=auth
-                            )
+            # 3. Process any remote auth chain events we haven't seen.
+            for ev in result["auth_chain"]:
+                if ev.event_id in seen_remotes:
+                    continue
 
-                            if ev.event_id in event_auth_events:
-                                auth_events[(ev.type, ev.state_key)] = ev
-                        except AuthError:
-                            pass
+                if ev.event_id == event.event_id:
+                    continue
 
-                except Exception:
-                    # FIXME:
-                    logger.exception("Failed to query auth chain")
+                try:
+                    auth_ids = ev.auth_event_ids()
+                    auth = {
+                        (e.type, e.state_key): e
+                        for e in result["auth_chain"]
+                        if e.event_id in auth_ids
+                        or event.type == EventTypes.Create
+                    }
+                    ev.internal_metadata.outlier = True
+
+                    logger.debug(
+                        "do_auth %s different_auth: %s",
+                        event.event_id, e.event_id
+                    )
 
-                # 4. Look at rejects and their proofs.
-                # TODO.
+                    yield self._handle_new_event(
+                        origin, ev, auth_events=auth
+                    )
 
-                yield self._update_context_for_auth_events(
-                    event, context, auth_events, event_key,
-                )
+                    if ev.event_id in event_auth_events:
+                        auth_events[(ev.type, ev.state_key)] = ev
+                except AuthError:
+                    pass
 
-        try:
-            self.auth.check(room_version, event, auth_events=auth_events)
-        except AuthError as e:
-            logger.warn("Failed auth resolution for %r because %s", event, e)
-            raise e
+        except Exception:
+            # FIXME:
+            logger.exception("Failed to query auth chain")
+
+        # 4. Look at rejects and their proofs.
+        # TODO.
+
+        yield self._update_context_for_auth_events(
+            event, context, auth_events, event_key,
+        )
 
     @defer.inlineCallbacks
     def _update_context_for_auth_events(self, event, context, auth_events,
@@ -2450,6 +2590,18 @@ class FederationHandler(BaseHandler):
                 builder=builder
             )
 
+            event_allowed = yield self.third_party_event_rules.check_event_allowed(
+                event, context,
+            )
+            if not event_allowed:
+                logger.info(
+                    "Creation of threepid invite %s forbidden by third-party rules",
+                    event,
+                )
+                raise SynapseError(
+                    403, "This event is not allowed in this context", Codes.FORBIDDEN,
+                )
+
             event, context = yield self.add_display_name_to_third_party_invite(
                 room_version, event_dict, event, context
             )
@@ -2498,6 +2650,18 @@ class FederationHandler(BaseHandler):
             builder=builder,
         )
 
+        event_allowed = yield self.third_party_event_rules.check_event_allowed(
+            event, context,
+        )
+        if not event_allowed:
+            logger.warning(
+                "Exchange of threepid invite %s forbidden by third-party rules",
+                event,
+            )
+            raise SynapseError(
+                403, "This event is not allowed in this context", Codes.FORBIDDEN,
+            )
+
         event, context = yield self.add_display_name_to_third_party_invite(
             room_version, event_dict, event, context
         )
@@ -2513,12 +2677,6 @@ class FederationHandler(BaseHandler):
         # though the sender isn't a local user.
         event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)
 
-        # XXX we send the invite here, but send_membership_event also sends it,
-        # so we end up making two requests. I think this is redundant.
-        returned_invite = yield self.send_invite(origin, event)
-        # TODO: Make sure the signatures actually are correct.
-        event.signatures.update(returned_invite.signatures)
-
         member_handler = self.hs.get_room_member_handler()
         yield member_handler.send_membership_event(None, event, context)
 
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 02c508acec..f60ace02e8 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -49,9 +49,7 @@ def _create_rerouter(func_name):
             def http_response_errback(failure):
                 failure.trap(HttpResponseException)
                 e = failure.value
-                if e.code == 403:
-                    raise e.to_synapse_error()
-                return failure
+                raise e.to_synapse_error()
 
             def request_failed_errback(failure):
                 failure.trap(RequestSendFailed)
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 22469486d7..04caf65793 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -247,7 +247,14 @@ class IdentityHandler(BaseHandler):
         defer.returnValue(changed)
 
     @defer.inlineCallbacks
-    def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
+    def requestEmailToken(
+        self,
+        id_server,
+        email,
+        client_secret,
+        send_attempt,
+        next_link=None,
+    ):
         if not self._should_trust_id_server(id_server):
             raise SynapseError(
                 400, "Untrusted ID server '%s'" % id_server,
@@ -259,7 +266,9 @@ class IdentityHandler(BaseHandler):
             'client_secret': client_secret,
             'send_attempt': send_attempt,
         }
-        params.update(kwargs)
+
+        if next_link:
+            params.update({'next_link': next_link})
 
         try:
             data = yield self.http_client.post_json_get_json(
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 7dfae78db0..aaee5db0b7 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -19,7 +19,6 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError, Codes, SynapseError
-from synapse.events.utils import serialize_event
 from synapse.events.validator import EventValidator
 from synapse.handlers.presence import format_user_presence_state
 from synapse.streams.config import PaginationConfig
@@ -43,6 +42,7 @@ class InitialSyncHandler(BaseHandler):
         self.clock = hs.get_clock()
         self.validator = EventValidator()
         self.snapshot_cache = SnapshotCache()
+        self._event_serializer = hs.get_event_client_serializer()
 
     def snapshot_all_rooms(self, user_id=None, pagin_config=None,
                            as_client_event=True, include_archived=False):
@@ -138,7 +138,9 @@ class InitialSyncHandler(BaseHandler):
                 d["inviter"] = event.sender
 
                 invite_event = yield self.store.get_event(event.event_id)
-                d["invite"] = serialize_event(invite_event, time_now, as_client_event)
+                d["invite"] = yield self._event_serializer.serialize_event(
+                    invite_event, time_now, as_client_event,
+                )
 
             rooms_ret.append(d)
 
@@ -185,18 +187,21 @@ class InitialSyncHandler(BaseHandler):
                 time_now = self.clock.time_msec()
 
                 d["messages"] = {
-                    "chunk": [
-                        serialize_event(m, time_now, as_client_event)
-                        for m in messages
-                    ],
+                    "chunk": (
+                        yield self._event_serializer.serialize_events(
+                            messages, time_now=time_now,
+                            as_client_event=as_client_event,
+                        )
+                    ),
                     "start": start_token.to_string(),
                     "end": end_token.to_string(),
                 }
 
-                d["state"] = [
-                    serialize_event(c, time_now, as_client_event)
-                    for c in current_state.values()
-                ]
+                d["state"] = yield self._event_serializer.serialize_events(
+                    current_state.values(),
+                    time_now=time_now,
+                    as_client_event=as_client_event
+                )
 
                 account_data_events = []
                 tags = tags_by_room.get(event.room_id)
@@ -337,11 +342,15 @@ class InitialSyncHandler(BaseHandler):
             "membership": membership,
             "room_id": room_id,
             "messages": {
-                "chunk": [serialize_event(m, time_now) for m in messages],
+                "chunk": (yield self._event_serializer.serialize_events(
+                    messages, time_now,
+                )),
                 "start": start_token.to_string(),
                 "end": end_token.to_string(),
             },
-            "state": [serialize_event(s, time_now) for s in room_state.values()],
+            "state": (yield self._event_serializer.serialize_events(
+                room_state.values(), time_now,
+            )),
             "presence": [],
             "receipts": [],
         })
@@ -355,10 +364,9 @@ class InitialSyncHandler(BaseHandler):
 
         # TODO: These concurrently
         time_now = self.clock.time_msec()
-        state = [
-            serialize_event(x, time_now)
-            for x in current_state.values()
-        ]
+        state = yield self._event_serializer.serialize_events(
+            current_state.values(), time_now,
+        )
 
         now_token = yield self.hs.get_event_sources().get_current_token()
 
@@ -425,7 +433,9 @@ class InitialSyncHandler(BaseHandler):
         ret = {
             "room_id": room_id,
             "messages": {
-                "chunk": [serialize_event(m, time_now) for m in messages],
+                "chunk": (yield self._event_serializer.serialize_events(
+                    messages, time_now,
+                )),
                 "start": start_token.to_string(),
                 "end": end_token.to_string(),
             },
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 224d34ef3a..11650dc80c 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014 - 2016 OpenMarket Ltd
-# Copyright 2017 - 2018 New Vector Ltd
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +23,7 @@ from canonicaljson import encode_canonical_json, json
 from twisted.internet import defer
 from twisted.internet.defer import succeed
 
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, RelationTypes
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -32,7 +33,6 @@ from synapse.api.errors import (
 )
 from synapse.api.room_versions import RoomVersions
 from synapse.api.urls import ConsentURIBuilder
-from synapse.events.utils import serialize_event
 from synapse.events.validator import EventValidator
 from synapse.replication.http.send_event import ReplicationSendEventRestServlet
 from synapse.storage.state import StateFilter
@@ -57,6 +57,7 @@ class MessageHandler(object):
         self.clock = hs.get_clock()
         self.state = hs.get_state_handler()
         self.store = hs.get_datastore()
+        self._event_serializer = hs.get_event_client_serializer()
 
     @defer.inlineCallbacks
     def get_room_data(self, user_id=None, room_id=None,
@@ -164,9 +165,13 @@ class MessageHandler(object):
                 room_state = room_state[membership_event_id]
 
         now = self.clock.time_msec()
-        defer.returnValue(
-            [serialize_event(c, now) for c in room_state.values()]
+        events = yield self._event_serializer.serialize_events(
+            room_state.values(), now,
+            # We don't bother bundling aggregations in when asked for state
+            # events, as clients won't use them.
+            bundle_aggregations=False,
         )
+        defer.returnValue(events)
 
     @defer.inlineCallbacks
     def get_joined_members(self, requester, room_id):
@@ -228,6 +233,7 @@ class EventCreationHandler(object):
         self.ratelimiter = hs.get_ratelimiter()
         self.notifier = hs.get_notifier()
         self.config = hs.config
+        self.require_membership_for_aliases = hs.config.require_membership_for_aliases
 
         self.send_event_to_master = ReplicationSendEventRestServlet.make_client(hs)
 
@@ -243,6 +249,7 @@ class EventCreationHandler(object):
         self.action_generator = hs.get_action_generator()
 
         self.spam_checker = hs.get_spam_checker()
+        self.third_party_event_rules = hs.get_third_party_event_rules()
 
         self._block_events_without_consent_error = (
             self.config.block_events_without_consent_error
@@ -336,6 +343,35 @@ class EventCreationHandler(object):
             prev_events_and_hashes=prev_events_and_hashes,
         )
 
+        # In an ideal world we wouldn't need the second part of this condition. However,
+        # this behaviour isn't spec'd yet, meaning we need to be able to turn it off.
+        # Another reason is that this code is also evaluated each time a new
+        # m.room.aliases event is created, which includes hitting a /directory route.
+        # Therefore not including this condition here would render the similar one in
+        # synapse.handlers.directory pointless.
+        if builder.type == EventTypes.Aliases and self.require_membership_for_aliases:
+            # Ideally we'd do the membership check in event_auth.check(), which
+            # describes a spec'd algorithm for authenticating events received over
+            # federation as well as those created locally. As of room v3, aliases events
+            # can be created by users that are not in the room, therefore we have to
+            # tolerate them in event_auth.check().
+            prev_state_ids = yield context.get_prev_state_ids(self.store)
+            prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
+            prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+            if not prev_event or prev_event.membership != Membership.JOIN:
+                logger.warning(
+                    ("Attempt to send `m.room.aliases` in room %s by user %s but"
+                     " membership is %s"),
+                    event.room_id,
+                    event.sender,
+                    prev_event.membership if prev_event else None,
+                )
+
+                raise AuthError(
+                    403,
+                    "You must be in the room to create an alias for it",
+                )
+
         self.validator.validate_new(event)
 
         defer.returnValue((event, context))
@@ -570,6 +606,20 @@ class EventCreationHandler(object):
 
         self.validator.validate_new(event)
 
+        # If this event is an annotation then we check that the sender
+        # can't annotate the same way twice (e.g. stopping users from liking an
+        # event multiple times).
+        relation = event.content.get("m.relates_to", {})
+        if relation.get("rel_type") == RelationTypes.ANNOTATION:
+            relates_to = relation["event_id"]
+            aggregation_key = relation["key"]
+
+            already_exists = yield self.store.has_user_annotated_event(
+                relates_to, event.type, aggregation_key, event.sender,
+            )
+            if already_exists:
+                raise SynapseError(400, "Can't send same reaction twice")
+
         logger.debug(
             "Created event %s",
             event.event_id,
@@ -610,6 +660,14 @@ class EventCreationHandler(object):
         else:
             room_version = yield self.store.get_room_version(event.room_id)
 
+        event_allowed = yield self.third_party_event_rules.check_event_allowed(
+            event, context,
+        )
+        if not event_allowed:
+            raise SynapseError(
+                403, "This event is not allowed in this context", Codes.FORBIDDEN,
+            )
+
         try:
             yield self.auth.check_from_context(room_version, event, context)
         except AuthError as err:
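
The new annotation check in message.py refuses a second identical annotation from the same sender (same relation target, event type, aggregation key and sender), based on the m.relates_to shape that RelationTypes.ANNOTATION ("m.annotation") events carry. A self-contained sketch of the relation extraction it performs:

    def extract_annotation(content):
        """Return (relates_to_event_id, key) if `content` carries an
        m.annotation relation, else None."""
        relation = content.get("m.relates_to", {})
        if relation.get("rel_type") != "m.annotation":
            return None
        return relation["event_id"], relation["key"]


    # e.g. the content of a "thumbs up" reaction event:
    assert extract_annotation({
        "m.relates_to": {
            "rel_type": "m.annotation",
            "event_id": "$some_event_id",
            "key": u"\U0001F44D",
        },
    }) == ("$some_event_id", u"\U0001F44D")
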
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index e4fdae9266..8f811e24fe 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -20,7 +20,6 @@ from twisted.python.failure import Failure
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
-from synapse.events.utils import serialize_event
 from synapse.storage.state import StateFilter
 from synapse.types import RoomStreamToken
 from synapse.util.async_helpers import ReadWriteLock
@@ -78,6 +77,7 @@ class PaginationHandler(object):
         self._purges_in_progress_by_room = set()
         # map from purge id to PurgeStatus
         self._purges_by_id = {}
+        self._event_serializer = hs.get_event_client_serializer()
 
     def start_purge_history(self, room_id, token,
                             delete_local_events=False):
@@ -278,18 +278,22 @@ class PaginationHandler(object):
         time_now = self.clock.time_msec()
 
         chunk = {
-            "chunk": [
-                serialize_event(e, time_now, as_client_event)
-                for e in events
-            ],
+            "chunk": (
+                yield self._event_serializer.serialize_events(
+                    events, time_now,
+                    as_client_event=as_client_event,
+                )
+            ),
             "start": pagin_config.from_token.to_string(),
             "end": next_token.to_string(),
         }
 
         if state:
-            chunk["state"] = [
-                serialize_event(e, time_now, as_client_event)
-                for e in state
-            ]
+            chunk["state"] = (
+                yield self._event_serializer.serialize_events(
+                    state, time_now,
+                    as_client_event=as_client_event,
+                )
+            )
 
         defer.returnValue(chunk)
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index bd1285b15c..557fb5f83d 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -158,7 +158,13 @@ class PresenceHandler(object):
         # have not yet been persisted
         self.unpersisted_users_changes = set()
 
-        hs.get_reactor().addSystemEventTrigger("before", "shutdown", self._on_shutdown)
+        hs.get_reactor().addSystemEventTrigger(
+            "before",
+            "shutdown",
+            run_as_background_process,
+            "presence.on_shutdown",
+            self._on_shutdown,
+        )
 
         self.serial_to_user = {}
         self._next_serial = 1
@@ -182,17 +188,27 @@ class PresenceHandler(object):
         # Start a LoopingCall in 30s that fires every 5s.
         # The initial delay is to allow disconnected clients a chance to
         # reconnect before we treat them as offline.
+        def run_timeout_handler():
+            return run_as_background_process(
+                "handle_presence_timeouts", self._handle_timeouts
+            )
+
         self.clock.call_later(
             30,
             self.clock.looping_call,
-            self._handle_timeouts,
+            run_timeout_handler,
             5000,
         )
 
+        def run_persister():
+            return run_as_background_process(
+                "persist_presence_changes", self._persist_unpersisted_changes
+            )
+
         self.clock.call_later(
             60,
             self.clock.looping_call,
-            self._persist_unpersisted_changes,
+            run_persister,
             60 * 1000,
         )
 
@@ -229,6 +245,7 @@ class PresenceHandler(object):
         )
 
         if self.unpersisted_users_changes:
+
             yield self.store.update_presence([
                 self.user_to_current_state[user_id]
                 for user_id in self.unpersisted_users_changes
@@ -240,30 +257,18 @@ class PresenceHandler(object):
         """We periodically persist the unpersisted changes, as otherwise they
         may stack up and slow down shutdown times.
         """
-        logger.info(
-            "Performing _persist_unpersisted_changes. Persisting %d unpersisted changes",
-            len(self.unpersisted_users_changes)
-        )
-
         unpersisted = self.unpersisted_users_changes
         self.unpersisted_users_changes = set()
 
         if unpersisted:
+            logger.info(
+                "Persisting %d upersisted presence updates", len(unpersisted)
+            )
             yield self.store.update_presence([
                 self.user_to_current_state[user_id]
                 for user_id in unpersisted
             ])
 
-        logger.info("Finished _persist_unpersisted_changes")
-
-    @defer.inlineCallbacks
-    def _update_states_and_catch_exception(self, new_states):
-        try:
-            res = yield self._update_states(new_states)
-            defer.returnValue(res)
-        except Exception:
-            logger.exception("Error updating presence")
-
     @defer.inlineCallbacks
     def _update_states(self, new_states):
         """Updates presence of users. Sets the appropriate timeouts. Pokes
@@ -338,45 +343,41 @@ class PresenceHandler(object):
         logger.info("Handling presence timeouts")
         now = self.clock.time_msec()
 
-        try:
-            with Measure(self.clock, "presence_handle_timeouts"):
-                # Fetch the list of users that *may* have timed out. Things may have
-                # changed since the timeout was set, so we won't necessarily have to
-                # take any action.
-                users_to_check = set(self.wheel_timer.fetch(now))
-
-                # Check whether the lists of syncing processes from an external
-                # process have expired.
-                expired_process_ids = [
-                    process_id for process_id, last_update
-                    in self.external_process_last_updated_ms.items()
-                    if now - last_update > EXTERNAL_PROCESS_EXPIRY
-                ]
-                for process_id in expired_process_ids:
-                    users_to_check.update(
-                        self.external_process_last_updated_ms.pop(process_id, ())
-                    )
-                    self.external_process_last_update.pop(process_id)
+        # Fetch the list of users that *may* have timed out. Things may have
+        # changed since the timeout was set, so we won't necessarily have to
+        # take any action.
+        users_to_check = set(self.wheel_timer.fetch(now))
+
+        # Check whether the lists of syncing processes from an external
+        # process have expired.
+        expired_process_ids = [
+            process_id for process_id, last_update
+            in self.external_process_last_updated_ms.items()
+            if now - last_update > EXTERNAL_PROCESS_EXPIRY
+        ]
+        for process_id in expired_process_ids:
+            users_to_check.update(
+                self.external_process_last_updated_ms.pop(process_id, ())
+            )
+            self.external_process_last_update.pop(process_id)
 
-                states = [
-                    self.user_to_current_state.get(
-                        user_id, UserPresenceState.default(user_id)
-                    )
-                    for user_id in users_to_check
-                ]
+        states = [
+            self.user_to_current_state.get(
+                user_id, UserPresenceState.default(user_id)
+            )
+            for user_id in users_to_check
+        ]
 
-                timers_fired_counter.inc(len(states))
+        timers_fired_counter.inc(len(states))
 
-                changes = handle_timeouts(
-                    states,
-                    is_mine_fn=self.is_mine_id,
-                    syncing_user_ids=self.get_currently_syncing_users(),
-                    now=now,
-                )
+        changes = handle_timeouts(
+            states,
+            is_mine_fn=self.is_mine_id,
+            syncing_user_ids=self.get_currently_syncing_users(),
+            now=now,
+        )
 
-            run_in_background(self._update_states_and_catch_exception, changes)
-        except Exception:
-            logger.exception("Exception in _handle_timeouts loop")
+        return self._update_states(changes)
 
     @defer.inlineCallbacks
     def bump_presence_active_time(self, user):
@@ -828,14 +829,22 @@ class PresenceHandler(object):
             if typ != EventTypes.Member:
                 continue
 
-            event = yield self.store.get_event(event_id)
-            if event.content.get("membership") != Membership.JOIN:
+            if event_id is None:
+                # state has been deleted, so this is not a join. We only care about
+                # joins.
+                continue
+
+            event = yield self.store.get_event(event_id, allow_none=True)
+            if not event or event.content.get("membership") != Membership.JOIN:
                 # We only care about joins
                 continue
 
             if prev_event_id:
-                prev_event = yield self.store.get_event(prev_event_id)
-                if prev_event.content.get("membership") == Membership.JOIN:
+                prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+                if (
+                    prev_event
+                    and prev_event.content.get("membership") == Membership.JOIN
+                ):
                     # Ignore changes to join events.
                     continue
 
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index a65c98ff5c..3e04233394 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -15,12 +15,15 @@
 
 import logging
 
+from six import raise_from
+
 from twisted.internet import defer
 
 from synapse.api.errors import (
     AuthError,
-    CodeMessageException,
     Codes,
+    HttpResponseException,
+    RequestSendFailed,
     StoreError,
     SynapseError,
 )
@@ -31,6 +34,9 @@ from ._base import BaseHandler
 
 logger = logging.getLogger(__name__)
 
+MAX_DISPLAYNAME_LEN = 100
+MAX_AVATAR_URL_LEN = 1000
+
 
 class BaseProfileHandler(BaseHandler):
     """Handles fetching and updating user profile information.
@@ -53,6 +59,7 @@ class BaseProfileHandler(BaseHandler):
     @defer.inlineCallbacks
     def get_profile(self, user_id):
         target_user = UserID.from_string(user_id)
+
         if self.hs.is_mine(target_user):
             try:
                 displayname = yield self.store.get_profile_displayname(
@@ -81,10 +88,10 @@ class BaseProfileHandler(BaseHandler):
                     ignore_backoff=True,
                 )
                 defer.returnValue(result)
-            except CodeMessageException as e:
-                if e.code != 404:
-                    logger.exception("Failed to get displayname")
-                raise
+            except RequestSendFailed as e:
+                raise_from(SynapseError(502, "Failed to fetch profile"), e)
+            except HttpResponseException as e:
+                raise e.to_synapse_error()
 
     @defer.inlineCallbacks
     def get_profile_from_cache(self, user_id):
@@ -138,10 +145,10 @@ class BaseProfileHandler(BaseHandler):
                     },
                     ignore_backoff=True,
                 )
-            except CodeMessageException as e:
-                if e.code != 404:
-                    logger.exception("Failed to get displayname")
-                raise
+            except RequestSendFailed as e:
+                raise_from(SynapseError(502, "Failed to fetch profile"), e)
+            except HttpResponseException as e:
+                raise e.to_synapse_error()
 
             defer.returnValue(result["displayname"])
 
@@ -161,6 +168,11 @@ class BaseProfileHandler(BaseHandler):
         if not by_admin and target_user != requester.user:
             raise AuthError(400, "Cannot set another user's displayname")
 
+        if len(new_displayname) > MAX_DISPLAYNAME_LEN:
+            raise SynapseError(
+                400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN, ),
+            )
+
         if new_displayname == '':
             new_displayname = None
 
@@ -199,10 +211,10 @@ class BaseProfileHandler(BaseHandler):
                     },
                     ignore_backoff=True,
                 )
-            except CodeMessageException as e:
-                if e.code != 404:
-                    logger.exception("Failed to get avatar_url")
-                raise
+            except RequestSendFailed as e:
+                raise_from(SynapseError(502, "Failed to fetch profile"), e)
+            except HttpResponseException as e:
+                raise e.to_synapse_error()
 
             defer.returnValue(result["avatar_url"])
 
@@ -216,6 +228,11 @@ class BaseProfileHandler(BaseHandler):
         if not by_admin and target_user != requester.user:
             raise AuthError(400, "Cannot set another user's avatar_url")
 
+        if len(new_avatar_url) > MAX_AVATAR_URL_LEN:
+            raise SynapseError(
+                400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN, ),
+            )
+
         yield self.store.set_profile_avatar_url(
             target_user.localpart, new_avatar_url
         )
@@ -283,6 +300,48 @@ class BaseProfileHandler(BaseHandler):
                     room_id, str(e)
                 )
 
+    @defer.inlineCallbacks
+    def check_profile_query_allowed(self, target_user, requester=None):
+        """Checks whether a profile query is allowed. If the
+        'require_auth_for_profile_requests' config flag is set to True and a
+        'requester' is provided, the query is only allowed if the two users
+        share a room.
+
+        Args:
+            target_user (UserID): The owner of the queried profile.
+            requester (None|UserID): The user querying for the profile.
+
+        Raises:
+            SynapseError(403): The two users share no room, or one user couldn't
+                be found in any room the server is in, and therefore the query
+                is denied.
+        """
+        # Implementation of MSC1301: don't allow looking up profiles if the
+        # requester isn't in the same room as the target. We expect requester to
+        # be None when this function is called outside of a profile query, e.g.
+        # when building a membership event. In this case, we must allow the
+        # lookup.
+        if not self.hs.config.require_auth_for_profile_requests or not requester:
+            return
+
+        try:
+            requester_rooms = yield self.store.get_rooms_for_user(
+                requester.to_string()
+            )
+            target_user_rooms = yield self.store.get_rooms_for_user(
+                target_user.to_string(),
+            )
+
+            # Check if the room lists have no elements in common.
+            if requester_rooms.isdisjoint(target_user_rooms):
+                raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN)
+        except StoreError as e:
+            if e.code == 404:
+                # This likely means that one of the users doesn't exist,
+                # so we act as if we couldn't find the profile.
+                raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN)
+            raise
+
 
 class MasterProfileHandler(BaseProfileHandler):
     PROFILE_UPDATE_MS = 60 * 1000
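
profile.py swaps the blanket CodeMessageException handling for two explicit cases: transport-level failures become a 502, and remote HTTP errors are converted with to_synapse_error(). six.raise_from is used so the original failure is preserved as __cause__ on Python 3 while degrading to a plain raise on Python 2; a minimal illustration of that behaviour:

    from six import raise_from


    def fetch_remote_profile():
        try:
            raise IOError("remote is unreachable")  # stand-in for RequestSendFailed
        except IOError as e:
            # on py3 this is `raise RuntimeError(...) from e`; on py2 the
            # cause is simply dropped
            raise_from(RuntimeError("Failed to fetch profile"), e)
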
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index a51d11a257..9a388ea013 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -19,7 +19,7 @@ import logging
 from twisted.internet import defer
 
 from synapse import types
-from synapse.api.constants import LoginType
+from synapse.api.constants import MAX_USERID_LENGTH, LoginType
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -123,6 +123,15 @@ class RegistrationHandler(BaseHandler):
 
         self.check_user_id_not_appservice_exclusive(user_id)
 
+        if len(user_id) > MAX_USERID_LENGTH:
+            raise SynapseError(
+                400,
+                "User ID may not be longer than %s characters" % (
+                    MAX_USERID_LENGTH,
+                ),
+                Codes.INVALID_USERNAME
+            )
+
         users = yield self.store.get_users_by_id_case_insensitive(user_id)
         if users:
             if not guest_access_token:
@@ -522,6 +531,8 @@ class RegistrationHandler(BaseHandler):
             A tuple of (user_id, access_token).
         Raises:
             RegistrationError if there was a problem registering.
+
+        NB this is only used in tests. TODO: move it to the test package!
         """
         if localpart is None:
             raise SynapseError(400, "Request must include user id")
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 17628e2684..74793bab33 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -27,7 +27,7 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset
 from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
-from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.storage.state import StateFilter
 from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
 from synapse.util import stringutils
@@ -70,10 +70,15 @@ class RoomCreationHandler(BaseHandler):
         self.spam_checker = hs.get_spam_checker()
         self.event_creation_handler = hs.get_event_creation_handler()
         self.room_member_handler = hs.get_room_member_handler()
+        self.config = hs.config
 
         # linearizer to stop two upgrades happening at once
         self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
 
+        self._server_notices_mxid = hs.config.server_notices_mxid
+
+        self.third_party_event_rules = hs.get_third_party_event_rules()
+
     @defer.inlineCallbacks
     def upgrade_room(self, requester, old_room_id, new_version):
         """Replace a room with a new room with a different version
@@ -402,7 +407,7 @@ class RoomCreationHandler(BaseHandler):
                 yield directory_handler.create_association(
                     requester, RoomAlias.from_string(alias),
                     new_room_id, servers=(self.hs.hostname, ),
-                    send_event=False,
+                    send_event=False, check_membership=False,
                 )
                 logger.info("Moved alias %s to new room", alias)
             except SynapseError as e:
@@ -469,13 +474,36 @@ class RoomCreationHandler(BaseHandler):
 
         yield self.auth.check_auth_blocking(user_id)
 
-        if not self.spam_checker.user_may_create_room(user_id):
+        if (self._server_notices_mxid is not None and
+                requester.user.to_string() == self._server_notices_mxid):
+            # allow the server notices mxid to create rooms
+            is_requester_admin = True
+        else:
+            is_requester_admin = yield self.auth.is_server_admin(
+                requester.user,
+            )
+
+        # Check whether the third party rules allows/changes the room create
+        # request.
+        yield self.third_party_event_rules.on_create_room(
+            requester,
+            config,
+            is_requester_admin=is_requester_admin,
+        )
+
+        if not is_requester_admin and not self.spam_checker.user_may_create_room(
+            user_id,
+        ):
             raise SynapseError(403, "You are not permitted to create rooms")
 
         if ratelimit:
             yield self.ratelimit(requester)
 
-        room_version = config.get("room_version", DEFAULT_ROOM_VERSION.identifier)
+        room_version = config.get(
+            "room_version",
+            self.config.default_room_version.identifier,
+        )
+
         if not isinstance(room_version, string_types):
             raise SynapseError(
                 400,
@@ -538,6 +566,7 @@ class RoomCreationHandler(BaseHandler):
                 room_alias=room_alias,
                 servers=[self.hs.hostname],
                 send_event=False,
+                check_membership=False,
             )
 
         preset_config = config.get(
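
room.py stops hardcoding DEFAULT_ROOM_VERSION and instead falls back to the server-configured hs.config.default_room_version when a createRoom request doesn't specify one. A self-contained sketch of that fallback logic; the version strings are illustrative, the real set being KNOWN_ROOM_VERSIONS in synapse.api.room_versions, and the real type check uses six.string_types for py2 compatibility:

    KNOWN_ROOM_VERSIONS = {"1", "2", "3", "4"}  # illustrative subset


    def choose_room_version(create_content, configured_default="4"):
        """Pick the room version for a createRoom request: the client's
        choice if given, else the server default; reject unknown versions."""
        version = create_content.get("room_version", configured_default)
        if not isinstance(version, str):
            raise ValueError("room_version must be a string")
        if version not in KNOWN_ROOM_VERSIONS:
            raise ValueError("Unknown room version: %s" % (version,))
        return version


    assert choose_room_version({}) == "4"
    assert choose_room_version({"room_version": "1"}) == "1"
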
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 024d6db27a..458902bb7e 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -33,6 +34,8 @@ from synapse.types import RoomID, UserID
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_joined_room, user_left_room
 
+from ._base import BaseHandler
+
 logger = logging.getLogger(__name__)
 
 id_server_scheme = "https://"
@@ -69,8 +72,15 @@ class RoomMemberHandler(object):
 
         self.clock = hs.get_clock()
         self.spam_checker = hs.get_spam_checker()
+        self.third_party_event_rules = hs.get_third_party_event_rules()
         self._server_notices_mxid = self.config.server_notices_mxid
         self._enable_lookup = hs.config.enable_3pid_lookup
+        self.allow_per_room_profiles = self.config.allow_per_room_profiles
+
+        # This is only used to get at the ratelimit function and
+        # maybe_kick_guest_users. It's fine that there are multiple of these,
+        # as it doesn't store state.
+        self.base_handler = BaseHandler(hs)
 
     @abc.abstractmethod
     def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
@@ -350,6 +360,13 @@ class RoomMemberHandler(object):
             # later on.
             content = dict(content)
 
+        if not self.allow_per_room_profiles:
+            # Strip profile data, knowing that new profile data will be added to the
+            # event's content in event_creation_handler.create_event() using the target's
+            # global profile.
+            content.pop("displayname", None)
+            content.pop("avatar_url", None)
+
         effective_membership_state = action
         if action in ["kick", "unban"]:
             effective_membership_state = "leave"
@@ -703,6 +720,19 @@ class RoomMemberHandler(object):
                     Codes.FORBIDDEN,
                 )
 
+        # We need to rate limit *before* we send out any 3PID invites, so we
+        # can't just rely on the standard ratelimiting of events.
+        yield self.base_handler.ratelimit(requester)
+
+        can_invite = yield self.third_party_event_rules.check_threepid_can_be_invited(
+            medium, address, room_id,
+        )
+        if not can_invite:
+            raise SynapseError(
+                403, "This third-party identifier can not be invited in this room",
+                Codes.FORBIDDEN,
+            )
+
         invitee = yield self._lookup_3pid(
             id_server, medium, address
         )
@@ -924,7 +954,7 @@ class RoomMemberHandler(object):
         }
 
         if self.config.invite_3pid_guest:
-            guest_access_token, guest_user_id = yield self.get_or_register_3pid_guest(
+            guest_user_id, guest_access_token = yield self.get_or_register_3pid_guest(
                 requester=requester,
                 medium=medium,
                 address=address,
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 49c439313e..9bba74d6c9 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -23,7 +23,6 @@ from twisted.internet import defer
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
-from synapse.events.utils import serialize_event
 from synapse.storage.state import StateFilter
 from synapse.visibility import filter_events_for_client
 
@@ -36,6 +35,7 @@ class SearchHandler(BaseHandler):
 
     def __init__(self, hs):
         super(SearchHandler, self).__init__(hs)
+        self._event_serializer = hs.get_event_client_serializer()
 
     @defer.inlineCallbacks
     def get_old_rooms_from_upgraded_room(self, room_id):
@@ -401,14 +401,16 @@ class SearchHandler(BaseHandler):
         time_now = self.clock.time_msec()
 
         for context in contexts.values():
-            context["events_before"] = [
-                serialize_event(e, time_now)
-                for e in context["events_before"]
-            ]
-            context["events_after"] = [
-                serialize_event(e, time_now)
-                for e in context["events_after"]
-            ]
+            context["events_before"] = (
+                yield self._event_serializer.serialize_events(
+                    context["events_before"], time_now,
+                )
+            )
+            context["events_after"] = (
+                yield self._event_serializer.serialize_events(
+                    context["events_after"], time_now,
+                )
+            )
 
         state_results = {}
         if include_state:
@@ -422,14 +424,13 @@ class SearchHandler(BaseHandler):
         # We're now about to serialize the events. We should not make any
         # blocking calls after this. Otherwise the 'age' will be wrong
 
-        results = [
-            {
+        results = []
+        for e in allowed_events:
+            results.append({
                 "rank": rank_map[e.event_id],
-                "result": serialize_event(e, time_now),
+                "result": (yield self._event_serializer.serialize_event(e, time_now)),
                 "context": contexts.get(e.event_id, {}),
-            }
-            for e in allowed_events
-        ]
+            })
 
         rooms_cat_res = {
             "results": results,
@@ -438,10 +439,13 @@ class SearchHandler(BaseHandler):
         }
 
         if state_results:
-            rooms_cat_res["state"] = {
-                room_id: [serialize_event(e, time_now) for e in state]
-                for room_id, state in state_results.items()
-            }
+            s = {}
+            for room_id, state in state_results.items():
+                s[room_id] = yield self._event_serializer.serialize_events(
+                    state, time_now,
+                )
+
+            rooms_cat_res["state"] = s
 
         if room_groups and "room_id" in group_keys:
             rooms_cat_res.setdefault("groups", {})["room_id"] = room_groups
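# Why the comprehensions above became explicit loops: under
# @defer.inlineCallbacks each serialize_event(s) call returns a Deferred
# that must be yielded from the generator body itself; a yield inside a
# list comprehension cannot reliably suspend the surrounding generator.
# A condensed sketch of the resulting pattern (serializer internals
# assumed):
from twisted.internet import defer

@defer.inlineCallbacks
def serialize_all(serializer, events, time_now):
    results = []
    for e in events:
        serialized = yield serializer.serialize_event(e, time_now)
        results.append(serialized)
    defer.returnValue(results)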
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
new file mode 100644
index 0000000000..7ad16c8566
--- /dev/null
+++ b/synapse/handlers/stats.py
@@ -0,0 +1,333 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.handlers.state_deltas import StateDeltasHandler
+from synapse.metrics import event_processing_positions
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.types import UserID
+from synapse.util.metrics import Measure
+
+logger = logging.getLogger(__name__)
+
+
+class StatsHandler(StateDeltasHandler):
+    """Handles keeping the *_stats tables updated with a simple time-series of
+    information about the users, rooms and media on the server, such that admins
+    have some idea of who is consuming their resources.
+
+    Heavily derived from UserDirectoryHandler
+    """
+
+    def __init__(self, hs):
+        super(StatsHandler, self).__init__(hs)
+        self.hs = hs
+        self.store = hs.get_datastore()
+        self.state = hs.get_state_handler()
+        self.server_name = hs.hostname
+        self.clock = hs.get_clock()
+        self.notifier = hs.get_notifier()
+        self.is_mine_id = hs.is_mine_id
+        self.stats_bucket_size = hs.config.stats_bucket_size
+
+        # The current position in the current_state_delta stream
+        self.pos = None
+
+        # Guard to ensure we only process deltas one at a time
+        self._is_processing = False
+
+        if hs.config.stats_enabled:
+            self.notifier.add_replication_callback(self.notify_new_event)
+
+            # We kick this off so that we don't have to wait for a change before
+            # we start populating stats
+            self.clock.call_later(0, self.notify_new_event)
+
+    def notify_new_event(self):
+        """Called when there may be more deltas to process
+        """
+        if not self.hs.config.stats_enabled:
+            return
+
+        if self._is_processing:
+            return
+
+        @defer.inlineCallbacks
+        def process():
+            try:
+                yield self._unsafe_process()
+            finally:
+                self._is_processing = False
+
+        self._is_processing = True
+        run_as_background_process("stats.notify_new_event", process)
+
+    @defer.inlineCallbacks
+    def _unsafe_process(self):
+        # If self.pos is None then it means we haven't fetched it from the DB
+        if self.pos is None:
+            self.pos = yield self.store.get_stats_stream_pos()
+
+        # If still None then the initial background update hasn't happened yet
+        if self.pos is None:
+            defer.returnValue(None)
+
+        # Loop round handling deltas until we're up to date
+        while True:
+            with Measure(self.clock, "stats_delta"):
+                deltas = yield self.store.get_current_state_deltas(self.pos)
+                if not deltas:
+                    return
+
+                logger.info("Handling %d state deltas", len(deltas))
+                yield self._handle_deltas(deltas)
+
+                self.pos = deltas[-1]["stream_id"]
+                yield self.store.update_stats_stream_pos(self.pos)
+
+                event_processing_positions.labels("stats").set(self.pos)
+
+    @defer.inlineCallbacks
+    def _handle_deltas(self, deltas):
+        """
+        Called with the state deltas to process
+        """
+        for delta in deltas:
+            typ = delta["type"]
+            state_key = delta["state_key"]
+            room_id = delta["room_id"]
+            event_id = delta["event_id"]
+            stream_id = delta["stream_id"]
+            prev_event_id = delta["prev_event_id"]
+            stream_pos = delta["stream_id"]
+
+            logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
+
+            token = yield self.store.get_earliest_token_for_room_stats(room_id)
+
+            # If the earliest token to begin from is larger than our current
+            # stream ID, skip processing this delta.
+            if token is not None and token >= stream_id:
+                logger.debug(
+                    "Ignoring: %s as earlier than this room's initial ingestion event",
+                    event_id,
+                )
+                continue
+
+            if event_id is None and prev_event_id is None:
+                # Neither a new event nor a previous one: nothing to do here
+                continue
+
+            event_content = {}
+
+            if event_id is not None:
+                event = yield self.store.get_event(event_id, allow_none=True)
+                if event:
+                    event_content = event.content or {}
+
+            # We use stream_pos here rather than fetch by event_id as event_id
+            # may be None
+            now = yield self.store.get_received_ts_by_stream_pos(stream_pos)
+
+            # quantise time to the nearest bucket
+            now = (now // 1000 // self.stats_bucket_size) * self.stats_bucket_size
+
+            if typ == EventTypes.Member:
+                # we could use _get_key_change here but it's a bit inefficient
+                # given we're not testing for a specific result; might as well
+                # just grab the prev_membership and membership strings and
+                # compare them.
+                prev_event_content = {}
+                if prev_event_id is not None:
+                    prev_event = yield self.store.get_event(
+                        prev_event_id, allow_none=True,
+                    )
+                    if prev_event:
+                        prev_event_content = prev_event.content
+
+                membership = event_content.get("membership", Membership.LEAVE)
+                prev_membership = prev_event_content.get("membership", Membership.LEAVE)
+
+                if prev_membership == membership:
+                    continue
+
+                if prev_membership == Membership.JOIN:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "joined_members", -1
+                    )
+                elif prev_membership == Membership.INVITE:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "invited_members", -1
+                    )
+                elif prev_membership == Membership.LEAVE:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "left_members", -1
+                    )
+                elif prev_membership == Membership.BAN:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "banned_members", -1
+                    )
+                else:
+                    err = "%s is not a valid prev_membership" % (repr(prev_membership),)
+                    logger.error(err)
+                    raise ValueError(err)
+
+                if membership == Membership.JOIN:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "joined_members", +1
+                    )
+                elif membership == Membership.INVITE:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "invited_members", +1
+                    )
+                elif membership == Membership.LEAVE:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "left_members", +1
+                    )
+                elif membership == Membership.BAN:
+                    yield self.store.update_stats_delta(
+                        now, "room", room_id, "banned_members", +1
+                    )
+                else:
+                    err = "%s is not a valid membership" % (repr(membership),)
+                    logger.error(err)
+                    raise ValueError(err)
+
+                user_id = state_key
+                if self.is_mine_id(user_id):
+                    # update user_stats as it's one of our users
+                    public = yield self._is_public_room(room_id)
+
+                    if membership == Membership.LEAVE:
+                        yield self.store.update_stats_delta(
+                            now,
+                            "user",
+                            user_id,
+                            "public_rooms" if public else "private_rooms",
+                            -1,
+                        )
+                    elif membership == Membership.JOIN:
+                        yield self.store.update_stats_delta(
+                            now,
+                            "user",
+                            user_id,
+                            "public_rooms" if public else "private_rooms",
+                            +1,
+                        )
+
+            elif typ == EventTypes.Create:
+                # Newly created room. Add it with all blank portions.
+                yield self.store.update_room_state(
+                    room_id,
+                    {
+                        "join_rules": None,
+                        "history_visibility": None,
+                        "encryption": None,
+                        "name": None,
+                        "topic": None,
+                        "avatar": None,
+                        "canonical_alias": None,
+                    },
+                )
+
+            elif typ == EventTypes.JoinRules:
+                yield self.store.update_room_state(
+                    room_id, {"join_rules": event_content.get("join_rule")}
+                )
+
+                is_public = yield self._get_key_change(
+                    prev_event_id, event_id, "join_rule", JoinRules.PUBLIC
+                )
+                if is_public is not None:
+                    yield self.update_public_room_stats(now, room_id, is_public)
+
+            elif typ == EventTypes.RoomHistoryVisibility:
+                yield self.store.update_room_state(
+                    room_id,
+                    {"history_visibility": event_content.get("history_visibility")},
+                )
+
+                is_public = yield self._get_key_change(
+                    prev_event_id, event_id, "history_visibility", "world_readable"
+                )
+                if is_public is not None:
+                    yield self.update_public_room_stats(now, room_id, is_public)
+
+            elif typ == EventTypes.Encryption:
+                yield self.store.update_room_state(
+                    room_id, {"encryption": event_content.get("algorithm")}
+                )
+            elif typ == EventTypes.Name:
+                yield self.store.update_room_state(
+                    room_id, {"name": event_content.get("name")}
+                )
+            elif typ == EventTypes.Topic:
+                yield self.store.update_room_state(
+                    room_id, {"topic": event_content.get("topic")}
+                )
+            elif typ == EventTypes.RoomAvatar:
+                yield self.store.update_room_state(
+                    room_id, {"avatar": event_content.get("url")}
+                )
+            elif typ == EventTypes.CanonicalAlias:
+                yield self.store.update_room_state(
+                    room_id, {"canonical_alias": event_content.get("alias")}
+                )
+
+    @defer.inlineCallbacks
+    def update_public_room_stats(self, ts, room_id, is_public):
+        """
+        Increment/decrement a user's number of public rooms when a room they are
+        in changes to/from public visibility.
+
+        Args:
+            ts (int): Timestamp in seconds
+            room_id (str)
+            is_public (bool)
+        """
+        # For now, blindly iterate over all local users in the room so that
+        # we can handle the whole problem of copying buckets over as needed
+        user_ids = yield self.store.get_users_in_room(room_id)
+
+        for user_id in user_ids:
+            if self.hs.is_mine(UserID.from_string(user_id)):
+                yield self.store.update_stats_delta(
+                    ts, "user", user_id, "public_rooms", +1 if is_public else -1
+                )
+                yield self.store.update_stats_delta(
+                    ts, "user", user_id, "private_rooms", -1 if is_public else +1
+                )
+
+    @defer.inlineCallbacks
+    def _is_public_room(self, room_id):
+        join_rules = yield self.state.get_current_state(room_id, EventTypes.JoinRules)
+        history_visibility = yield self.state.get_current_state(
+            room_id, EventTypes.RoomHistoryVisibility
+        )
+
+        if (join_rules and join_rules.content.get("join_rule") == JoinRules.PUBLIC) or (
+            (
+                history_visibility
+                and history_visibility.content.get("history_visibility")
+                == "world_readable"
+            )
+        ):
+            defer.returnValue(True)
+        else:
+            defer.returnValue(False)
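# A worked example of the time quantisation in _handle_deltas above,
# assuming a stats_bucket_size of 86400 (one day, in seconds); the
# timestamp is illustrative.
received_ts_ms = 1556297000123  # ms since epoch, as returned by the store
bucket_size = 86400
now = (received_ts_ms // 1000 // bucket_size) * bucket_size
# 1556297000123 // 1000 -> 1556297000 seconds
# 1556297000 // 86400   -> 18012 whole buckets
# 18012 * 86400         -> 1556236800, the start of the current bucket
assert now == 1556236800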
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 153312e39f..62fda0c664 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -583,30 +583,42 @@ class SyncHandler(object):
         )
 
         # if the room has a name or canonical_alias set, we can skip
-        # calculating heroes.  we assume that if the event has contents, it'll
-        # be a valid name or canonical_alias - i.e. we're checking that they
-        # haven't been "deleted" by blatting {} over the top.
+        # calculating heroes. Empty strings are falsey, so we check that
+        # the "name"/"alias" content values are present and non-empty.
         if name_id:
             name = yield self.store.get_event(name_id, allow_none=True)
-            if name and name.content:
+            if name and name.content.get("name"):
                 defer.returnValue(summary)
 
         if canonical_alias_id:
             canonical_alias = yield self.store.get_event(
                 canonical_alias_id, allow_none=True,
             )
-            if canonical_alias and canonical_alias.content:
+            if canonical_alias and canonical_alias.content.get("alias"):
                 defer.returnValue(summary)
 
+        me = sync_config.user.to_string()
+
         joined_user_ids = [
-            r[0] for r in details.get(Membership.JOIN, empty_ms).members
+            r[0]
+            for r in details.get(Membership.JOIN, empty_ms).members
+            if r[0] != me
         ]
         invited_user_ids = [
-            r[0] for r in details.get(Membership.INVITE, empty_ms).members
+            r[0]
+            for r in details.get(Membership.INVITE, empty_ms).members
+            if r[0] != me
         ]
         gone_user_ids = (
-            [r[0] for r in details.get(Membership.LEAVE, empty_ms).members] +
-            [r[0] for r in details.get(Membership.BAN, empty_ms).members]
+            [
+                r[0]
+                for r in details.get(Membership.LEAVE, empty_ms).members
+                if r[0] != me
+            ] + [
+                r[0]
+                for r in details.get(Membership.BAN, empty_ms).members
+                if r[0] != me
+            ]
         )
 
         # FIXME: only build up a member_ids list for our heroes
@@ -621,22 +633,13 @@ class SyncHandler(object):
                 member_ids[user_id] = event_id
 
         # FIXME: order by stream ordering rather than as returned by SQL
-        me = sync_config.user.to_string()
         if (joined_user_ids or invited_user_ids):
             summary['m.heroes'] = sorted(
-                [
-                    user_id
-                    for user_id in (joined_user_ids + invited_user_ids)
-                    if user_id != me
-                ]
+                [user_id for user_id in (joined_user_ids + invited_user_ids)]
             )[0:5]
         else:
             summary['m.heroes'] = sorted(
-                [
-                    user_id
-                    for user_id in gone_user_ids
-                    if user_id != me
-                ]
+                [user_id for user_id in gone_user_ids]
             )[0:5]
 
         if not sync_config.filter_collection.lazy_load_members():
@@ -934,7 +937,7 @@ class SyncHandler(object):
         res = yield self._generate_sync_entry_for_rooms(
             sync_result_builder, account_data_by_room
         )
-        newly_joined_rooms, newly_joined_users, _, _ = res
+        newly_joined_rooms, newly_joined_or_invited_users, _, _ = res
         _, _, newly_left_rooms, newly_left_users = res
 
         block_all_presence_data = (
@@ -943,7 +946,7 @@ class SyncHandler(object):
         )
         if self.hs_config.use_presence and not block_all_presence_data:
             yield self._generate_sync_entry_for_presence(
-                sync_result_builder, newly_joined_rooms, newly_joined_users
+                sync_result_builder, newly_joined_rooms, newly_joined_or_invited_users
             )
 
         yield self._generate_sync_entry_for_to_device(sync_result_builder)
@@ -951,7 +954,7 @@ class SyncHandler(object):
         device_lists = yield self._generate_sync_entry_for_device_list(
             sync_result_builder,
             newly_joined_rooms=newly_joined_rooms,
-            newly_joined_users=newly_joined_users,
+            newly_joined_or_invited_users=newly_joined_or_invited_users,
             newly_left_rooms=newly_left_rooms,
             newly_left_users=newly_left_users,
         )
@@ -1036,7 +1039,8 @@ class SyncHandler(object):
     @measure_func("_generate_sync_entry_for_device_list")
     @defer.inlineCallbacks
     def _generate_sync_entry_for_device_list(self, sync_result_builder,
-                                             newly_joined_rooms, newly_joined_users,
+                                             newly_joined_rooms,
+                                             newly_joined_or_invited_users,
                                              newly_left_rooms, newly_left_users):
         user_id = sync_result_builder.sync_config.user.to_string()
         since_token = sync_result_builder.since_token
@@ -1050,7 +1054,7 @@ class SyncHandler(object):
             # share a room with?
             for room_id in newly_joined_rooms:
                 joined_users = yield self.state.get_current_users_in_room(room_id)
-                newly_joined_users.update(joined_users)
+                newly_joined_or_invited_users.update(joined_users)
 
             for room_id in newly_left_rooms:
                 left_users = yield self.state.get_current_users_in_room(room_id)
@@ -1058,7 +1062,7 @@ class SyncHandler(object):
 
             # TODO: Check that these users are actually new, i.e. either they
             # weren't in the previous sync *or* they left and rejoined.
-            changed.update(newly_joined_users)
+            changed.update(newly_joined_or_invited_users)
 
             if not changed and not newly_left_users:
                 defer.returnValue(DeviceLists(
@@ -1176,7 +1180,7 @@ class SyncHandler(object):
 
     @defer.inlineCallbacks
     def _generate_sync_entry_for_presence(self, sync_result_builder, newly_joined_rooms,
-                                          newly_joined_users):
+                                          newly_joined_or_invited_users):
         """Generates the presence portion of the sync response. Populates the
         `sync_result_builder` with the result.
 
@@ -1184,8 +1188,9 @@ class SyncHandler(object):
             sync_result_builder(SyncResultBuilder)
             newly_joined_rooms(list): List of rooms that the user has joined
                 since the last sync (or empty if an initial sync)
-            newly_joined_users(list): List of users that have joined rooms
-                since the last sync (or empty if an initial sync)
+            newly_joined_or_invited_users(list): List of users that have joined
+                or been invited to rooms since the last sync (or empty if an initial
+                sync)
         """
         now_token = sync_result_builder.now_token
         sync_config = sync_result_builder.sync_config
@@ -1211,7 +1216,7 @@ class SyncHandler(object):
             "presence_key", presence_key
         )
 
-        extra_users_ids = set(newly_joined_users)
+        extra_users_ids = set(newly_joined_or_invited_users)
         for room_id in newly_joined_rooms:
             users = yield self.state.get_current_users_in_room(room_id)
             extra_users_ids.update(users)
@@ -1243,7 +1248,8 @@ class SyncHandler(object):
 
         Returns:
             Deferred(tuple): Returns a 4-tuple of
-            `(newly_joined_rooms, newly_joined_users, newly_left_rooms, newly_left_users)`
+            `(newly_joined_rooms, newly_joined_or_invited_users,
+            newly_left_rooms, newly_left_users)`
         """
         user_id = sync_result_builder.sync_config.user.to_string()
         block_all_room_ephemeral = (
@@ -1314,8 +1320,8 @@ class SyncHandler(object):
 
         sync_result_builder.invited.extend(invited)
 
-        # Now we want to get any newly joined users
-        newly_joined_users = set()
+        # Now we want to get any newly joined or invited users
+        newly_joined_or_invited_users = set()
         newly_left_users = set()
         if since_token:
             for joined_sync in sync_result_builder.joined:
@@ -1324,19 +1330,22 @@ class SyncHandler(object):
                 )
                 for event in it:
                     if event.type == EventTypes.Member:
-                        if event.membership == Membership.JOIN:
-                            newly_joined_users.add(event.state_key)
+                        if (
+                            event.membership == Membership.JOIN or
+                            event.membership == Membership.INVITE
+                        ):
+                            newly_joined_or_invited_users.add(event.state_key)
                         else:
                             prev_content = event.unsigned.get("prev_content", {})
                             prev_membership = prev_content.get("membership", None)
                             if prev_membership == Membership.JOIN:
                                 newly_left_users.add(event.state_key)
 
-        newly_left_users -= newly_joined_users
+        newly_left_users -= newly_joined_or_invited_users
 
         defer.returnValue((
             newly_joined_rooms,
-            newly_joined_users,
+            newly_joined_or_invited_users,
             newly_left_rooms,
             newly_left_users,
         ))
@@ -1381,7 +1390,7 @@ class SyncHandler(object):
             where:
                 room_entries is a list [RoomSyncResultBuilder]
                 invited_rooms is a list [InvitedSyncResult]
-                newly_joined rooms is a list[str] of room ids
+                newly_joined_rooms is a list[str] of room ids
                 newly_left_rooms is a list[str] of room ids
         """
         user_id = sync_result_builder.sync_config.user.to_string()
@@ -1422,7 +1431,7 @@ class SyncHandler(object):
             if room_id in sync_result_builder.joined_room_ids and non_joins:
                 # Always include if the user (re)joined the room, especially
                 # important so that device list changes are calculated correctly.
-                # If there are non join member events, but we are still in the room,
+                # If there are non-join member events, but we are still in the room,
                 # then the user must have left and joined
                 newly_joined_rooms.append(room_id)
 
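# A small illustration of the hero calculation after the change above: the
# requesting user is now excluded when the membership lists are built, so
# the final sorted()[0:5] no longer needs its own filter. The user IDs are
# made up.
me = "@alice:example.org"
joined = ["@bob:example.org", "@alice:example.org", "@carol:example.org"]
joined_user_ids = [u for u in joined if u != me]
heroes = sorted(joined_user_ids)[0:5]
assert heroes == ["@bob:example.org", "@carol:example.org"]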
diff --git a/synapse/http/client.py b/synapse/http/client.py
index ad454f4964..5c073fff07 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -17,7 +17,7 @@
 import logging
 from io import BytesIO
 
-from six import text_type
+from six import raise_from, text_type
 from six.moves import urllib
 
 import treq
@@ -90,45 +90,50 @@ class IPBlacklistingResolver(object):
     def resolveHostName(self, recv, hostname, portNumber=0):
 
         r = recv()
-        d = defer.Deferred()
         addresses = []
 
-        @provider(IResolutionReceiver)
-        class EndpointReceiver(object):
-            @staticmethod
-            def resolutionBegan(resolutionInProgress):
-                pass
+        def _callback():
+            r.resolutionBegan(None)
 
-            @staticmethod
-            def addressResolved(address):
-                ip_address = IPAddress(address.host)
+            has_bad_ip = False
+            for i in addresses:
+                ip_address = IPAddress(i.host)
 
                 if check_against_blacklist(
                     ip_address, self._ip_whitelist, self._ip_blacklist
                 ):
                     logger.info(
-                        "Dropped %s from DNS resolution to %s" % (ip_address, hostname)
+                        "Dropped %s from DNS resolution to %s due to blacklist" %
+                        (ip_address, hostname)
                     )
-                    raise SynapseError(403, "IP address blocked by IP blacklist entry")
+                    has_bad_ip = True
+
+            # if we have a blacklisted IP, we'd like to raise an error to block the
+            # request, but all we can really do from here is claim that there were no
+            # valid results.
+            if not has_bad_ip:
+                for i in addresses:
+                    r.addressResolved(i)
+            r.resolutionComplete()
+
+        @provider(IResolutionReceiver)
+        class EndpointReceiver(object):
+            @staticmethod
+            def resolutionBegan(resolutionInProgress):
+                pass
 
+            @staticmethod
+            def addressResolved(address):
                 addresses.append(address)
 
             @staticmethod
             def resolutionComplete():
-                d.callback(addresses)
+                _callback()
 
         self._reactor.nameResolver.resolveHostName(
             EndpointReceiver, hostname, portNumber=portNumber
         )
 
-        def _callback(addrs):
-            r.resolutionBegan(None)
-            for i in addrs:
-                r.addressResolved(i)
-            r.resolutionComplete()
-
-        d.addCallback(_callback)
-
         return r
 
 
@@ -160,7 +165,8 @@ class BlacklistingAgentWrapper(Agent):
                 ip_address, self._ip_whitelist, self._ip_blacklist
             ):
                 logger.info(
-                    "Blocking access to %s because of blacklist" % (ip_address,)
+                    "Blocking access to %s due to blacklist" %
+                    (ip_address,)
                 )
                 e = SynapseError(403, "IP address blocked by IP blacklist entry")
                 return defer.fail(Failure(e))
@@ -258,9 +264,6 @@ class SimpleHttpClient(object):
             uri (str): URI to query.
             data (bytes): Data to send in the request body, if applicable.
             headers (t.w.http_headers.Headers): Request headers.
-
-        Raises:
-            SynapseError: If the IP is blacklisted.
         """
         # A small wrapper around self.agent.request() so we can easily attach
         # counters to it
@@ -539,10 +542,15 @@ class SimpleHttpClient(object):
             length = yield make_deferred_yieldable(
                 _readBodyToFile(response, output_stream, max_size)
             )
+        except SynapseError:
+            # This can happen e.g. because the body is too large.
+            raise
         except Exception as e:
-            logger.exception("Failed to download body")
-            raise SynapseError(
-                502, ("Failed to download remote body: %s" % e), Codes.UNKNOWN
+            raise_from(
+                SynapseError(
+                    502, ("Failed to download remote body: %s" % e),
+                ),
+                e
             )
 
         defer.returnValue(
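# A condensed sketch of the policy the reworked resolver above implements:
# IResolutionReceiver offers no way to raise an error, so if any resolved
# address is blacklisted we report that the name resolved to nothing at
# all. check_against_blacklist and IPAddress stand in for the helpers of
# the same names used in this module.
def filter_resolved_addresses(addresses, ip_whitelist, ip_blacklist):
    if any(
        check_against_blacklist(IPAddress(a.host), ip_whitelist, ip_blacklist)
        for a in addresses
    ):
        return []  # behave as though DNS returned no results
    return addresses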
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 1334c630cc..b4cbe97b41 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -149,7 +149,7 @@ class MatrixFederationAgent(object):
             tls_options = None
         else:
             tls_options = self._tls_client_options_factory.get_options(
-                res.tls_server_name.decode("ascii")
+                res.tls_server_name.decode("ascii"),
             )
 
         # make sure that the Host header is set correctly
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index ff63d0b2a8..663ea72a7a 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -27,9 +27,11 @@ import treq
 from canonicaljson import encode_canonical_json
 from prometheus_client import Counter
 from signedjson.sign import sign_json
+from zope.interface import implementer
 
 from twisted.internet import defer, protocol
 from twisted.internet.error import DNSLookupError
+from twisted.internet.interfaces import IReactorPluggableNameResolver
 from twisted.internet.task import _EPSILON, Cooperator
 from twisted.web._newclient import ResponseDone
 from twisted.web.http_headers import Headers
@@ -44,6 +46,7 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.http import QuieterFileBodyProducer
+from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
 from synapse.util.async_helpers import timeout_deferred
 from synapse.util.logcontext import make_deferred_yieldable
@@ -172,19 +175,44 @@ class MatrixFederationHttpClient(object):
         self.hs = hs
         self.signing_key = hs.config.signing_key[0]
         self.server_name = hs.hostname
-        reactor = hs.get_reactor()
+
+        real_reactor = hs.get_reactor()
+
+        # We need to use a DNS resolver which filters out blacklisted IP
+        # addresses, to prevent DNS rebinding.
+        nameResolver = IPBlacklistingResolver(
+            real_reactor, None, hs.config.federation_ip_range_blacklist,
+        )
+
+        @implementer(IReactorPluggableNameResolver)
+        class Reactor(object):
+            def __getattr__(_self, attr):
+                if attr == "nameResolver":
+                    return nameResolver
+                else:
+                    return getattr(real_reactor, attr)
+
+        self.reactor = Reactor()
 
         self.agent = MatrixFederationAgent(
-            hs.get_reactor(),
+            self.reactor,
             tls_client_options_factory,
         )
+
+        # Use a BlacklistingAgentWrapper to prevent circumventing the IP
+        # blacklist via IP literals in server names
+        self.agent = BlacklistingAgentWrapper(
+            self.agent, self.reactor,
+            ip_blacklist=hs.config.federation_ip_range_blacklist,
+        )
+
         self.clock = hs.get_clock()
         self._store = hs.get_datastore()
         self.version_string_bytes = hs.version_string.encode('ascii')
         self.default_timeout = 60
 
         def schedule(x):
-            reactor.callLater(_EPSILON, x)
+            self.reactor.callLater(_EPSILON, x)
 
         self._cooperator = Cooperator(scheduler=schedule)
 
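# The wrapper above is a simple delegation pattern: override one attribute
# (nameResolver) and forward everything else to the wrapped reactor via
# __getattr__, which Python only consults for attributes not found on the
# proxy itself. A standalone sketch:
class _ReactorProxy(object):
    def __init__(self, wrapped, name_resolver):
        self._wrapped = wrapped
        self._name_resolver = name_resolver

    def __getattr__(self, attr):
        if attr == "nameResolver":
            return self._name_resolver
        return getattr(self._wrapped, attr)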
@@ -257,7 +285,24 @@ class MatrixFederationHttpClient(object):
             request (MatrixFederationRequest): details of request to be sent
 
             timeout (int|None): number of milliseconds to wait for the response headers
-                (including connecting to the server). 60s by default.
+                (including connecting to the server), *for each attempt*.
+                60s by default.
+
+            long_retries (bool): whether to use the long retry algorithm.
+
+                The regular retry algorithm makes 4 attempts, with intervals
+                [0.5s, 1s, 2s].
+
+                The long retry algorithm makes 11 attempts, with intervals
+                [4s, 16s, 60s, 60s, ...]
+
+                Both algorithms add -20%/+40% jitter to the retry intervals.
+
+                Note that the above intervals are *in addition* to the time spent
+                waiting for the request to complete (up to `timeout` ms).
+
+                NB: the long retry algorithm takes over 20 minutes to complete, with
+                a default timeout of 60s!
 
             ignore_backoff (bool): true to ignore the historical backoff data
                 and try the request anyway.
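# A sketch of the retry schedule documented above. The jitter range matches
# the -20%/+40% described; the exact implementation inside _send_request is
# assumed.
import random

def retry_intervals(long_retries):
    base = [4, 16] + [60] * 8 if long_retries else [0.5, 1, 2]
    return [t * random.uniform(0.8, 1.4) for t in base]

# retry_intervals(True) yields the 10 sleeps between 11 long-retry attempts;
# retry_intervals(False) yields the 3 sleeps between 4 regular attempts.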
@@ -370,7 +415,7 @@ class MatrixFederationHttpClient(object):
                             request_deferred = timeout_deferred(
                                 request_deferred,
                                 timeout=_sec_timeout,
-                                reactor=self.hs.get_reactor(),
+                                reactor=self.reactor,
                             )
 
                             response = yield request_deferred
@@ -397,7 +442,7 @@ class MatrixFederationHttpClient(object):
                         d = timeout_deferred(
                             d,
                             timeout=_sec_timeout,
-                            reactor=self.hs.get_reactor(),
+                            reactor=self.reactor,
                         )
 
                         try:
@@ -538,10 +583,14 @@ class MatrixFederationHttpClient(object):
                 the request body. This will be encoded as JSON.
             json_data_callback (callable): A callable returning the dict to
                 use as the request body.
-            long_retries (bool): A boolean that indicates whether we should
-                retry for a short or long time.
-            timeout(int): How long to try (in ms) the destination for before
-                giving up. None indicates no timeout.
+
+            long_retries (bool): whether to use the long retry algorithm. See
+                docs on _send_request for details.
+
+            timeout (int|None): number of milliseconds to wait for the response headers
+                (including connecting to the server), *for each attempt*.
+                self.default_timeout (60s) by default.
+
             ignore_backoff (bool): true to ignore the historical backoff data
                 and try the request anyway.
             backoff_on_404 (bool): True if we should count a 404 response as
@@ -586,7 +635,7 @@ class MatrixFederationHttpClient(object):
         )
 
         body = yield _handle_json_response(
-            self.hs.get_reactor(), self.default_timeout, request, response,
+            self.reactor, self.default_timeout, request, response,
         )
 
         defer.returnValue(body)
@@ -599,15 +648,22 @@ class MatrixFederationHttpClient(object):
         Args:
             destination (str): The remote server to send the HTTP request
                 to.
+
             path (str): The HTTP path.
+
             data (dict): A dict containing the data that will be used as
                 the request body. This will be encoded as JSON.
-            long_retries (bool): A boolean that indicates whether we should
-                retry for a short or long time.
-            timeout(int): How long to try (in ms) the destination for before
-                giving up. None indicates no timeout.
+
+            long_retries (bool): whether to use the long retry algorithm. See
+                docs on _send_request for details.
+
+            timeout (int|None): number of milliseconds to wait for the response headers
+                (including connecting to the server), *for each attempt*.
+                self.default_timeout (60s) by default.
+
             ignore_backoff (bool): true to ignore the historical backoff data and
                 try the request anyway.
+
             args (dict): query params
         Returns:
             Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
@@ -645,7 +701,7 @@ class MatrixFederationHttpClient(object):
             _sec_timeout = self.default_timeout
 
         body = yield _handle_json_response(
-            self.hs.get_reactor(), _sec_timeout, request, response,
+            self.reactor, _sec_timeout, request, response,
         )
         defer.returnValue(body)
 
@@ -658,14 +714,19 @@ class MatrixFederationHttpClient(object):
         Args:
             destination (str): The remote server to send the HTTP request
                 to.
+
             path (str): The HTTP path.
+
             args (dict|None): A dictionary used to create query strings, defaults to
                 None.
-            timeout (int): How long to try (in ms) the destination for before
-                giving up. None indicates no timeout and that the request will
-                be retried.
+
+            timeout (int|None): number of milliseconds to wait for the response headers
+                (including connecting to the server), *for each attempt*.
+                self.default_timeout (60s) by default.
+
             ignore_backoff (bool): true to ignore the historical backoff data
                 and try the request anyway.
+
             try_trailing_slash_on_400 (bool): True if on a 400 M_UNRECOGNIZED
                 response we should try appending a trailing slash to the end of
                 the request. Workaround for #3622 in Synapse <= v0.99.3.
@@ -683,10 +744,6 @@ class MatrixFederationHttpClient(object):
             RequestSendFailed: If there were problems connecting to the
                 remote, due to e.g. DNS failures, connection timeouts etc.
         """
-        logger.debug("get_json args: %s", args)
-
-        logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail)
-
         request = MatrixFederationRequest(
             method="GET",
             destination=destination,
@@ -704,7 +761,7 @@ class MatrixFederationHttpClient(object):
         )
 
         body = yield _handle_json_response(
-            self.hs.get_reactor(), self.default_timeout, request, response,
+            self.reactor, self.default_timeout, request, response,
         )
 
         defer.returnValue(body)
@@ -718,12 +775,18 @@ class MatrixFederationHttpClient(object):
             destination (str): The remote server to send the HTTP request
                 to.
             path (str): The HTTP path.
-            long_retries (bool): A boolean that indicates whether we should
-                retry for a short or long time.
-            timeout(int): How long to try (in ms) the destination for before
-                giving up. None indicates no timeout.
+
+            long_retries (bool): whether to use the long retry algorithm. See
+                docs on _send_request for details.
+
+            timeout (int|None): number of milliseconds to wait for the response headers
+                (including connecting to the server), *for each attempt*.
+                self.default_timeout (60s) by default.
+
             ignore_backoff (bool): true to ignore the historical backoff data and
                 try the request anyway.
+
+            args (dict): query params
         Returns:
             Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
             result will be the decoded JSON body.
@@ -753,7 +816,7 @@ class MatrixFederationHttpClient(object):
         )
 
         body = yield _handle_json_response(
-            self.hs.get_reactor(), self.default_timeout, request, response,
+            self.reactor, self.default_timeout, request, response,
         )
         defer.returnValue(body)
 
@@ -801,7 +864,7 @@ class MatrixFederationHttpClient(object):
 
         try:
             d = _readBodyToFile(response, output_stream, max_size)
-            d.addTimeout(self.default_timeout, self.hs.get_reactor())
+            d.addTimeout(self.default_timeout, self.reactor)
             length = yield make_deferred_yieldable(d)
         except Exception as e:
             logger.warn(
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 528125e737..197c652850 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -55,7 +55,7 @@ def parse_integer_from_args(args, name, default=None, required=False):
             return int(args[name][0])
         except Exception:
             message = "Query parameter %r must be an integer" % (name,)
-            raise SynapseError(400, message)
+            raise SynapseError(400, message, errcode=Codes.INVALID_PARAM)
     else:
         if required:
             message = "Missing integer query parameter %r" % (name,)
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index ef48984fdd..8aee14a8a8 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -25,7 +25,7 @@ import six
 
 import attr
 from prometheus_client import Counter, Gauge, Histogram
-from prometheus_client.core import REGISTRY, GaugeMetricFamily
+from prometheus_client.core import REGISTRY, GaugeMetricFamily, HistogramMetricFamily
 
 from twisted.internet import reactor
 
@@ -40,7 +40,6 @@ HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
 
 
 class RegistryProxy(object):
-
     @staticmethod
     def collect():
         for metric in REGISTRY.collect():
@@ -63,10 +62,7 @@ class LaterGauge(object):
         try:
             calls = self.caller()
         except Exception:
-            logger.exception(
-                "Exception running callback for LaterGauge(%s)",
-                self.name,
-            )
+            logger.exception("Exception running callback for LaterGauge(%s)", self.name)
             yield g
             return
 
@@ -116,9 +112,7 @@ class InFlightGauge(object):
         # Create a class which have the sub_metrics values as attributes, which
         # default to 0 on initialization. Used to pass to registered callbacks.
         self._metrics_class = attr.make_class(
-            "_MetricsEntry",
-            attrs={x: attr.ib(0) for x in sub_metrics},
-            slots=True,
+            "_MetricsEntry", attrs={x: attr.ib(0) for x in sub_metrics}, slots=True
         )
 
         # Counts number of in flight blocks for a given set of label values
@@ -157,7 +151,9 @@ class InFlightGauge(object):
 
         Note: may be called by a separate thread.
         """
-        in_flight = GaugeMetricFamily(self.name + "_total", self.desc, labels=self.labels)
+        in_flight = GaugeMetricFamily(
+            self.name + "_total", self.desc, labels=self.labels
+        )
 
         metrics_by_key = {}
 
@@ -179,7 +175,9 @@ class InFlightGauge(object):
         yield in_flight
 
         for name in self.sub_metrics:
-            gauge = GaugeMetricFamily("_".join([self.name, name]), "", labels=self.labels)
+            gauge = GaugeMetricFamily(
+                "_".join([self.name, name]), "", labels=self.labels
+            )
             for key, metrics in six.iteritems(metrics_by_key):
                 gauge.add_metric(key, getattr(metrics, name))
             yield gauge
@@ -193,12 +191,74 @@ class InFlightGauge(object):
         all_gauges[self.name] = self
 
 
+@attr.s(hash=True)
+class BucketCollector(object):
+    """
+    Like a Histogram, but allows buckets to be point-in-time instead of
+    incrementally added to.
+
+    Args:
+        name (str): Base name of metric to be exported to Prometheus.
+        data_collector (callable -> dict): A synchronous callable that
+            returns a dict mapping bucket to number of items in the
+            bucket. If these buckets are not the same as the buckets
+            given to this class, they will be remapped into them.
+        buckets (list[float]): List of floats/ints of the buckets to
+            give to Prometheus. +Inf is ignored, if given.
+
+    """
+
+    name = attr.ib()
+    data_collector = attr.ib()
+    buckets = attr.ib()
+
+    def collect(self):
+
+        # Fetch the data -- this must be synchronous!
+        data = self.data_collector()
+
+        buckets = {}
+
+        res = []
+        for x in data.keys():
+            for bound in self.buckets:
+                if x <= bound:
+                    buckets[bound] = buckets.get(bound, 0) + data[x]
+
+        for bound in self.buckets:
+            res.append([str(bound), buckets.get(bound, 0)])
+
+        res.append(["+Inf", sum(data.values())])
+
+        metric = HistogramMetricFamily(
+            self.name,
+            "",
+            buckets=res,
+            sum_value=sum([x * y for x, y in data.items()]),
+        )
+        yield metric
+
+    def __attrs_post_init__(self):
+        self.buckets = [float(x) for x in self.buckets if x != "+Inf"]
+        if self.buckets != sorted(self.buckets):
+            raise ValueError("Buckets not sorted")
+
+        self.buckets = tuple(self.buckets)
+
+        if self.name in all_gauges.keys():
+            logger.warning("%s already registered, reregistering" % (self.name,))
+            REGISTRY.unregister(all_gauges.pop(self.name))
+
+        REGISTRY.register(self)
+        all_gauges[self.name] = self
+
+
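# Possible usage of BucketCollector (metric name and numbers illustrative):
# data_collector returns {observed value: item count}, and collect() folds
# those into the cumulative buckets that Prometheus expects.
queue_size_collector = BucketCollector(
    "synapse_example_queue_size",
    lambda: {1: 10, 50: 3, 1000: 1},
    buckets=[1, 10, 100, "+Inf"],
)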
 #
 # Detailed CPU metrics
 #
 
-class CPUMetrics(object):
 
+class CPUMetrics(object):
     def __init__(self):
         ticks_per_sec = 100
         try:
@@ -237,13 +297,28 @@ gc_time = Histogram(
     "python_gc_time",
     "Time taken to GC (sec)",
     ["gen"],
-    buckets=[0.0025, 0.005, 0.01, 0.025, 0.05, 0.10, 0.25, 0.50, 1.00, 2.50,
-             5.00, 7.50, 15.00, 30.00, 45.00, 60.00],
+    buckets=[
+        0.0025,
+        0.005,
+        0.01,
+        0.025,
+        0.05,
+        0.10,
+        0.25,
+        0.50,
+        1.00,
+        2.50,
+        5.00,
+        7.50,
+        15.00,
+        30.00,
+        45.00,
+        60.00,
+    ],
 )
 
 
 class GCCounts(object):
-
     def collect(self):
         cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"])
         for n, m in enumerate(gc.get_count()):
@@ -279,9 +354,7 @@ sent_transactions_counter = Counter("synapse_federation_client_sent_transactions
 events_processed_counter = Counter("synapse_federation_client_events_processed", "")
 
 event_processing_loop_counter = Counter(
-    "synapse_event_processing_loop_count",
-    "Event processing loop iterations",
-    ["name"],
+    "synapse_event_processing_loop_count", "Event processing loop iterations", ["name"]
 )
 
 event_processing_loop_room_count = Counter(
@@ -311,7 +384,6 @@ last_ticked = time.time()
 
 
 class ReactorLastSeenMetric(object):
-
     def collect(self):
         cm = GaugeMetricFamily(
             "python_twisted_reactor_last_seen",
@@ -325,7 +397,6 @@ REGISTRY.register(ReactorLastSeenMetric())
 
 
 def runUntilCurrentTimer(func):
-
     @functools.wraps(func)
     def f(*args, **kwargs):
         now = reactor.seconds()
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index 8f0682c948..3523a40108 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -261,6 +261,23 @@ BASE_APPEND_OVERRIDE_RULES = [
                 'value': True,
             }
         ]
+    },
+    {
+        'rule_id': 'global/override/.m.rule.tombstone',
+        'conditions': [
+            {
+                'kind': 'event_match',
+                'key': 'type',
+                'pattern': 'm.room.tombstone',
+                '_id': '_tombstone',
+            }
+        ],
+        'actions': [
+            'notify', {
+                'set_tweak': 'highlight',
+                'value': True,
+            }
+        ]
     }
 ]
 
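# The new override rule above fires on room upgrade events. For reference,
# a sketch of the event shape its event_match condition tests against (the
# room ID is illustrative):
tombstone_event = {
    "type": "m.room.tombstone",  # matched by the 'pattern' above
    "state_key": "",
    "content": {
        "body": "This room has been replaced",
        "replacement_room": "!newroom:example.org",
    },
}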
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index e8ee67401f..c89a8438a9 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -114,6 +114,21 @@ class EmailPusher(object):
 
         run_as_background_process("emailpush.process", self._process)
 
+    def _pause_processing(self):
+        """Used by tests to temporarily pause processing of events.
+
+        Asserts that it's not currently processing.
+        """
+        assert not self._is_processing
+        self._is_processing = True
+
+    def _resume_processing(self):
+        """Used by tests to resume processing of events after pausing.
+        """
+        assert self._is_processing
+        self._is_processing = False
+        self._start_processing()
+
     @defer.inlineCallbacks
     def _process(self):
         # we should never get here if we are already processing
@@ -215,6 +230,10 @@ class EmailPusher(object):
 
     @defer.inlineCallbacks
     def save_last_stream_ordering_and_success(self, last_stream_ordering):
+        if last_stream_ordering is None:
+            # This happens if we haven't yet processed anything
+            return
+
         self.last_stream_ordering = last_stream_ordering
         yield self.store.update_pusher_last_stream_ordering_and_success(
             self.app_id, self.email, self.user_id,
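# How the test-only hooks added above might be used in a unit test (pusher
# construction elided; a sketch, not a real test):
pusher._pause_processing()
# ... deliver events while the pusher is guaranteed not to be running ...
pusher._resume_processing()  # restarts processing and drains the backlog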
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 1eb5be0957..099f9545ab 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -80,10 +80,10 @@ ALLOWED_ATTRS = {
 
 
 class Mailer(object):
-    def __init__(self, hs, app_name, notif_template_html, notif_template_text):
+    def __init__(self, hs, app_name, template_html, template_text):
         self.hs = hs
-        self.notif_template_html = notif_template_html
-        self.notif_template_text = notif_template_text
+        self.template_html = template_html
+        self.template_text = template_text
 
         self.sendmail = self.hs.get_sendmail()
         self.store = self.hs.get_datastore()
@@ -94,21 +94,48 @@ class Mailer(object):
         logger.info("Created Mailer for app_name %s" % app_name)
 
     @defer.inlineCallbacks
-    def send_notification_mail(self, app_id, user_id, email_address,
-                               push_actions, reason):
-        try:
-            from_string = self.hs.config.email_notif_from % {
-                "app": self.app_name
-            }
-        except TypeError:
-            from_string = self.hs.config.email_notif_from
+    def send_password_reset_mail(
+        self,
+        email_address,
+        token,
+        client_secret,
+        sid,
+    ):
+        """Send an email with a password reset link to a user
+
+        Args:
+            email_address (str): Email address we're sending the password
+                reset to
+            token (str): Unique token generated by the server to verify
+                password reset email was received
+            client_secret (str): Unique token generated by the client to
+                group together multiple email sending attempts
+            sid (str): The generated session ID
+        """
+        if email.utils.parseaddr(email_address)[1] == '':
+            raise RuntimeError("Invalid 'to' email address")
+
+        link = (
+            self.hs.config.public_baseurl +
+            "_matrix/client/unstable/password_reset/email/submit_token"
+            "?token=%s&client_secret=%s&sid=%s" %
+            (token, client_secret, sid)
+        )
 
-        raw_from = email.utils.parseaddr(from_string)[1]
-        raw_to = email.utils.parseaddr(email_address)[1]
+        template_vars = {
+            "link": link,
+        }
 
-        if raw_to == '':
-            raise RuntimeError("Invalid 'to' address")
+        yield self.send_email(
+            email_address,
+            "[%s] Password Reset Email" % self.hs.config.server_name,
+            template_vars,
+        )
 
+    @defer.inlineCallbacks
+    def send_notification_mail(self, app_id, user_id, email_address,
+                               push_actions, reason):
+        """Send email regarding a user's room notifications"""
         rooms_in_order = deduped_ordered_list(
             [pa['room_id'] for pa in push_actions]
         )
@@ -176,14 +203,36 @@ class Mailer(object):
             "reason": reason,
         }
 
-        html_text = self.notif_template_html.render(**template_vars)
+        yield self.send_email(
+            email_address,
+            "[%s] %s" % (self.app_name, summary_text),
+            template_vars,
+        )
+
+    @defer.inlineCallbacks
+    def send_email(self, email_address, subject, template_vars):
+        """Send an email with the given information and template text"""
+        try:
+            from_string = self.hs.config.email_notif_from % {
+                "app": self.app_name
+            }
+        except TypeError:
+            from_string = self.hs.config.email_notif_from
+
+        raw_from = email.utils.parseaddr(from_string)[1]
+        raw_to = email.utils.parseaddr(email_address)[1]
+
+        if raw_to == '':
+            raise RuntimeError("Invalid 'to' address")
+
+        html_text = self.template_html.render(**template_vars)
         html_part = MIMEText(html_text, "html", "utf8")
 
-        plain_text = self.notif_template_text.render(**template_vars)
+        plain_text = self.template_text.render(**template_vars)
         text_part = MIMEText(plain_text, "plain", "utf8")
 
         multipart_msg = MIMEMultipart('alternative')
-        multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text)
+        multipart_msg['Subject'] = subject
         multipart_msg['From'] = from_string
         multipart_msg['To'] = email_address
         multipart_msg['Date'] = email.utils.formatdate()
@@ -521,11 +570,11 @@ def format_ts_filter(value, format):
     return time.strftime(format, time.localtime(value / 1000))
 
 
-def load_jinja2_templates(config):
+def load_jinja2_templates(config, template_html_name, template_text_name):
     """Load the jinja2 email templates from disk
 
     Returns:
-        (notif_template_html, notif_template_text)
+        (template_html, template_text)
     """
     logger.info("loading email templates from '%s'", config.email_template_dir)
     loader = jinja2.FileSystemLoader(config.email_template_dir)
@@ -533,14 +582,10 @@ def load_jinja2_templates(config):
     env.filters["format_ts"] = format_ts_filter
     env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config)
 
-    notif_template_html = env.get_template(
-        config.email_notif_template_html
-    )
-    notif_template_text = env.get_template(
-        config.email_notif_template_text
-    )
+    template_html = env.get_template(template_html_name)
+    template_text = env.get_template(template_text_name)
 
-    return notif_template_html, notif_template_text
+    return template_html, template_text
 
 
 def _create_mxc_to_http_filter(config):
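For reference, a minimal standalone sketch of what the shared send_email() helper above does once the templates are rendered: validate the recipient with email.utils.parseaddr, then wrap the plain and HTML renderings in a multipart/alternative message. Standard library only; from_string and the rendered texts are assumed inputs.

import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def build_multipart_email(from_string, email_address, subject, plain_text, html_text):
    # Reject obviously malformed recipients, as send_email does.
    if email.utils.parseaddr(email_address)[1] == '':
        raise RuntimeError("Invalid 'to' address")

    # multipart/alternative: a client renders the richest part it supports,
    # so the plain-text part goes first and the HTML part last.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = from_string
    msg['To'] = email_address
    msg['Date'] = email.utils.formatdate()
    msg.attach(MIMEText(plain_text, "plain", "utf8"))
    msg.attach(MIMEText(html_text, "html", "utf8"))
    return msg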
diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py
index eef6e18c2e..0c66702325 100644
--- a/synapse/push/presentable_names.py
+++ b/synapse/push/presentable_names.py
@@ -162,6 +162,17 @@ def calculate_room_name(store, room_state_ids, user_id, fallback_to_members=True
 
 
 def descriptor_from_member_events(member_events):
+    """Get a description of the room based on the member events.
+
+    Args:
+        member_events (Iterable[FrozenEvent])
+
+    Returns:
+        str
+    """
+
+    member_events = list(member_events)
+
     if len(member_events) == 0:
         return "nobody"
     elif len(member_events) == 1:
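The remainder of this function (outside the hunk) joins the member names into a summary string; a simplified, hypothetical equivalent over plain display names shows both the documented contract and why the list() materialisation matters: a generator has no len().

def describe_members(names):
    names = list(names)  # `names` may be a generator; len() needs a sized container
    if len(names) == 0:
        return "nobody"
    elif len(names) == 1:
        return names[0]
    elif len(names) == 2:
        return "%s and %s" % (names[0], names[1])
    return "%s and %d others" % (names[0], len(names) - 1)

print(describe_members(n for n in ["alice", "bob", "carol"]))  # alice and 2 others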
diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py
index b33f2a357b..aff85daeb5 100644
--- a/synapse/push/pusher.py
+++ b/synapse/push/pusher.py
@@ -44,7 +44,11 @@ class PusherFactory(object):
         if hs.config.email_enable_notifs:
             self.mailers = {}  # app_name -> Mailer
 
-            templates = load_jinja2_templates(hs.config)
+            templates = load_jinja2_templates(
+                config=hs.config,
+                template_html_name=hs.config.email_notif_template_html,
+                template_text_name=hs.config.email_notif_template_text,
+            )
             self.notif_template_html, self.notif_template_text = templates
 
             self.pusher_types["email"] = self._create_email_pusher
@@ -66,8 +70,8 @@ class PusherFactory(object):
             mailer = Mailer(
                 hs=self.hs,
                 app_name=app_name,
-                notif_template_html=self.notif_template_html,
-                notif_template_text=self.notif_template_text,
+                template_html=self.notif_template_html,
+                template_text=self.notif_template_text,
             )
             self.mailers[app_name] = mailer
         return EmailPusher(self.hs, pusherdict, mailer)
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 40a7709c09..63c583565f 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -60,6 +60,11 @@ class PusherPool:
     def add_pusher(self, user_id, access_token, kind, app_id,
                    app_display_name, device_display_name, pushkey, lang, data,
                    profile_tag=""):
+        """Creates a new pusher and adds it to the pool
+
+        Returns:
+            Deferred[EmailPusher|HttpPusher]
+        """
         time_now_msec = self.clock.time_msec()
 
         # we try to create the pusher just to validate the config: it
@@ -103,7 +108,9 @@ class PusherPool:
             last_stream_ordering=last_stream_ordering,
             profile_tag=profile_tag,
         )
-        yield self.start_pusher_by_id(app_id, pushkey, user_id)
+        pusher = yield self.start_pusher_by_id(app_id, pushkey, user_id)
+
+        defer.returnValue(pusher)
 
     @defer.inlineCallbacks
     def remove_pushers_by_app_id_and_pushkey_not_user(self, app_id, pushkey,
@@ -184,7 +191,11 @@ class PusherPool:
 
     @defer.inlineCallbacks
     def start_pusher_by_id(self, app_id, pushkey, user_id):
-        """Look up the details for the given pusher, and start it"""
+        """Look up the details for the given pusher, and start it
+
+        Returns:
+            Deferred[EmailPusher|HttpPusher|None]: The pusher started, if any
+        """
         if not self._should_start_pushers:
             return
 
@@ -192,13 +203,16 @@ class PusherPool:
             app_id, pushkey
         )
 
-        p = None
+        pusher_dict = None
         for r in resultlist:
             if r['user_name'] == user_id:
-                p = r
+                pusher_dict = r
 
-        if p:
-            yield self._start_pusher(p)
+        pusher = None
+        if pusher_dict:
+            pusher = yield self._start_pusher(pusher_dict)
+
+        defer.returnValue(pusher)
 
     @defer.inlineCallbacks
     def _start_pushers(self):
@@ -224,7 +238,7 @@ class PusherPool:
             pusherdict (dict):
 
         Returns:
-            None
+            Deferred[EmailPusher|HttpPusher]
         """
         try:
             p = self.pusher_factory.create_pusher(pusherdict)
@@ -270,6 +284,8 @@ class PusherPool:
 
         p.on_started(have_notifs)
 
+        defer.returnValue(p)
+
     @defer.inlineCallbacks
     def remove_pusher(self, app_id, pushkey, user_id):
         appid_pushkey = "%s:%s" % (app_id, pushkey)
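Since add_pusher and start_pusher_by_id now return the pusher they start, callers can hold on to it. A hedged sketch of such a caller (the "m.email" app_id and the other field values are illustrative, not mandated by this diff):

from twisted.internet import defer

@defer.inlineCallbacks
def add_email_pusher(pusher_pool, user_id, access_token, email_address):
    pusher = yield pusher_pool.add_pusher(
        user_id=user_id,
        access_token=access_token,
        kind="email",
        app_id="m.email",  # illustrative values from here down
        app_display_name="Email Notifications",
        device_display_name=email_address,
        pushkey=email_address,
        lang=None,
        data={},
    )
    # `pusher` is an EmailPusher, or None if this process does not start
    # pushers (see start_pusher_by_id above).
    defer.returnValue(pusher)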
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index f71e21ff4d..11ace2bfb1 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -16,7 +16,12 @@
 
 import logging
 
-from pkg_resources import DistributionNotFound, VersionConflict, get_distribution
+from pkg_resources import (
+    DistributionNotFound,
+    Requirement,
+    VersionConflict,
+    get_provider,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -39,7 +44,10 @@ REQUIREMENTS = [
     "canonicaljson>=1.1.3",
     "signedjson>=1.0.0",
     "pynacl>=1.2.1",
-    "service_identity>=16.0.0",
+    "idna>=2.5",
+
+    # validating SSL certs for IP addresses requires service_identity 18.1.
+    "service_identity>=18.1.0",
 
     # our logcontext handling relies on the ability to cancel inlineCallbacks
     # (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
@@ -53,11 +61,11 @@ REQUIREMENTS = [
     "pyasn1-modules>=0.0.7",
     "daemonize>=2.3.1",
     "bcrypt>=3.1.0",
-    "pillow>=3.1.2",
+    "pillow>=4.3.0",
     "sortedcontainers>=1.4.4",
     "psutil>=2.0.0",
     "pymacaroons>=0.13.0",
-    "msgpack>=0.5.0",
+    "msgpack>=0.5.2",
     "phonenumbers>=8.2.0",
     "six>=1.10",
     # prometheus_client 0.4.0 changed the format of counter metrics
@@ -72,30 +80,42 @@ REQUIREMENTS = [
 ]
 
 CONDITIONAL_REQUIREMENTS = {
-    "email.enable_notifs": ["Jinja2>=2.9", "bleach>=1.4.2"],
+    "email": ["Jinja2>=2.9", "bleach>=1.4.3"],
     "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
-    "postgres": ["psycopg2>=2.6"],
+
+    # we use execute_batch, which arrived in psycopg 2.7.
+    "postgres": ["psycopg2>=2.7"],
 
     # ConsentResource uses select_autoescape, which arrived in jinja 2.9
     "resources.consent": ["Jinja2>=2.9"],
 
     # ACME support is required to provision TLS certificates from authorities
     # that use the protocol, such as Let's Encrypt.
-    "acme": ["txacme>=0.9.2"],
+    "acme": [
+        "txacme>=0.9.2",
+
+        # txacme depends on eliot. Eliot 1.8.0 is incompatible with
+        # python 3.5.2, as per https://github.com/itamarst/eliot/issues/418
+        'eliot<1.8.0;python_version<"3.5.3"',
+    ],
 
     "saml2": ["pysaml2>=4.5.0"],
+    "systemd": ["systemd-python>=231"],
     "url_preview": ["lxml>=3.5.0"],
     "test": ["mock>=2.0", "parameterized"],
     "sentry": ["sentry-sdk>=0.7.2"],
 }
 
+ALL_OPTIONAL_REQUIREMENTS = set()
+
+for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
+    # Exclude systemd as it's a system-based requirement.
+    if name not in ["systemd"]:
+        ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS
 
-def list_requirements():
-    deps = set(REQUIREMENTS)
-    for opt in CONDITIONAL_REQUIREMENTS.values():
-        deps = set(opt) | deps
 
-    return list(deps)
+def list_requirements():
+    return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS)
 
 
 class DependencyException(Exception):
@@ -111,10 +131,10 @@ class DependencyException(Exception):
     @property
     def dependencies(self):
         for i in self.args[0]:
-            yield '"' + i + '"'
+            yield "'" + i + "'"
 
 
-def check_requirements(for_feature=None, _get_distribution=get_distribution):
+def check_requirements(for_feature=None):
     deps_needed = []
     errors = []
 
@@ -125,7 +145,7 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution):
 
     for dependency in reqs:
         try:
-            _get_distribution(dependency)
+            _check_requirement(dependency)
         except VersionConflict as e:
             deps_needed.append(dependency)
             errors.append(
@@ -143,7 +163,7 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution):
 
         for dependency in OPTS:
             try:
-                _get_distribution(dependency)
+                _check_requirement(dependency)
             except VersionConflict as e:
                 deps_needed.append(dependency)
                 errors.append(
@@ -161,6 +181,23 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution):
         raise DependencyException(deps_needed)
 
 
+def _check_requirement(dependency_string):
+    """Parses a dependency string, and checks if the specified requirement is installed
+
+    Raises:
+        VersionConflict if the requirement is installed, but with the wrong version
+        DistributionNotFound if nothing is found to provide the requirement
+    """
+    req = Requirement.parse(dependency_string)
+
+    # first check if the markers specify that this requirement needs installing
+    if req.marker is not None and not req.marker.evaluate():
+        # not required for this environment
+        return
+
+    get_provider(req)
+
+
 if __name__ == "__main__":
     import sys
 
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index e81456ab2b..0a432a16fa 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -17,11 +17,17 @@ import abc
 import logging
 import re
 
+from six import raise_from
 from six.moves import urllib
 
 from twisted.internet import defer
 
-from synapse.api.errors import CodeMessageException, HttpResponseException
+from synapse.api.errors import (
+    CodeMessageException,
+    HttpResponseException,
+    RequestSendFailed,
+    SynapseError,
+)
 from synapse.util.caches.response_cache import ResponseCache
 from synapse.util.stringutils import random_string
 
@@ -175,6 +181,8 @@ class ReplicationEndpoint(object):
                 # on the master process that we should send to the client. (And
                 # importantly, not stack traces everywhere)
                 raise e.to_synapse_error()
+            except RequestSendFailed as e:
+                raise_from(SynapseError(502, "Failed to talk to master"), e)
 
             defer.returnValue(result)
 
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index b457c5563f..a3952506c1 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -23,6 +23,7 @@ from synapse.replication.tcp.streams.events import (
 from synapse.storage.event_federation import EventFederationWorkerStore
 from synapse.storage.event_push_actions import EventPushActionsWorkerStore
 from synapse.storage.events_worker import EventsWorkerStore
+from synapse.storage.relations import RelationsWorkerStore
 from synapse.storage.roommember import RoomMemberWorkerStore
 from synapse.storage.signatures import SignatureWorkerStore
 from synapse.storage.state import StateGroupWorkerStore
@@ -52,6 +53,7 @@ class SlavedEventStore(EventFederationWorkerStore,
                        EventsWorkerStore,
                        SignatureWorkerStore,
                        UserErasureWorkerStore,
+                       RelationsWorkerStore,
                        BaseSlavedStore):
 
     def __init__(self, db_conn, hs):
@@ -89,7 +91,7 @@ class SlavedEventStore(EventFederationWorkerStore,
             for row in rows:
                 self.invalidate_caches_for_event(
                     -token, row.event_id, row.room_id, row.type, row.state_key,
-                    row.redacts,
+                    row.redacts, row.relates_to,
                     backfilled=True,
                 )
         return super(SlavedEventStore, self).process_replication_rows(
@@ -102,7 +104,7 @@ class SlavedEventStore(EventFederationWorkerStore,
         if row.type == EventsStreamEventRow.TypeId:
             self.invalidate_caches_for_event(
                 token, data.event_id, data.room_id, data.type, data.state_key,
-                data.redacts,
+                data.redacts, data.relates_to,
                 backfilled=False,
             )
         elif row.type == EventsStreamCurrentStateRow.TypeId:
@@ -114,7 +116,8 @@ class SlavedEventStore(EventFederationWorkerStore,
             raise Exception("Unknown events stream row type %s" % (row.type, ))
 
     def invalidate_caches_for_event(self, stream_ordering, event_id, room_id,
-                                    etype, state_key, redacts, backfilled):
+                                    etype, state_key, redacts, relates_to,
+                                    backfilled):
         self._invalidate_get_event_cache(event_id)
 
         self.get_latest_event_ids_in_room.invalidate((room_id,))
@@ -136,3 +139,8 @@ class SlavedEventStore(EventFederationWorkerStore,
                 state_key, stream_ordering
             )
             self.get_invited_rooms_for_user.invalidate((state_key,))
+
+        if relates_to:
+            self.get_relations_for_event.invalidate_many((relates_to,))
+            self.get_aggregation_groups_for_event.invalidate_many((relates_to,))
+            self.get_applicable_edit.invalidate((relates_to,))
diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py
index 8032f53fec..cc6f7f009f 100644
--- a/synapse/replication/slave/storage/keys.py
+++ b/synapse/replication/slave/storage/keys.py
@@ -13,22 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage import DataStore
-from synapse.storage.keys import KeyStore
+from synapse.storage import KeyStore
 
-from ._base import BaseSlavedStore, __func__
+# KeyStore isn't really safe to use from a worker, but for now we do so and hope that
+# the races it creates aren't too bad.
 
-
-class SlavedKeyStore(BaseSlavedStore):
-    _get_server_verify_key = KeyStore.__dict__[
-        "_get_server_verify_key"
-    ]
-
-    get_server_verify_keys = __func__(DataStore.get_server_verify_keys)
-    store_server_verify_key = __func__(DataStore.store_server_verify_key)
-
-    get_server_certificate = __func__(DataStore.get_server_certificate)
-    store_server_certificate = __func__(DataStore.store_server_certificate)
-
-    get_server_keys_json = __func__(DataStore.get_server_keys_json)
-    store_server_keys_json = __func__(DataStore.store_server_keys_json)
+SlavedKeyStore = KeyStore
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 8971a6a22e..b6ce7a7bee 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -32,6 +32,7 @@ BackfillStreamRow = namedtuple("BackfillStreamRow", (
     "type",  # str
     "state_key",  # str, optional
     "redacts",  # str, optional
+    "relates_to",  # str, optional
 ))
 PresenceStreamRow = namedtuple("PresenceStreamRow", (
     "user_id",  # str
diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py
index e0f6e29248..f1290d022a 100644
--- a/synapse/replication/tcp/streams/events.py
+++ b/synapse/replication/tcp/streams/events.py
@@ -80,11 +80,12 @@ class BaseEventsStreamRow(object):
 class EventsStreamEventRow(BaseEventsStreamRow):
     TypeId = "ev"
 
-    event_id = attr.ib()   # str
-    room_id = attr.ib()    # str
-    type = attr.ib()       # str
-    state_key = attr.ib()  # str, optional
-    redacts = attr.ib()    # str, optional
+    event_id = attr.ib()    # str
+    room_id = attr.ib()     # str
+    type = attr.ib()        # str
+    state_key = attr.ib()   # str, optional
+    redacts = attr.ib()     # str, optional
+    relates_to = attr.ib()  # str, optional
 
 
 @attr.s(slots=True, frozen=True)
diff --git a/synapse/res/templates/mail-expiry.css b/synapse/res/templates/mail-expiry.css
new file mode 100644
index 0000000000..3dea486467
--- /dev/null
+++ b/synapse/res/templates/mail-expiry.css
@@ -0,0 +1,4 @@
+.noticetext {
+    margin-top: 10px;
+    margin-bottom: 10px;
+}
diff --git a/synapse/res/templates/notice_expiry.html b/synapse/res/templates/notice_expiry.html
new file mode 100644
index 0000000000..f0d7c66e1b
--- /dev/null
+++ b/synapse/res/templates/notice_expiry.html
@@ -0,0 +1,43 @@
+<!doctype html>
+<html lang="en">
+    <head>
+        <style type="text/css">
+            {% include 'mail.css' without context %}
+            {% include "mail-%s.css" % app_name ignore missing without context %}
+            {% include 'mail-expiry.css' without context %}
+        </style>
+    </head>
+    <body>
+        <table id="page">
+            <tr>
+                <td> </td>
+                <td id="inner">
+                    <table class="header">
+                        <tr>
+                            <td>
+                                <div class="salutation">Hi {{ display_name }},</div>
+                            </td>
+                            <td class="logo">
+                                {% if app_name == "Riot" %}
+                                    <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+                                {% elif app_name == "Vector" %}
+                                    <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+                                {% else %}
+                                    <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+                                {% endif %}
+                            </td>
+                        </tr>
+                        <tr>
+                          <td colspan="2">
+                            <div class="noticetext">Your account will expire on {{ expiration_ts|format_ts("%d-%m-%Y") }}. This means that you will lose access to your account after this date.</div>
+                            <div class="noticetext">To extend the validity of your account, please click on the link below (or copy and paste it into a new browser tab):</div>
+                            <div class="noticetext"><a href="{{ url }}">{{ url }}</a></div>
+                          </td>
+                        </tr>
+                    </table>
+                </td>
+                <td> </td>
+            </tr>
+        </table>
+    </body>
+</html>
diff --git a/synapse/res/templates/notice_expiry.txt b/synapse/res/templates/notice_expiry.txt
new file mode 100644
index 0000000000..41f1c4279c
--- /dev/null
+++ b/synapse/res/templates/notice_expiry.txt
@@ -0,0 +1,7 @@
+Hi {{ display_name }},
+
+Your account will expire on {{ expiration_ts|format_ts("%d-%m-%Y") }}. This means that you will lose access to your account after this date.
+
+To extend the validity of your account, please click on the link below (or copy and paste it into a new browser tab):
+
+{{ url }}
diff --git a/synapse/res/templates/password_reset.html b/synapse/res/templates/password_reset.html
new file mode 100644
index 0000000000..4fa7b36734
--- /dev/null
+++ b/synapse/res/templates/password_reset.html
@@ -0,0 +1,9 @@
+<html>
+<body>
+    <p>A password reset request has been received for your Matrix account. If this was you, please click the link below to confirm resetting your password:</p>
+
+    <a href="{{ link }}">{{ link }}</a>
+
+    <p>If this was not you, please disregard this email and contact your server administrator. Thank you.</p>
+</body>
+</html>
diff --git a/synapse/res/templates/password_reset.txt b/synapse/res/templates/password_reset.txt
new file mode 100644
index 0000000000..f0deff59a7
--- /dev/null
+++ b/synapse/res/templates/password_reset.txt
@@ -0,0 +1,7 @@
+A password reset request has been received for your Matrix account. If this
+was you, please click the link below to confirm resetting your password:
+
+{{ link }}
+
+If this was not you, please disregard this email and contact your server
+administrator. Thank you.
diff --git a/synapse/res/templates/password_reset_failure.html b/synapse/res/templates/password_reset_failure.html
new file mode 100644
index 0000000000..0b132cf8db
--- /dev/null
+++ b/synapse/res/templates/password_reset_failure.html
@@ -0,0 +1,6 @@
+<html>
+<head></head>
+<body>
+<p>{{ failure_reason }}. Your password has not been reset.</p>
+</body>
+</html>
diff --git a/synapse/res/templates/password_reset_success.html b/synapse/res/templates/password_reset_success.html
new file mode 100644
index 0000000000..7324d66d1e
--- /dev/null
+++ b/synapse/res/templates/password_reset_success.html
@@ -0,0 +1,6 @@
+<html>
+<head></head>
+<body>
+<p>Your email has now been validated, please return to your client to reset your password. You may now close this window.</p>
+</body>
+</html>
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 91f5247d52..e6110ad9b1 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -13,11 +13,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import synapse.rest.admin
 from synapse.http.server import JsonResource
 from synapse.rest.client import versions
 from synapse.rest.client.v1 import (
-    admin,
     directory,
     events,
     initial_sync,
@@ -33,6 +32,7 @@ from synapse.rest.client.v1 import (
 from synapse.rest.client.v2_alpha import (
     account,
     account_data,
+    account_validity,
     auth,
     capabilities,
     devices,
@@ -44,6 +44,7 @@ from synapse.rest.client.v2_alpha import (
     read_marker,
     receipts,
     register,
+    relations,
     report_event,
     room_keys,
     room_upgrade_rest_servlet,
@@ -57,8 +58,14 @@ from synapse.rest.client.v2_alpha import (
 
 
 class ClientRestResource(JsonResource):
-    """A resource for version 1 of the matrix client API."""
+    """Matrix Client API REST resource.
 
+    This gets mounted at various points under /_matrix/client, including:
+       * /_matrix/client/r0
+       * /_matrix/client/api/v1
+       * /_matrix/client/unstable
+       * etc
+    """
     def __init__(self, hs):
         JsonResource.__init__(self, hs, canonical_json=False)
         self.register_servlets(self, hs)
@@ -81,7 +88,6 @@ class ClientRestResource(JsonResource):
         presence.register_servlets(hs, client_resource)
         directory.register_servlets(hs, client_resource)
         voip.register_servlets(hs, client_resource)
-        admin.register_servlets(hs, client_resource)
         pusher.register_servlets(hs, client_resource)
         push_rule.register_servlets(hs, client_resource)
         logout.register_servlets(hs, client_resource)
@@ -109,3 +115,10 @@ class ClientRestResource(JsonResource):
         groups.register_servlets(hs, client_resource)
         room_upgrade_rest_servlet.register_servlets(hs, client_resource)
         capabilities.register_servlets(hs, client_resource)
+        account_validity.register_servlets(hs, client_resource)
+        relations.register_servlets(hs, client_resource)
+
+        # moving to /_synapse/admin
+        synapse.rest.admin.register_servlets_for_client_rest_resource(
+            hs, client_resource
+        )
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/admin/__init__.py
index 7d7a75fc30..d6c4dcdb18 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/admin/__init__.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
+# Copyright 2018-2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,6 +18,7 @@ import hashlib
 import hmac
 import logging
 import platform
+import re
 
 from six import text_type
 from six.moves import http_client
@@ -27,39 +28,56 @@ from twisted.internet import defer
 import synapse
 from synapse.api.constants import Membership, UserTypes
 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
+from synapse.http.server import JsonResource
 from synapse.http.servlet import (
+    RestServlet,
     assert_params_in_dict,
     parse_integer,
     parse_json_object_from_request,
     parse_string,
 )
+from synapse.rest.admin._base import assert_requester_is_admin, assert_user_is_admin
+from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
 from synapse.types import UserID, create_requester
 from synapse.util.versionstring import get_version_string
 
-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)
 
 
-class UsersRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/admin/users/(?P<user_id>[^/]*)")
+def historical_admin_path_patterns(path_regex):
+    """Returns the list of patterns for an admin endpoint, including historical ones
+
+    This is a backwards-compatibility hack. Previously, the Admin API was exposed at
+    various paths under /_matrix/client. This function returns a list of patterns
+    matching those paths (as well as the new one), so that existing scripts which rely
+    on the endpoints being available there are not broken.
+
+    Note that this should only be used for existing endpoints: new ones should just
+    register for the /_synapse/admin path.
+    """
+    return list(
+        re.compile(prefix + path_regex)
+        for prefix in (
+            "^/_synapse/admin/v1",
+            "^/_matrix/client/api/v1/admin",
+            "^/_matrix/client/unstable/admin",
+            "^/_matrix/client/r0/admin"
+        )
+    )
+
+
+class UsersRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)")
 
     def __init__(self, hs):
-        super(UsersRestServlet, self).__init__(hs)
+        self.hs = hs
+        self.auth = hs.get_auth()
         self.handlers = hs.get_handlers()
 
     @defer.inlineCallbacks
     def on_GET(self, request, user_id):
         target_user = UserID.from_string(user_id)
-        requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
-
-        # To allow all users to get the users list
-        # if not is_admin and target_user != auth_user:
-        #     raise AuthError(403, "You are not a server admin")
+        yield assert_requester_is_admin(self.auth, request)
 
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "Can only users a local user")
@@ -69,37 +87,30 @@ class UsersRestServlet(ClientV1RestServlet):
         defer.returnValue((200, ret))
 
 
-class VersionServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/admin/server_version")
-
-    @defer.inlineCallbacks
-    def on_GET(self, request):
-        requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
+class VersionServlet(RestServlet):
+    PATTERNS = (re.compile("^/_synapse/admin/v1/server_version$"), )
 
-        ret = {
+    def __init__(self, hs):
+        self.res = {
             'server_version': get_version_string(synapse),
             'python_version': platform.python_version(),
         }
 
-        defer.returnValue((200, ret))
+    def on_GET(self, request):
+        return 200, self.res
 
 
-class UserRegisterServlet(ClientV1RestServlet):
+class UserRegisterServlet(RestServlet):
     """
     Attributes:
          NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted
          nonces (dict[str, int]): The nonces that we will accept. A dict of
              nonce to the time it was generated, in int seconds.
     """
-    PATTERNS = client_path_patterns("/admin/register")
+    PATTERNS = historical_admin_path_patterns("/register")
     NONCE_TIMEOUT = 60
 
     def __init__(self, hs):
-        super(UserRegisterServlet, self).__init__(hs)
         self.handlers = hs.get_handlers()
         self.reactor = hs.get_reactor()
         self.nonces = {}
@@ -226,11 +237,12 @@ class UserRegisterServlet(ClientV1RestServlet):
         defer.returnValue((200, result))
 
 
-class WhoisRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/admin/whois/(?P<user_id>[^/]*)")
+class WhoisRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/whois/(?P<user_id>[^/]*)")
 
     def __init__(self, hs):
-        super(WhoisRestServlet, self).__init__(hs)
+        self.hs = hs
+        self.auth = hs.get_auth()
         self.handlers = hs.get_handlers()
 
     @defer.inlineCallbacks
@@ -238,10 +250,9 @@ class WhoisRestServlet(ClientV1RestServlet):
         target_user = UserID.from_string(user_id)
         requester = yield self.auth.get_user_by_req(request)
         auth_user = requester.user
-        is_admin = yield self.auth.is_server_admin(requester.user)
 
-        if not is_admin and target_user != auth_user:
-            raise AuthError(403, "You are not a server admin")
+        if target_user != auth_user:
+            yield assert_user_is_admin(self.auth, auth_user)
 
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "Can only whois a local user")
@@ -251,20 +262,16 @@ class WhoisRestServlet(ClientV1RestServlet):
         defer.returnValue((200, ret))
 
 
-class PurgeMediaCacheRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/admin/purge_media_cache")
+class PurgeMediaCacheRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/purge_media_cache")
 
     def __init__(self, hs):
         self.media_repository = hs.get_media_repository()
-        super(PurgeMediaCacheRestServlet, self).__init__(hs)
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_POST(self, request):
-        requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
+        yield assert_requester_is_admin(self.auth, request)
 
         before_ts = parse_integer(request, "before_ts", required=True)
         logger.info("before_ts: %r", before_ts)
@@ -274,9 +281,9 @@ class PurgeMediaCacheRestServlet(ClientV1RestServlet):
         defer.returnValue((200, ret))
 
 
-class PurgeHistoryRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/admin/purge_history/(?P<room_id>[^/]*)(/(?P<event_id>[^/]+))?"
+class PurgeHistoryRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns(
+        "/purge_history/(?P<room_id>[^/]*)(/(?P<event_id>[^/]+))?"
     )
 
     def __init__(self, hs):
@@ -285,17 +292,13 @@ class PurgeHistoryRestServlet(ClientV1RestServlet):
         Args:
             hs (synapse.server.HomeServer)
         """
-        super(PurgeHistoryRestServlet, self).__init__(hs)
         self.pagination_handler = hs.get_pagination_handler()
         self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_POST(self, request, room_id, event_id):
-        requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
+        yield assert_requester_is_admin(self.auth, request)
 
         body = parse_json_object_from_request(request, allow_empty_body=True)
 
@@ -371,9 +374,9 @@ class PurgeHistoryRestServlet(ClientV1RestServlet):
         }))
 
 
-class PurgeHistoryStatusRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/admin/purge_history_status/(?P<purge_id>[^/]+)"
+class PurgeHistoryStatusRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns(
+        "/purge_history_status/(?P<purge_id>[^/]+)"
     )
 
     def __init__(self, hs):
@@ -382,16 +385,12 @@ class PurgeHistoryStatusRestServlet(ClientV1RestServlet):
         Args:
             hs (synapse.server.HomeServer)
         """
-        super(PurgeHistoryStatusRestServlet, self).__init__(hs)
         self.pagination_handler = hs.get_pagination_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, purge_id):
-        requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
+        yield assert_requester_is_admin(self.auth, request)
 
         purge_status = self.pagination_handler.get_purge_status(purge_id)
         if purge_status is None:
@@ -400,15 +399,16 @@ class PurgeHistoryStatusRestServlet(ClientV1RestServlet):
         defer.returnValue((200, purge_status.asdict()))
 
 
-class DeactivateAccountRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/admin/deactivate/(?P<target_user_id>[^/]*)")
+class DeactivateAccountRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/deactivate/(?P<target_user_id>[^/]*)")
 
     def __init__(self, hs):
-        super(DeactivateAccountRestServlet, self).__init__(hs)
         self._deactivate_account_handler = hs.get_deactivate_account_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_POST(self, request, target_user_id):
+        yield assert_requester_is_admin(self.auth, request)
         body = parse_json_object_from_request(request, allow_empty_body=True)
         erase = body.get("erase", False)
         if not isinstance(erase, bool):
@@ -419,11 +419,6 @@ class DeactivateAccountRestServlet(ClientV1RestServlet):
             )
 
         UserID.from_string(target_user_id)
-        requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
 
         result = yield self._deactivate_account_handler.deactivate_account(
             target_user_id, erase,
@@ -438,13 +433,13 @@ class DeactivateAccountRestServlet(ClientV1RestServlet):
         }))
 
 
-class ShutdownRoomRestServlet(ClientV1RestServlet):
+class ShutdownRoomRestServlet(RestServlet):
     """Shuts down a room by removing all local users from the room and blocking
     all future invites and joins to the room. Any local aliases will be repointed
     to a new room created by `new_room_user_id` and kicked users will be auto
     joined to the new room.
     """
-    PATTERNS = client_path_patterns("/admin/shutdown_room/(?P<room_id>[^/]+)")
+    PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P<room_id>[^/]+)")
 
     DEFAULT_MESSAGE = (
         "Sharing illegal content on this server is not permitted and rooms in"
@@ -452,19 +447,18 @@ class ShutdownRoomRestServlet(ClientV1RestServlet):
     )
 
     def __init__(self, hs):
-        super(ShutdownRoomRestServlet, self).__init__(hs)
+        self.hs = hs
         self.store = hs.get_datastore()
         self.state = hs.get_state_handler()
         self._room_creation_handler = hs.get_room_creation_handler()
         self.event_creation_handler = hs.get_event_creation_handler()
         self.room_member_handler = hs.get_room_member_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_POST(self, request, room_id):
         requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
+        yield assert_user_is_admin(self.auth, requester.user)
 
         content = parse_json_object_from_request(request)
         assert_params_in_dict(content, ["new_room_user_id"])
@@ -564,22 +558,20 @@ class ShutdownRoomRestServlet(ClientV1RestServlet):
         }))
 
 
-class QuarantineMediaInRoom(ClientV1RestServlet):
+class QuarantineMediaInRoom(RestServlet):
     """Quarantines all media in a room so that no one can download it via
     this server.
     """
-    PATTERNS = client_path_patterns("/admin/quarantine_media/(?P<room_id>[^/]+)")
+    PATTERNS = historical_admin_path_patterns("/quarantine_media/(?P<room_id>[^/]+)")
 
     def __init__(self, hs):
-        super(QuarantineMediaInRoom, self).__init__(hs)
         self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_POST(self, request, room_id):
         requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
+        yield assert_user_is_admin(self.auth, requester.user)
 
         num_quarantined = yield self.store.quarantine_media_ids_in_room(
             room_id, requester.user.to_string(),
@@ -588,13 +580,12 @@ class QuarantineMediaInRoom(ClientV1RestServlet):
         defer.returnValue((200, {"num_quarantined": num_quarantined}))
 
 
-class ListMediaInRoom(ClientV1RestServlet):
+class ListMediaInRoom(RestServlet):
     """Lists all of the media in a given room.
     """
-    PATTERNS = client_path_patterns("/admin/room/(?P<room_id>[^/]+)/media")
+    PATTERNS = historical_admin_path_patterns("/room/(?P<room_id>[^/]+)/media")
 
     def __init__(self, hs):
-        super(ListMediaInRoom, self).__init__(hs)
         self.store = hs.get_datastore()
 
     @defer.inlineCallbacks
@@ -609,11 +600,11 @@ class ListMediaInRoom(ClientV1RestServlet):
         defer.returnValue((200, {"local": local_mxcs, "remote": remote_mxcs}))
 
 
-class ResetPasswordRestServlet(ClientV1RestServlet):
+class ResetPasswordRestServlet(RestServlet):
     """Post request to allow an administrator reset password for a user.
     This needs user to have administrator access in Synapse.
         Example:
-            http://localhost:8008/_matrix/client/api/v1/admin/reset_password/
+            http://localhost:8008/_synapse/admin/v1/reset_password/
             @user:to_reset_password?access_token=admin_access_token
         JsonBodyToSend:
             {
@@ -622,11 +613,10 @@ class ResetPasswordRestServlet(ClientV1RestServlet):
         Returns:
             200 OK with empty object if success otherwise an error.
         """
-    PATTERNS = client_path_patterns("/admin/reset_password/(?P<target_user_id>[^/]*)")
+    PATTERNS = historical_admin_path_patterns("/reset_password/(?P<target_user_id>[^/]*)")
 
     def __init__(self, hs):
         self.store = hs.get_datastore()
-        super(ResetPasswordRestServlet, self).__init__(hs)
         self.hs = hs
         self.auth = hs.get_auth()
         self._set_password_handler = hs.get_set_password_handler()
@@ -636,12 +626,10 @@ class ResetPasswordRestServlet(ClientV1RestServlet):
         """Post request to allow an administrator reset password for a user.
         This needs user to have administrator access in Synapse.
         """
-        UserID.from_string(target_user_id)
         requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
+        yield assert_user_is_admin(self.auth, requester.user)
 
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
+        UserID.from_string(target_user_id)
 
         params = parse_json_object_from_request(request)
         assert_params_in_dict(params, ["new_password"])
@@ -653,20 +641,19 @@ class ResetPasswordRestServlet(ClientV1RestServlet):
         defer.returnValue((200, {}))
 
 
-class GetUsersPaginatedRestServlet(ClientV1RestServlet):
+class GetUsersPaginatedRestServlet(RestServlet):
     """Get request to get specific number of users from Synapse.
     This needs user to have administrator access in Synapse.
         Example:
-            http://localhost:8008/_matrix/client/api/v1/admin/users_paginate/
+            http://localhost:8008/_synapse/admin/v1/users_paginate/
             @admin:user?access_token=admin_access_token&start=0&limit=10
         Returns:
             200 OK with json object {list[dict[str, Any]], count} or empty object.
         """
-    PATTERNS = client_path_patterns("/admin/users_paginate/(?P<target_user_id>[^/]*)")
+    PATTERNS = historical_admin_path_patterns("/users_paginate/(?P<target_user_id>[^/]*)")
 
     def __init__(self, hs):
         self.store = hs.get_datastore()
-        super(GetUsersPaginatedRestServlet, self).__init__(hs)
         self.hs = hs
         self.auth = hs.get_auth()
         self.handlers = hs.get_handlers()
@@ -676,16 +663,9 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
         """Get request to get specific number of users from Synapse.
         This needs user to have administrator access in Synapse.
         """
-        target_user = UserID.from_string(target_user_id)
-        requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
+        yield assert_requester_is_admin(self.auth, request)
 
-        # To allow all users to get the users list
-        # if not is_admin and target_user != auth_user:
-        #     raise AuthError(403, "You are not a server admin")
+        target_user = UserID.from_string(target_user_id)
 
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "Can only users a local user")
@@ -706,7 +686,7 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
         """Post request to get specific number of users from Synapse..
         This needs user to have administrator access in Synapse.
         Example:
-            http://localhost:8008/_matrix/client/api/v1/admin/users_paginate/
+            http://localhost:8008/_synapse/admin/v1/users_paginate/
             @admin:user?access_token=admin_access_token
         JsonBodyToSend:
             {
@@ -716,12 +696,8 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
         Returns:
             200 OK with json object {list[dict[str, Any]], count} or empty object.
         """
+        yield assert_requester_is_admin(self.auth, request)
         UserID.from_string(target_user_id)
-        requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
 
         order = "name"  # order by name in user table
         params = parse_json_object_from_request(request)
@@ -736,21 +712,20 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
         defer.returnValue((200, ret))
 
 
-class SearchUsersRestServlet(ClientV1RestServlet):
+class SearchUsersRestServlet(RestServlet):
     """Get request to search user table for specific users according to
     search term.
     This needs user to have administrator access in Synapse.
         Example:
-            http://localhost:8008/_matrix/client/api/v1/admin/search_users/
+            http://localhost:8008/_synapse/admin/v1/search_users/
             @admin:user?access_token=admin_access_token&term=alice
         Returns:
             200 OK with json object {list[dict[str, Any]], count} or empty object.
     """
-    PATTERNS = client_path_patterns("/admin/search_users/(?P<target_user_id>[^/]*)")
+    PATTERNS = historical_admin_path_patterns("/search_users/(?P<target_user_id>[^/]*)")
 
     def __init__(self, hs):
         self.store = hs.get_datastore()
-        super(SearchUsersRestServlet, self).__init__(hs)
         self.hs = hs
         self.auth = hs.get_auth()
         self.handlers = hs.get_handlers()
@@ -761,12 +736,9 @@ class SearchUsersRestServlet(ClientV1RestServlet):
         search term.
         This needs user to have administrator access in Synapse.
         """
-        target_user = UserID.from_string(target_user_id)
-        requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
+        yield assert_requester_is_admin(self.auth, request)
 
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
+        target_user = UserID.from_string(target_user_id)
 
         # To allow all users to get the users list
         # if not is_admin and target_user != auth_user:
@@ -784,23 +756,20 @@ class SearchUsersRestServlet(ClientV1RestServlet):
         defer.returnValue((200, ret))
 
 
-class DeleteGroupAdminRestServlet(ClientV1RestServlet):
+class DeleteGroupAdminRestServlet(RestServlet):
     """Allows deleting of local groups
     """
-    PATTERNS = client_path_patterns("/admin/delete_group/(?P<group_id>[^/]*)")
+    PATTERNS = historical_admin_path_patterns("/delete_group/(?P<group_id>[^/]*)")
 
     def __init__(self, hs):
-        super(DeleteGroupAdminRestServlet, self).__init__(hs)
         self.group_server = hs.get_groups_server_handler()
         self.is_mine_id = hs.is_mine_id
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_POST(self, request, group_id):
         requester = yield self.auth.get_user_by_req(request)
-        is_admin = yield self.auth.is_server_admin(requester.user)
-
-        if not is_admin:
-            raise AuthError(403, "You are not a server admin")
+        yield assert_user_is_admin(self.auth, requester.user)
 
         if not self.is_mine_id(group_id):
             raise SynapseError(400, "Can only delete local groups")
@@ -809,7 +778,64 @@ class DeleteGroupAdminRestServlet(ClientV1RestServlet):
         defer.returnValue((200, {}))
 
 
+class AccountValidityRenewServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/account_validity/validity$")
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        self.hs = hs
+        self.account_activity_handler = hs.get_account_validity_handler()
+        self.auth = hs.get_auth()
+
+    @defer.inlineCallbacks
+    def on_POST(self, request):
+        yield assert_requester_is_admin(self.auth, request)
+
+        body = parse_json_object_from_request(request)
+
+        if "user_id" not in body:
+            raise SynapseError(400, "Missing property 'user_id' in the request body")
+
+        expiration_ts = yield self.account_activity_handler.renew_account_for_user(
+            body["user_id"], body.get("expiration_ts"),
+            not body.get("enable_renewal_emails", True),
+        )
+
+        res = {
+            "expiration_ts": expiration_ts,
+        }
+        defer.returnValue((200, res))
+
+########################################################################################
+#
+# please don't add more servlets here: this file is already long and unwieldy. Put
+# them in separate files within the 'admin' package.
+#
+########################################################################################
+
+
+class AdminRestResource(JsonResource):
+    """The REST resource which gets mounted at /_synapse/admin"""
+
+    def __init__(self, hs):
+        JsonResource.__init__(self, hs, canonical_json=False)
+        register_servlets(hs, self)
+
+
 def register_servlets(hs, http_server):
+    """
+    Register all the admin servlets.
+    """
+    register_servlets_for_client_rest_resource(hs, http_server)
+    SendServerNoticeServlet(hs).register(http_server)
+    VersionServlet(hs).register(http_server)
+
+
+def register_servlets_for_client_rest_resource(hs, http_server):
+    """Register only the servlets which need to be exposed on /_matrix/client/xxx"""
     WhoisRestServlet(hs).register(http_server)
     PurgeMediaCacheRestServlet(hs).register(http_server)
     PurgeHistoryStatusRestServlet(hs).register(http_server)
@@ -823,5 +849,7 @@ def register_servlets(hs, http_server):
     QuarantineMediaInRoom(hs).register(http_server)
     ListMediaInRoom(hs).register(http_server)
     UserRegisterServlet(hs).register(http_server)
-    VersionServlet(hs).register(http_server)
     DeleteGroupAdminRestServlet(hs).register(http_server)
+    AccountValidityRenewServlet(hs).register(http_server)
+    # don't add more things here: new servlets should only be exposed on
+    # /_synapse/admin so should not go here. Instead register them in AdminRestResource.
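To make the backwards-compatibility behaviour concrete, this is what the helper expands to for a single endpoint (the definition is repeated so the snippet runs standalone; the example path is illustrative):

import re

def historical_admin_path_patterns(path_regex):
    return list(
        re.compile(prefix + path_regex)
        for prefix in (
            "^/_synapse/admin/v1",
            "^/_matrix/client/api/v1/admin",
            "^/_matrix/client/unstable/admin",
            "^/_matrix/client/r0/admin",
        )
    )

patterns = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)")
assert patterns[0].match("/_synapse/admin/v1/users/@alice:example.com")
assert patterns[-1].match("/_matrix/client/r0/admin/users/@alice:example.com")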
diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py
new file mode 100644
index 0000000000..881d67b89c
--- /dev/null
+++ b/synapse/rest/admin/_base.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import defer
+
+from synapse.api.errors import AuthError
+
+
+@defer.inlineCallbacks
+def assert_requester_is_admin(auth, request):
+    """Verify that the requester is an admin user
+
+    WARNING: MAKE SURE YOU YIELD ON THE RESULT!
+
+    Args:
+        auth (synapse.api.auth.Auth):
+        request (twisted.web.server.Request): incoming request
+
+    Returns:
+        Deferred
+
+    Raises:
+        AuthError if the requester is not an admin
+    """
+    requester = yield auth.get_user_by_req(request)
+    yield assert_user_is_admin(auth, requester.user)
+
+
+@defer.inlineCallbacks
+def assert_user_is_admin(auth, user_id):
+    """Verify that the given user is an admin user
+
+    WARNING: MAKE SURE YOU YIELD ON THE RESULT!
+
+    Args:
+        auth (synapse.api.auth.Auth):
+        user_id (UserID):
+
+    Returns:
+        Deferred
+
+    Raises:
+        AuthError if the user is not an admin
+    """
+
+    is_admin = yield auth.is_server_admin(user_id)
+    if not is_admin:
+        raise AuthError(403, "You are not a server admin")
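A hedged usage sketch for these helpers (the surrounding servlet class is assumed); the warnings exist because forgetting the yield silently discards the Deferred, and with it the AuthError:

from twisted.internet import defer

from synapse.rest.admin._base import assert_requester_is_admin

@defer.inlineCallbacks
def on_POST(self, request):
    # Correct: waits for the check; AuthError(403) propagates to the client.
    yield assert_requester_is_admin(self.auth, request)
    # Wrong: calling assert_requester_is_admin(self.auth, request) without
    # yield would drop the returned Deferred, and the check would gate nothing.
    defer.returnValue((200, {}))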
diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py
new file mode 100644
index 0000000000..ae5aca9dac
--- /dev/null
+++ b/synapse/rest/admin/server_notice_servlet.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import (
+    RestServlet,
+    assert_params_in_dict,
+    parse_json_object_from_request,
+)
+from synapse.rest.admin import assert_requester_is_admin
+from synapse.rest.client.transactions import HttpTransactionCache
+from synapse.types import UserID
+
+
+class SendServerNoticeServlet(RestServlet):
+    """Servlet which will send a server notice to a given user
+
+    POST /_synapse/admin/v1/send_server_notice
+    {
+        "user_id": "@target_user:server_name",
+        "content": {
+            "msgtype": "m.text",
+            "body": "This is my message"
+        }
+    }
+
+    returns:
+
+    {
+        "event_id": "$1895723857jgskldgujpious"
+    }
+    """
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.txns = HttpTransactionCache(hs)
+        self.snm = hs.get_server_notices_manager()
+
+    def register(self, json_resource):
+        PATTERN = "^/_synapse/admin/v1/send_server_notice"
+        json_resource.register_paths(
+            "POST",
+            (re.compile(PATTERN + "$"), ),
+            self.on_POST,
+        )
+        json_resource.register_paths(
+            "PUT",
+            (re.compile(PATTERN + "/(?P<txn_id>[^/]*)$"), ),
+            self.on_PUT,
+        )
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, txn_id=None):
+        yield assert_requester_is_admin(self.auth, request)
+        body = parse_json_object_from_request(request)
+        assert_params_in_dict(body, ("user_id", "content"))
+        event_type = body.get("type", EventTypes.Message)
+        state_key = body.get("state_key")
+
+        if not self.snm.is_enabled():
+            raise SynapseError(400, "Server notices are not enabled on this server")
+
+        user_id = body["user_id"]
+        UserID.from_string(user_id)
+        if not self.hs.is_mine_id(user_id):
+            raise SynapseError(400, "Server notices can only be sent to local users")
+
+        event = yield self.snm.send_notice(
+            user_id=body["user_id"],
+            type=event_type,
+            state_key=state_key,
+            event_content=body["content"],
+        )
+
+        defer.returnValue((200, {"event_id": event.event_id}))
+
+    def on_PUT(self, request, txn_id):
+        return self.txns.fetch_or_execute_request(
+            request, self.on_POST, request, txn_id,
+        )
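For reference, a hypothetical client-side call against the endpoint documented above; the homeserver URL and access token are placeholders:

import json

from six.moves.urllib.request import Request, urlopen

body = {
    "user_id": "@target_user:server_name",
    "content": {"msgtype": "m.text", "body": "This is my message"},
}
req = Request(
    "http://localhost:8008/_synapse/admin/v1/send_server_notice"
    "?access_token=admin_access_token",
    data=json.dumps(body).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
print(json.loads(urlopen(req).read().decode("utf-8")))  # {"event_id": "$..."}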
diff --git a/synapse/rest/client/v1/base.py b/synapse/rest/client/v1/base.py
deleted file mode 100644
index c77d7aba68..0000000000
--- a/synapse/rest/client/v1/base.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This module contains base REST classes for constructing client v1 servlets.
-"""
-
-import logging
-import re
-
-from synapse.api.urls import CLIENT_PREFIX
-from synapse.http.servlet import RestServlet
-from synapse.rest.client.transactions import HttpTransactionCache
-
-logger = logging.getLogger(__name__)
-
-
-def client_path_patterns(path_regex, releases=(0,), include_in_unstable=True):
-    """Creates a regex compiled client path with the correct client path
-    prefix.
-
-    Args:
-        path_regex (str): The regex string to match. This should NOT have a ^
-        as this will be prefixed.
-    Returns:
-        SRE_Pattern
-    """
-    patterns = [re.compile("^" + CLIENT_PREFIX + path_regex)]
-    if include_in_unstable:
-        unstable_prefix = CLIENT_PREFIX.replace("/api/v1", "/unstable")
-        patterns.append(re.compile("^" + unstable_prefix + path_regex))
-    for release in releases:
-        new_prefix = CLIENT_PREFIX.replace("/api/v1", "/r%d" % release)
-        patterns.append(re.compile("^" + new_prefix + path_regex))
-    return patterns
-
-
-class ClientV1RestServlet(RestServlet):
-    """A base Synapse REST Servlet for the client version 1 API.
-    """
-
-    # This subclass was presumably created to allow the auth for the v1
-    # protocol version to be different, however this behaviour was removed.
-    # it may no longer be necessary
-
-    def __init__(self, hs):
-        """
-        Args:
-            hs (synapse.server.HomeServer):
-        """
-        self.hs = hs
-        self.builder_factory = hs.get_event_builder_factory()
-        self.auth = hs.get_auth()
-        self.txns = HttpTransactionCache(hs)
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py
index 0220acf644..0035182bb9 100644
--- a/synapse/rest/client/v1/directory.py
+++ b/synapse/rest/client/v1/directory.py
@@ -19,11 +19,10 @@ import logging
 from twisted.internet import defer
 
 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
-from synapse.http.servlet import parse_json_object_from_request
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.types import RoomAlias
 
-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)
 
 
@@ -33,13 +32,14 @@ def register_servlets(hs, http_server):
     ClientAppserviceDirectoryListServer(hs).register(http_server)
 
 
-class ClientDirectoryServer(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/directory/room/(?P<room_alias>[^/]*)$")
+class ClientDirectoryServer(RestServlet):
+    PATTERNS = client_patterns("/directory/room/(?P<room_alias>[^/]*)$", v1=True)
 
     def __init__(self, hs):
-        super(ClientDirectoryServer, self).__init__(hs)
+        super(ClientDirectoryServer, self).__init__()
         self.store = hs.get_datastore()
         self.handlers = hs.get_handlers()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_alias):
@@ -120,13 +120,14 @@ class ClientDirectoryServer(ClientV1RestServlet):
         defer.returnValue((200, {}))
 
 
-class ClientDirectoryListServer(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/directory/list/room/(?P<room_id>[^/]*)$")
+class ClientDirectoryListServer(RestServlet):
+    PATTERNS = client_patterns("/directory/list/room/(?P<room_id>[^/]*)$", v1=True)
 
     def __init__(self, hs):
-        super(ClientDirectoryListServer, self).__init__(hs)
+        super(ClientDirectoryListServer, self).__init__()
         self.store = hs.get_datastore()
         self.handlers = hs.get_handlers()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -162,15 +163,16 @@ class ClientDirectoryListServer(ClientV1RestServlet):
         defer.returnValue((200, {}))
 
 
-class ClientAppserviceDirectoryListServer(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/directory/list/appservice/(?P<network_id>[^/]*)/(?P<room_id>[^/]*)$"
+class ClientAppserviceDirectoryListServer(RestServlet):
+    PATTERNS = client_patterns(
+        "/directory/list/appservice/(?P<network_id>[^/]*)/(?P<room_id>[^/]*)$", v1=True
     )
 
     def __init__(self, hs):
-        super(ClientAppserviceDirectoryListServer, self).__init__(hs)
+        super(ClientAppserviceDirectoryListServer, self).__init__()
         self.store = hs.get_datastore()
         self.handlers = hs.get_handlers()
+        self.auth = hs.get_auth()
 
     def on_PUT(self, request, network_id, room_id):
         content = parse_json_object_from_request(request)
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index cd9b3bdbd1..84ca36270b 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -19,22 +19,22 @@ import logging
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
-from synapse.events.utils import serialize_event
+from synapse.http.servlet import RestServlet
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.streams.config import PaginationConfig
 
-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)
 
 
-class EventStreamRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/events$")
+class EventStreamRestServlet(RestServlet):
+    PATTERNS = client_patterns("/events$", v1=True)
 
     DEFAULT_LONGPOLL_TIME_MS = 30000
 
     def __init__(self, hs):
-        super(EventStreamRestServlet, self).__init__(hs)
+        super(EventStreamRestServlet, self).__init__()
         self.event_stream_handler = hs.get_event_stream_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -77,13 +77,14 @@ class EventStreamRestServlet(ClientV1RestServlet):
 
 
 # TODO: Unit test gets, with and without auth, with different kinds of events.
-class EventRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/events/(?P<event_id>[^/]*)$")
+class EventRestServlet(RestServlet):
+    PATTERNS = client_patterns("/events/(?P<event_id>[^/]*)$", v1=True)
 
     def __init__(self, hs):
-        super(EventRestServlet, self).__init__(hs)
+        super(EventRestServlet, self).__init__()
         self.clock = hs.get_clock()
         self.event_handler = hs.get_event_handler()
+        self._event_serializer = hs.get_event_client_serializer()
 
     @defer.inlineCallbacks
     def on_GET(self, request, event_id):
@@ -92,7 +93,8 @@ class EventRestServlet(ClientV1RestServlet):
 
         time_now = self.clock.time_msec()
         if event:
-            defer.returnValue((200, serialize_event(event, time_now)))
+            event = yield self._event_serializer.serialize_event(event, time_now)
+            defer.returnValue((200, event))
         else:
             defer.returnValue((404, "Event not found."))
 
diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py
index 3ead75cb77..0fe5f2d79b 100644
--- a/synapse/rest/client/v1/initial_sync.py
+++ b/synapse/rest/client/v1/initial_sync.py
@@ -15,19 +15,19 @@
 
 from twisted.internet import defer
 
-from synapse.http.servlet import parse_boolean
+from synapse.http.servlet import RestServlet, parse_boolean
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.streams.config import PaginationConfig
 
-from .base import ClientV1RestServlet, client_path_patterns
-
 
 # TODO: Needs unit testing
-class InitialSyncRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/initialSync$")
+class InitialSyncRestServlet(RestServlet):
+    PATTERNS = client_patterns("/initialSync$", v1=True)
 
     def __init__(self, hs):
-        super(InitialSyncRestServlet, self).__init__(hs)
+        super(InitialSyncRestServlet, self).__init__()
         self.initial_sync_handler = hs.get_initial_sync_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 5180e9eaf1..3b60728628 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -29,12 +29,11 @@ from synapse.http.servlet import (
     parse_json_object_from_request,
     parse_string,
 )
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.rest.well_known import WellKnownBuilder
 from synapse.types import UserID, map_username_to_mxid_localpart
 from synapse.util.msisdn import phone_number_to_msisdn
 
-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)
 
 
@@ -81,15 +80,16 @@ def login_id_thirdparty_from_phone(identifier):
     }
 
 
-class LoginRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/login$")
+class LoginRestServlet(RestServlet):
+    PATTERNS = client_patterns("/login$", v1=True)
     CAS_TYPE = "m.login.cas"
     SSO_TYPE = "m.login.sso"
     TOKEN_TYPE = "m.login.token"
     JWT_TYPE = "m.login.jwt"
 
     def __init__(self, hs):
-        super(LoginRestServlet, self).__init__(hs)
+        super(LoginRestServlet, self).__init__()
+        self.hs = hs
         self.jwt_enabled = hs.config.jwt_enabled
         self.jwt_secret = hs.config.jwt_secret
         self.jwt_algorithm = hs.config.jwt_algorithm
@@ -371,7 +371,7 @@ class LoginRestServlet(ClientV1RestServlet):
 
 
 class CasRedirectServlet(RestServlet):
-    PATTERNS = client_path_patterns("/login/(cas|sso)/redirect")
+    PATTERNS = client_patterns("/login/(cas|sso)/redirect", v1=True)
 
     def __init__(self, hs):
         super(CasRedirectServlet, self).__init__()
@@ -386,7 +386,7 @@ class CasRedirectServlet(RestServlet):
             b"redirectUrl": args[b"redirectUrl"][0]
         }).encode('ascii')
         hs_redirect_url = (self.cas_service_url +
-                           b"/_matrix/client/api/v1/login/cas/ticket")
+                           b"/_matrix/client/r0/login/cas/ticket")
         service_param = urllib.parse.urlencode({
             b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param)
         }).encode('ascii')
@@ -394,27 +394,27 @@ class CasRedirectServlet(RestServlet):
         finish_request(request)
 
 
-class CasTicketServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/login/cas/ticket", releases=())
+class CasTicketServlet(RestServlet):
+    PATTERNS = client_patterns("/login/cas/ticket", v1=True)
 
     def __init__(self, hs):
-        super(CasTicketServlet, self).__init__(hs)
+        super(CasTicketServlet, self).__init__()
         self.cas_server_url = hs.config.cas_server_url
         self.cas_service_url = hs.config.cas_service_url
         self.cas_required_attributes = hs.config.cas_required_attributes
         self._sso_auth_handler = SSOAuthHandler(hs)
+        self._http_client = hs.get_simple_http_client()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
         client_redirect_url = parse_string(request, "redirectUrl", required=True)
-        http_client = self.hs.get_simple_http_client()
         uri = self.cas_server_url + "/proxyValidate"
         args = {
             "ticket": parse_string(request, "ticket", required=True),
             "service": self.cas_service_url
         }
         try:
-            body = yield http_client.get_raw(uri, args)
+            body = yield self._http_client.get_raw(uri, args)
         except PartialDownloadError as pde:
             # Twisted raises this error if the connection is closed,
             # even if that's being used old-http style to signal end-of-data
diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py
index 430c692336..b8064f261e 100644
--- a/synapse/rest/client/v1/logout.py
+++ b/synapse/rest/client/v1/logout.py
@@ -17,19 +17,18 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.api.errors import AuthError
-
-from .base import ClientV1RestServlet, client_path_patterns
+from synapse.http.servlet import RestServlet
+from synapse.rest.client.v2_alpha._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
 
-class LogoutRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/logout$")
+class LogoutRestServlet(RestServlet):
+    PATTERNS = client_patterns("/logout$", v1=True)
 
     def __init__(self, hs):
-        super(LogoutRestServlet, self).__init__(hs)
-        self._auth = hs.get_auth()
+        super(LogoutRestServlet, self).__init__()
+        self.auth = hs.get_auth()
         self._auth_handler = hs.get_auth_handler()
         self._device_handler = hs.get_device_handler()
 
@@ -38,32 +37,25 @@ class LogoutRestServlet(ClientV1RestServlet):
 
     @defer.inlineCallbacks
     def on_POST(self, request):
-        try:
-            requester = yield self.auth.get_user_by_req(request)
-        except AuthError:
-            # this implies the access token has already been deleted.
-            defer.returnValue((401, {
-                "errcode": "M_UNKNOWN_TOKEN",
-                "error": "Access Token unknown or expired"
-            }))
+        requester = yield self.auth.get_user_by_req(request)
+
+        if requester.device_id is None:
+            # the access token wasn't associated with a device.
+            # Just delete the access token
+            access_token = self.auth.get_access_token_from_request(request)
+            yield self._auth_handler.delete_access_token(access_token)
         else:
-            if requester.device_id is None:
-                # the acccess token wasn't associated with a device.
-                # Just delete the access token
-                access_token = self._auth.get_access_token_from_request(request)
-                yield self._auth_handler.delete_access_token(access_token)
-            else:
-                yield self._device_handler.delete_device(
-                    requester.user.to_string(), requester.device_id)
+            yield self._device_handler.delete_device(
+                requester.user.to_string(), requester.device_id)
 
         defer.returnValue((200, {}))
 
 
-class LogoutAllRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/logout/all$")
+class LogoutAllRestServlet(RestServlet):
+    PATTERNS = client_patterns("/logout/all$", v1=True)
 
     def __init__(self, hs):
-        super(LogoutAllRestServlet, self).__init__(hs)
+        super(LogoutAllRestServlet, self).__init__()
         self.auth = hs.get_auth()
         self._auth_handler = hs.get_auth_handler()
         self._device_handler = hs.get_device_handler()
diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py
index 045d5a20ac..e263da3cb7 100644
--- a/synapse/rest/client/v1/presence.py
+++ b/synapse/rest/client/v1/presence.py
@@ -23,21 +23,22 @@ from twisted.internet import defer
 
 from synapse.api.errors import AuthError, SynapseError
 from synapse.handlers.presence import format_user_presence_state
-from synapse.http.servlet import parse_json_object_from_request
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.types import UserID
 
-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)
 
 
-class PresenceStatusRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")
+class PresenceStatusRestServlet(RestServlet):
+    PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status", v1=True)
 
     def __init__(self, hs):
-        super(PresenceStatusRestServlet, self).__init__(hs)
+        super(PresenceStatusRestServlet, self).__init__()
+        self.hs = hs
         self.presence_handler = hs.get_presence_handler()
         self.clock = hs.get_clock()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, user_id):
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index a23edd8fe5..e15d9d82a6 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -16,26 +16,33 @@
 """ This module contains REST servlets to do with profile: /profile/<paths> """
 from twisted.internet import defer
 
-from synapse.http.servlet import parse_json_object_from_request
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.types import UserID
 
-from .base import ClientV1RestServlet, client_path_patterns
 
-
-class ProfileDisplaynameRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)/displayname")
+class ProfileDisplaynameRestServlet(RestServlet):
+    PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/displayname", v1=True)
 
     def __init__(self, hs):
-        super(ProfileDisplaynameRestServlet, self).__init__(hs)
+        super(ProfileDisplaynameRestServlet, self).__init__()
+        self.hs = hs
         self.profile_handler = hs.get_profile_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, user_id):
+        requester_user = None
+
+        if self.hs.config.require_auth_for_profile_requests:
+            requester = yield self.auth.get_user_by_req(request)
+            requester_user = requester.user
+
         user = UserID.from_string(user_id)
 
-        displayname = yield self.profile_handler.get_displayname(
-            user,
-        )
+        yield self.profile_handler.check_profile_query_allowed(user, requester_user)
+
+        displayname = yield self.profile_handler.get_displayname(user)
 
         ret = {}
         if displayname is not None:
@@ -65,20 +72,28 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet):
         return (200, {})
 
 
-class ProfileAvatarURLRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)/avatar_url")
+class ProfileAvatarURLRestServlet(RestServlet):
+    PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/avatar_url", v1=True)
 
     def __init__(self, hs):
-        super(ProfileAvatarURLRestServlet, self).__init__(hs)
+        super(ProfileAvatarURLRestServlet, self).__init__()
+        self.hs = hs
         self.profile_handler = hs.get_profile_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, user_id):
+        requester_user = None
+
+        if self.hs.config.require_auth_for_profile_requests:
+            requester = yield self.auth.get_user_by_req(request)
+            requester_user = requester.user
+
         user = UserID.from_string(user_id)
 
-        avatar_url = yield self.profile_handler.get_avatar_url(
-            user,
-        )
+        yield self.profile_handler.check_profile_query_allowed(user, requester_user)
+
+        avatar_url = yield self.profile_handler.get_avatar_url(user)
 
         ret = {}
         if avatar_url is not None:
@@ -107,23 +122,29 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet):
         return (200, {})
 
 
-class ProfileRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)")
+class ProfileRestServlet(RestServlet):
+    PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)", v1=True)
 
     def __init__(self, hs):
-        super(ProfileRestServlet, self).__init__(hs)
+        super(ProfileRestServlet, self).__init__()
+        self.hs = hs
         self.profile_handler = hs.get_profile_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, user_id):
+        requester_user = None
+
+        if self.hs.config.require_auth_for_profile_requests:
+            requester = yield self.auth.get_user_by_req(request)
+            requester_user = requester.user
+
         user = UserID.from_string(user_id)
 
-        displayname = yield self.profile_handler.get_displayname(
-            user,
-        )
-        avatar_url = yield self.profile_handler.get_avatar_url(
-            user,
-        )
+        yield self.profile_handler.check_profile_query_allowed(user, requester_user)
+
+        displayname = yield self.profile_handler.get_displayname(user)
+        avatar_url = yield self.profile_handler.get_avatar_url(user)
 
         ret = {}
         if displayname is not None:
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index c654f9b5f0..3d6326fe2f 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -21,28 +21,32 @@ from synapse.api.errors import (
     SynapseError,
     UnrecognizedRequestError,
 )
-from synapse.http.servlet import parse_json_value_from_request, parse_string
+from synapse.http.servlet import RestServlet, parse_json_value_from_request, parse_string
 from synapse.push.baserules import BASE_RULE_IDS
 from synapse.push.clientformat import format_push_rules_for_user
 from synapse.push.rulekinds import PRIORITY_CLASS_MAP
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
 
-from .base import ClientV1RestServlet, client_path_patterns
 
-
-class PushRuleRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/pushrules/.*$")
+class PushRuleRestServlet(RestServlet):
+    PATTERNS = client_patterns("/(?P<path>pushrules/.*)$", v1=True)
     SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR = (
         "Unrecognised request: You probably wanted a trailing slash")
 
     def __init__(self, hs):
-        super(PushRuleRestServlet, self).__init__(hs)
+        super(PushRuleRestServlet, self).__init__()
+        self.auth = hs.get_auth()
         self.store = hs.get_datastore()
         self.notifier = hs.get_notifier()
+        self._is_worker = hs.config.worker_app is not None
 
     @defer.inlineCallbacks
-    def on_PUT(self, request):
-        spec = _rule_spec_from_path([x.decode('utf8') for x in request.postpath])
+    def on_PUT(self, request, path):
+        if self._is_worker:
+            raise Exception("Cannot handle PUT /push_rules on worker")
+
+        spec = _rule_spec_from_path(path.split("/"))
         try:
             priority_class = _priority_class_from_spec(spec)
         except InvalidRuleException as e:
@@ -102,8 +106,11 @@ class PushRuleRestServlet(ClientV1RestServlet):
         defer.returnValue((200, {}))
 
     @defer.inlineCallbacks
-    def on_DELETE(self, request):
-        spec = _rule_spec_from_path([x.decode('utf8') for x in request.postpath])
+    def on_DELETE(self, request, path):
+        if self._is_worker:
+            raise Exception("Cannot handle DELETE /push_rules on worker")
+
+        spec = _rule_spec_from_path(path.split("/"))
 
         requester = yield self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
@@ -123,7 +130,7 @@ class PushRuleRestServlet(ClientV1RestServlet):
                 raise
 
     @defer.inlineCallbacks
-    def on_GET(self, request):
+    def on_GET(self, request, path):
         requester = yield self.auth.get_user_by_req(request)
         user_id = requester.user.to_string()
 
@@ -134,7 +141,7 @@ class PushRuleRestServlet(ClientV1RestServlet):
 
         rules = format_push_rules_for_user(requester.user, rules)
 
-        path = [x.decode('utf8') for x in request.postpath][1:]
+        path = path.split("/")[1:]
 
         if path == []:
             # we're a reference impl: pedantry is our job.
@@ -150,7 +157,7 @@ class PushRuleRestServlet(ClientV1RestServlet):
         else:
             raise UnrecognizedRequestError()
 
-    def on_OPTIONS(self, _):
+    def on_OPTIONS(self, request, path):
         return 200, {}
 
     def notify_user(self, user_id):
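
A quick sketch of why the handler signatures gained a path argument: the servlet previously split request.postpath (a list of byte segments), whereas the new (?P<path>pushrules/.*)$ capture group hands over one decoded string. The rule path below is a made-up example.

    # Before: request.postpath arrived as byte segments
    postpath = [b"pushrules", b"global", b"override", b".m.rule.contains_display_name"]
    old_segments = [x.decode("utf8") for x in postpath]

    # After: the named capture group yields a single decoded string
    path = "pushrules/global/override/.m.rule.contains_display_name"
    new_segments = path.split("/")

    assert old_segments == new_segments
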
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index 4c07ae7f45..15d860db37 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -26,17 +26,18 @@ from synapse.http.servlet import (
     parse_string,
 )
 from synapse.push import PusherConfigException
-
-from .base import ClientV1RestServlet, client_path_patterns
+from synapse.rest.client.v2_alpha._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
 
-class PushersRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/pushers$")
+class PushersRestServlet(RestServlet):
+    PATTERNS = client_patterns("/pushers$", v1=True)
 
     def __init__(self, hs):
-        super(PushersRestServlet, self).__init__(hs)
+        super(PushersRestServlet, self).__init__()
+        self.hs = hs
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -69,11 +70,13 @@ class PushersRestServlet(ClientV1RestServlet):
         return 200, {}
 
 
-class PushersSetRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/pushers/set$")
+class PushersSetRestServlet(RestServlet):
+    PATTERNS = client_patterns("/pushers/set$", v1=True)
 
     def __init__(self, hs):
-        super(PushersSetRestServlet, self).__init__(hs)
+        super(PushersSetRestServlet, self).__init__()
+        self.hs = hs
+        self.auth = hs.get_auth()
         self.notifier = hs.get_notifier()
         self.pusher_pool = self.hs.get_pusherpool()
 
@@ -141,7 +144,7 @@ class PushersRemoveRestServlet(RestServlet):
     """
     To allow a pusher to be deleted by clicking a link (i.e. a GET request)
     """
-    PATTERNS = client_path_patterns("/pushers/remove$")
+    PATTERNS = client_patterns("/pushers/remove$", v1=True)
     SUCCESS_HTML = b"<html><body>You have been unsubscribed</body><html>"
 
     def __init__(self, hs):
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 48da4d557f..e8f672c4ba 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -26,39 +26,47 @@ from twisted.internet import defer
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError, Codes, SynapseError
 from synapse.api.filtering import Filter
-from synapse.events.utils import format_event_for_client_v2, serialize_event
+from synapse.events.utils import format_event_for_client_v2
 from synapse.http.servlet import (
+    RestServlet,
     assert_params_in_dict,
     parse_integer,
     parse_json_object_from_request,
     parse_string,
 )
+from synapse.rest.client.transactions import HttpTransactionCache
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.storage.state import StateFilter
 from synapse.streams.config import PaginationConfig
 from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID
 
-from .base import ClientV1RestServlet, client_path_patterns
-
 logger = logging.getLogger(__name__)
 
 
-class RoomCreateRestServlet(ClientV1RestServlet):
+class TransactionRestServlet(RestServlet):
+    def __init__(self, hs):
+        super(TransactionRestServlet, self).__init__()
+        self.txns = HttpTransactionCache(hs)
+
+
+class RoomCreateRestServlet(TransactionRestServlet):
     # No PATTERN; we have custom dispatch rules here
 
     def __init__(self, hs):
         super(RoomCreateRestServlet, self).__init__(hs)
         self._room_creation_handler = hs.get_room_creation_handler()
+        self.auth = hs.get_auth()
 
     def register(self, http_server):
         PATTERNS = "/createRoom"
         register_txn_path(self, PATTERNS, http_server)
         # define CORS for all of /rooms in RoomCreateRestServlet for simplicity
         http_server.register_paths("OPTIONS",
-                                   client_path_patterns("/rooms(?:/.*)?$"),
+                                   client_patterns("/rooms(?:/.*)?$", v1=True),
                                    self.on_OPTIONS)
         # define CORS for /createRoom[/txnid]
         http_server.register_paths("OPTIONS",
-                                   client_path_patterns("/createRoom(?:/.*)?$"),
+                                   client_patterns("/createRoom(?:/.*)?$", v1=True),
                                    self.on_OPTIONS)
 
     def on_PUT(self, request, txn_id):
@@ -85,13 +93,14 @@ class RoomCreateRestServlet(ClientV1RestServlet):
 
 
 # TODO: Needs unit testing for generic events
-class RoomStateEventRestServlet(ClientV1RestServlet):
+class RoomStateEventRestServlet(TransactionRestServlet):
     def __init__(self, hs):
         super(RoomStateEventRestServlet, self).__init__(hs)
         self.handlers = hs.get_handlers()
         self.event_creation_handler = hs.get_event_creation_handler()
         self.room_member_handler = hs.get_room_member_handler()
         self.message_handler = hs.get_message_handler()
+        self.auth = hs.get_auth()
 
     def register(self, http_server):
         # /room/$roomid/state/$eventtype
@@ -102,16 +111,16 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
                      "(?P<event_type>[^/]*)/(?P<state_key>[^/]*)$")
 
         http_server.register_paths("GET",
-                                   client_path_patterns(state_key),
+                                   client_patterns(state_key, v1=True),
                                    self.on_GET)
         http_server.register_paths("PUT",
-                                   client_path_patterns(state_key),
+                                   client_patterns(state_key, v1=True),
                                    self.on_PUT)
         http_server.register_paths("GET",
-                                   client_path_patterns(no_state_key),
+                                   client_patterns(no_state_key, v1=True),
                                    self.on_GET_no_state_key)
         http_server.register_paths("PUT",
-                                   client_path_patterns(no_state_key),
+                                   client_patterns(no_state_key, v1=True),
                                    self.on_PUT_no_state_key)
 
     def on_GET_no_state_key(self, request, room_id, event_type):
@@ -185,11 +194,12 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
 
 
 # TODO: Needs unit testing for generic events + feedback
-class RoomSendEventRestServlet(ClientV1RestServlet):
+class RoomSendEventRestServlet(TransactionRestServlet):
 
     def __init__(self, hs):
         super(RoomSendEventRestServlet, self).__init__(hs)
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.auth = hs.get_auth()
 
     def register(self, http_server):
         # /rooms/$roomid/send/$event_type[/$txn_id]
@@ -229,10 +239,11 @@ class RoomSendEventRestServlet(ClientV1RestServlet):
 
 
 # TODO: Needs unit testing for room ID + alias joins
-class JoinRoomAliasServlet(ClientV1RestServlet):
+class JoinRoomAliasServlet(TransactionRestServlet):
     def __init__(self, hs):
         super(JoinRoomAliasServlet, self).__init__(hs)
         self.room_member_handler = hs.get_room_member_handler()
+        self.auth = hs.get_auth()
 
     def register(self, http_server):
         # /join/$room_identifier[/$txn_id]
@@ -291,8 +302,13 @@ class JoinRoomAliasServlet(ClientV1RestServlet):
 
 
 # TODO: Needs unit testing
-class PublicRoomListRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/publicRooms$")
+class PublicRoomListRestServlet(TransactionRestServlet):
+    PATTERNS = client_patterns("/publicRooms$", v1=True)
+
+    def __init__(self, hs):
+        super(PublicRoomListRestServlet, self).__init__(hs)
+        self.hs = hs
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -301,6 +317,12 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
         try:
             yield self.auth.get_user_by_req(request, allow_guest=True)
         except AuthError as e:
+            # Option to allow servers to require auth when accessing
+            # /publicRooms via CS API. This is especially helpful in private
+            # federations.
+            if self.hs.config.restrict_public_rooms_to_local_users:
+                raise
+
             # We allow people to not be authed if they're just looking at our
             # room list, but require auth when we proxy the request.
             # In both cases we call the auth function, as that has the side
@@ -376,12 +398,13 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
 
 
 # TODO: Needs unit testing
-class RoomMemberListRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/members$")
+class RoomMemberListRestServlet(RestServlet):
+    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/members$", v1=True)
 
     def __init__(self, hs):
-        super(RoomMemberListRestServlet, self).__init__(hs)
+        super(RoomMemberListRestServlet, self).__init__()
         self.message_handler = hs.get_message_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -430,12 +453,13 @@ class RoomMemberListRestServlet(ClientV1RestServlet):
 
 # deprecated in favour of /members?membership=join?
 # except it does custom AS logic and has a simpler return format
-class JoinedRoomMemberListRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/joined_members$")
+class JoinedRoomMemberListRestServlet(RestServlet):
+    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/joined_members$", v1=True)
 
     def __init__(self, hs):
-        super(JoinedRoomMemberListRestServlet, self).__init__(hs)
+        super(JoinedRoomMemberListRestServlet, self).__init__()
         self.message_handler = hs.get_message_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -451,12 +475,13 @@ class JoinedRoomMemberListRestServlet(ClientV1RestServlet):
 
 
 # TODO: Needs better unit testing
-class RoomMessageListRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/messages$")
+class RoomMessageListRestServlet(RestServlet):
+    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/messages$", v1=True)
 
     def __init__(self, hs):
-        super(RoomMessageListRestServlet, self).__init__(hs)
+        super(RoomMessageListRestServlet, self).__init__()
         self.pagination_handler = hs.get_pagination_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -469,6 +494,8 @@ class RoomMessageListRestServlet(ClientV1RestServlet):
         if filter_bytes:
             filter_json = urlparse.unquote(filter_bytes.decode("UTF-8"))
             event_filter = Filter(json.loads(filter_json))
+            if event_filter.filter_json.get("event_format", "client") == "federation":
+                as_client_event = False
         else:
             event_filter = None
         msgs = yield self.pagination_handler.get_messages(
@@ -483,12 +510,13 @@ class RoomMessageListRestServlet(ClientV1RestServlet):
 
 
 # TODO: Needs unit testing
-class RoomStateRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/state$")
+class RoomStateRestServlet(RestServlet):
+    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/state$", v1=True)
 
     def __init__(self, hs):
-        super(RoomStateRestServlet, self).__init__(hs)
+        super(RoomStateRestServlet, self).__init__()
         self.message_handler = hs.get_message_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -503,12 +531,13 @@ class RoomStateRestServlet(ClientV1RestServlet):
 
 
 # TODO: Needs unit testing
-class RoomInitialSyncRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/initialSync$")
+class RoomInitialSyncRestServlet(RestServlet):
+    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/initialSync$", v1=True)
 
     def __init__(self, hs):
-        super(RoomInitialSyncRestServlet, self).__init__(hs)
+        super(RoomInitialSyncRestServlet, self).__init__()
         self.initial_sync_handler = hs.get_initial_sync_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id):
@@ -522,15 +551,17 @@ class RoomInitialSyncRestServlet(ClientV1RestServlet):
         defer.returnValue((200, content))
 
 
-class RoomEventServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/rooms/(?P<room_id>[^/]*)/event/(?P<event_id>[^/]*)$"
+class RoomEventServlet(RestServlet):
+    PATTERNS = client_patterns(
+        "/rooms/(?P<room_id>[^/]*)/event/(?P<event_id>[^/]*)$", v1=True
     )
 
     def __init__(self, hs):
-        super(RoomEventServlet, self).__init__(hs)
+        super(RoomEventServlet, self).__init__()
         self.clock = hs.get_clock()
         self.event_handler = hs.get_event_handler()
+        self._event_serializer = hs.get_event_client_serializer()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id, event_id):
@@ -539,20 +570,23 @@ class RoomEventServlet(ClientV1RestServlet):
 
         time_now = self.clock.time_msec()
         if event:
-            defer.returnValue((200, serialize_event(event, time_now)))
+            event = yield self._event_serializer.serialize_event(event, time_now)
+            defer.returnValue((200, event))
         else:
             defer.returnValue((404, "Event not found."))
 
 
-class RoomEventContextServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$"
+class RoomEventContextServlet(RestServlet):
+    PATTERNS = client_patterns(
+        "/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$", v1=True
     )
 
     def __init__(self, hs):
-        super(RoomEventContextServlet, self).__init__(hs)
+        super(RoomEventContextServlet, self).__init__()
         self.clock = hs.get_clock()
         self.room_context_handler = hs.get_room_context_handler()
+        self._event_serializer = hs.get_event_client_serializer()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request, room_id, event_id):
@@ -582,24 +616,27 @@ class RoomEventContextServlet(ClientV1RestServlet):
             )
 
         time_now = self.clock.time_msec()
-        results["events_before"] = [
-            serialize_event(event, time_now) for event in results["events_before"]
-        ]
-        results["event"] = serialize_event(results["event"], time_now)
-        results["events_after"] = [
-            serialize_event(event, time_now) for event in results["events_after"]
-        ]
-        results["state"] = [
-            serialize_event(event, time_now) for event in results["state"]
-        ]
+        results["events_before"] = yield self._event_serializer.serialize_events(
+            results["events_before"], time_now,
+        )
+        results["event"] = yield self._event_serializer.serialize_event(
+            results["event"], time_now,
+        )
+        results["events_after"] = yield self._event_serializer.serialize_events(
+            results["events_after"], time_now,
+        )
+        results["state"] = yield self._event_serializer.serialize_events(
+            results["state"], time_now,
+        )
 
         defer.returnValue((200, results))
 
 
-class RoomForgetRestServlet(ClientV1RestServlet):
+class RoomForgetRestServlet(TransactionRestServlet):
     def __init__(self, hs):
         super(RoomForgetRestServlet, self).__init__(hs)
         self.room_member_handler = hs.get_room_member_handler()
+        self.auth = hs.get_auth()
 
     def register(self, http_server):
         PATTERNS = ("/rooms/(?P<room_id>[^/]*)/forget")
@@ -626,11 +663,12 @@ class RoomForgetRestServlet(ClientV1RestServlet):
 
 
 # TODO: Needs unit testing
-class RoomMembershipRestServlet(ClientV1RestServlet):
+class RoomMembershipRestServlet(TransactionRestServlet):
 
     def __init__(self, hs):
         super(RoomMembershipRestServlet, self).__init__(hs)
         self.room_member_handler = hs.get_room_member_handler()
+        self.auth = hs.get_auth()
 
     def register(self, http_server):
         # /rooms/$roomid/[invite|join|leave]
@@ -709,11 +747,12 @@ class RoomMembershipRestServlet(ClientV1RestServlet):
         )
 
 
-class RoomRedactEventRestServlet(ClientV1RestServlet):
+class RoomRedactEventRestServlet(TransactionRestServlet):
     def __init__(self, hs):
         super(RoomRedactEventRestServlet, self).__init__(hs)
         self.handlers = hs.get_handlers()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.auth = hs.get_auth()
 
     def register(self, http_server):
         PATTERNS = ("/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)")
@@ -744,15 +783,16 @@ class RoomRedactEventRestServlet(ClientV1RestServlet):
         )
 
 
-class RoomTypingRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/rooms/(?P<room_id>[^/]*)/typing/(?P<user_id>[^/]*)$"
+class RoomTypingRestServlet(RestServlet):
+    PATTERNS = client_patterns(
+        "/rooms/(?P<room_id>[^/]*)/typing/(?P<user_id>[^/]*)$", v1=True
     )
 
     def __init__(self, hs):
-        super(RoomTypingRestServlet, self).__init__(hs)
+        super(RoomTypingRestServlet, self).__init__()
         self.presence_handler = hs.get_presence_handler()
         self.typing_handler = hs.get_typing_handler()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_PUT(self, request, room_id, user_id):
@@ -785,14 +825,13 @@ class RoomTypingRestServlet(ClientV1RestServlet):
         defer.returnValue((200, {}))
 
 
-class SearchRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns(
-        "/search$"
-    )
+class SearchRestServlet(RestServlet):
+    PATTERNS = client_patterns("/search$", v1=True)
 
     def __init__(self, hs):
-        super(SearchRestServlet, self).__init__(hs)
+        super(SearchRestServlet, self).__init__()
         self.handlers = hs.get_handlers()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_POST(self, request):
@@ -810,12 +849,13 @@ class SearchRestServlet(ClientV1RestServlet):
         defer.returnValue((200, results))
 
 
-class JoinedRoomsRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/joined_rooms$")
+class JoinedRoomsRestServlet(RestServlet):
+    PATTERNS = client_patterns("/joined_rooms$", v1=True)
 
     def __init__(self, hs):
-        super(JoinedRoomsRestServlet, self).__init__(hs)
+        super(JoinedRoomsRestServlet, self).__init__()
         self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -840,18 +880,18 @@ def register_txn_path(servlet, regex_string, http_server, with_get=False):
     """
     http_server.register_paths(
         "POST",
-        client_path_patterns(regex_string + "$"),
+        client_patterns(regex_string + "$", v1=True),
         servlet.on_POST
     )
     http_server.register_paths(
         "PUT",
-        client_path_patterns(regex_string + "/(?P<txn_id>[^/]*)$"),
+        client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
         servlet.on_PUT
     )
     if with_get:
         http_server.register_paths(
             "GET",
-            client_path_patterns(regex_string + "/(?P<txn_id>[^/]*)$"),
+            client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
             servlet.on_GET
         )
 
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py
index 53da905eea..6381049210 100644
--- a/synapse/rest/client/v1/voip.py
+++ b/synapse/rest/client/v1/voip.py
@@ -19,11 +19,17 @@ import hmac
 
 from twisted.internet import defer
 
-from .base import ClientV1RestServlet, client_path_patterns
+from synapse.http.servlet import RestServlet
+from synapse.rest.client.v2_alpha._base import client_patterns
 
 
-class VoipRestServlet(ClientV1RestServlet):
-    PATTERNS = client_path_patterns("/voip/turnServer$")
+class VoipRestServlet(RestServlet):
+    PATTERNS = client_patterns("/voip/turnServer$", v1=True)
+
+    def __init__(self, hs):
+        super(VoipRestServlet, self).__init__()
+        self.hs = hs
+        self.auth = hs.get_auth()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py
index 77434937ff..5236d5d566 100644
--- a/synapse/rest/client/v2_alpha/_base.py
+++ b/synapse/rest/client/v2_alpha/_base.py
@@ -21,14 +21,12 @@ import re
 from twisted.internet import defer
 
 from synapse.api.errors import InteractiveAuthIncompleteError
-from synapse.api.urls import CLIENT_V2_ALPHA_PREFIX
+from synapse.api.urls import CLIENT_API_PREFIX
 
 logger = logging.getLogger(__name__)
 
 
-def client_v2_patterns(path_regex, releases=(0,),
-                       v2_alpha=True,
-                       unstable=True):
+def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
     """Creates a regex compiled client path with the correct client path
     prefix.
 
@@ -39,13 +37,14 @@ def client_v2_patterns(path_regex, releases=(0,),
         SRE_Pattern
     """
     patterns = []
-    if v2_alpha:
-        patterns.append(re.compile("^" + CLIENT_V2_ALPHA_PREFIX + path_regex))
     if unstable:
-        unstable_prefix = CLIENT_V2_ALPHA_PREFIX.replace("/v2_alpha", "/unstable")
+        unstable_prefix = CLIENT_API_PREFIX + "/unstable"
         patterns.append(re.compile("^" + unstable_prefix + path_regex))
+    if v1:
+        v1_prefix = CLIENT_API_PREFIX + "/api/v1"
+        patterns.append(re.compile("^" + v1_prefix + path_regex))
     for release in releases:
-        new_prefix = CLIENT_V2_ALPHA_PREFIX.replace("/v2_alpha", "/r%d" % release)
+        new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,)
         patterns.append(re.compile("^" + new_prefix + path_regex))
     return patterns
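
To illustrate what the reworked helper produces, here is a self-contained sketch that mirrors the logic above rather than importing Synapse; it assumes CLIENT_API_PREFIX is "/_matrix/client" (its value in synapse.api.urls).

    import re

    CLIENT_API_PREFIX = "/_matrix/client"  # assumed value of the synapse.api.urls constant

    def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
        patterns = []
        if unstable:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/unstable" + path_regex))
        if v1:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex))
        for release in releases:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/r%d" % (release,) + path_regex))
        return patterns

    for p in client_patterns("/login$", v1=True):
        print(p.pattern)
    # ^/_matrix/client/unstable/login$
    # ^/_matrix/client/api/v1/login$
    # ^/_matrix/client/r0/login$
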
 
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index ee069179f0..ab75f6c2b2 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -18,40 +18,76 @@ import logging
 
 from six.moves import http_client
 
+import jinja2
+
 from twisted.internet import defer
 
 from synapse.api.constants import LoginType
-from synapse.api.errors import Codes, SynapseError
+from synapse.api.errors import Codes, SynapseError, ThreepidValidationError
+from synapse.http.server import finish_request
 from synapse.http.servlet import (
     RestServlet,
     assert_params_in_dict,
     parse_json_object_from_request,
+    parse_string,
 )
 from synapse.util.msisdn import phone_number_to_msisdn
+from synapse.util.stringutils import random_string
 from synapse.util.threepids import check_3pid_allowed
 
-from ._base import client_v2_patterns, interactive_auth_handler
+from ._base import client_patterns, interactive_auth_handler
 
 logger = logging.getLogger(__name__)
 
 
 class EmailPasswordRequestTokenRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/account/password/email/requestToken$")
+    PATTERNS = client_patterns("/account/password/email/requestToken$")
 
     def __init__(self, hs):
         super(EmailPasswordRequestTokenRestServlet, self).__init__()
         self.hs = hs
+        self.datastore = hs.get_datastore()
+        self.config = hs.config
         self.identity_handler = hs.get_handlers().identity_handler
 
+        if self.config.email_password_reset_behaviour == "local":
+            from synapse.push.mailer import Mailer, load_jinja2_templates
+            templates = load_jinja2_templates(
+                config=hs.config,
+                template_html_name=hs.config.email_password_reset_template_html,
+                template_text_name=hs.config.email_password_reset_template_text,
+            )
+            self.mailer = Mailer(
+                hs=self.hs,
+                app_name=self.config.email_app_name,
+                template_html=templates[0],
+                template_text=templates[1],
+            )
+
     @defer.inlineCallbacks
     def on_POST(self, request):
+        if self.config.email_password_reset_behaviour == "off":
+            if self.config.password_resets_were_disabled_due_to_email_config:
+                logger.warn(
+                    "User password resets have been disabled due to lack of email config"
+                )
+            raise SynapseError(
+                400, "Email-based password resets have been disabled on this server",
+            )
+
         body = parse_json_object_from_request(request)
 
         assert_params_in_dict(body, [
-            'id_server', 'client_secret', 'email', 'send_attempt'
+            'client_secret', 'email', 'send_attempt'
         ])
 
-        if not check_3pid_allowed(self.hs, "email", body['email']):
+        # Extract params from body
+        client_secret = body["client_secret"]
+        email = body["email"]
+        send_attempt = body["send_attempt"]
+        next_link = body.get("next_link")  # Optional param
+
+        if not check_3pid_allowed(self.hs, "email", email):
             raise SynapseError(
                 403,
                 "Your email domain is not authorized on this server",
@@ -59,18 +95,103 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
             )
 
         existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
-            'email', body['email']
+            'email', email,
         )
 
         if existingUid is None:
             raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
 
-        ret = yield self.identity_handler.requestEmailToken(**body)
+        if self.config.email_password_reset_behaviour == "remote":
+            if 'id_server' not in body:
+                raise SynapseError(400, "Missing 'id_server' param in body")
+
+            # Have the identity server handle the password reset flow
+            ret = yield self.identity_handler.requestEmailToken(
+                body["id_server"], email, client_secret, send_attempt, next_link,
+            )
+        else:
+            # Send password reset emails from Synapse
+            sid = yield self.send_password_reset(
+                email, client_secret, send_attempt, next_link,
+            )
+
+            # Wrap the session id in a JSON object
+            ret = {"sid": sid}
+
         defer.returnValue((200, ret))
 
+    @defer.inlineCallbacks
+    def send_password_reset(
+        self,
+        email,
+        client_secret,
+        send_attempt,
+        next_link=None,
+    ):
+        """Send a password reset email
+
+        Args:
+            email (str): The user's email address
+            client_secret (str): The provided client secret
+            send_attempt (int): Which send attempt this is
+
+        Returns:
+            The new session_id upon success
+
+        Raises:
+            SynapseError: if an error occurred when sending the email
+        """
+        # Look up any existing validation session for this email/client_secret
+        # pair, so this send_attempt can be compared with previous attempts
+        session = yield self.datastore.get_threepid_validation_session(
+            "email", client_secret, address=email, validated=False,
+        )
+
+        # Check to see if a session already exists and that it is not yet
+        # marked as validated
+        if session and session.get("validated_at") is None:
+            session_id = session['session_id']
+            last_send_attempt = session['last_send_attempt']
+
+            # Check that the send_attempt is higher than previous attempts
+            if send_attempt <= last_send_attempt:
+                # If not, just return a success without sending an email
+                defer.returnValue(session_id)
+        else:
+            # No pending, non-validated session exists yet.
+            # Generate a new session id
+            session_id = random_string(16)
+
+        # Generate a new validation token
+        token = random_string(32)
+
+        # Send the mail with the link containing the token, client_secret
+        # and session_id
+        try:
+            yield self.mailer.send_password_reset_mail(
+                email, token, client_secret, session_id,
+            )
+        except Exception:
+            logger.exception(
+                "Error sending a password reset email to %s", email,
+            )
+            raise SynapseError(
+                500, "An error was encountered when sending the password reset email"
+            )
+
+        token_expires = (self.hs.get_clock().time_msec() +
+                         self.config.email_validation_token_lifetime)
+
+        yield self.datastore.start_or_continue_validation_session(
+            "email", email, session_id, client_secret, send_attempt,
+            next_link, token, token_expires,
+        )
+
+        defer.returnValue(session_id)
+
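
The send_attempt bookkeeping above exists to deduplicate client retries; a condensed, self-contained sketch of that decision follows (the dict keys mirror the session row used above; everything else, including the session-id scheme, is simplified):

    import uuid

    def plan_password_reset_send(existing_session, send_attempt):
        """Return (session_id, should_send_email) for a reset request."""
        if existing_session and existing_session["validated_at"] is None:
            # A pending session exists: only resend if the client bumped
            # send_attempt beyond the last one we handled.
            sid = existing_session["session_id"]
            return sid, send_attempt > existing_session["last_send_attempt"]
        # No pending session: mint a fresh one and always send.
        return uuid.uuid4().hex[:16], True

    session = {"session_id": "abc", "validated_at": None, "last_send_attempt": 1}
    assert plan_password_reset_send(session, 1) == ("abc", False)
    assert plan_password_reset_send(session, 2) == ("abc", True)
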
 
 class MsisdnPasswordRequestTokenRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/account/password/msisdn/requestToken$")
+    PATTERNS = client_patterns("/account/password/msisdn/requestToken$")
 
     def __init__(self, hs):
         super(MsisdnPasswordRequestTokenRestServlet, self).__init__()
@@ -107,8 +228,130 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):
         defer.returnValue((200, ret))
 
 
+class PasswordResetSubmitTokenServlet(RestServlet):
+    """Handles 3PID validation token submission"""
+    PATTERNS = client_patterns(
+        "/password_reset/(?P<medium>[^/]*)/submit_token/*$",
+        releases=(),
+        unstable=True,
+    )
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(PasswordResetSubmitTokenServlet, self).__init__()
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.config = hs.config
+        self.clock = hs.get_clock()
+        self.datastore = hs.get_datastore()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, medium):
+        if medium != "email":
+            raise SynapseError(
+                400,
+                "This medium is currently not supported for password resets",
+            )
+        if self.config.email_password_reset_behaviour == "off":
+            if self.config.password_resets_were_disabled_due_to_email_config:
+                logger.warn(
+                    "User password resets have been disabled due to lack of email config"
+                )
+            raise SynapseError(
+                400, "Email-based password resets have been disabled on this server",
+            )
+
+        sid = parse_string(request, "sid")
+        client_secret = parse_string(request, "client_secret")
+        token = parse_string(request, "token")
+
+        # Attempt to validate a 3PID session
+        try:
+            # Mark the session as valid
+            next_link = yield self.datastore.validate_threepid_session(
+                sid,
+                client_secret,
+                token,
+                self.clock.time_msec(),
+            )
+
+            # Perform a 302 redirect if next_link is set
+            if next_link:
+                if next_link.startswith("file:///"):
+                    logger.warn(
+                        "Not redirecting to next_link as it is a local file: address"
+                    )
+                else:
+                    request.setResponseCode(302)
+                    request.setHeader("Location", next_link)
+                    finish_request(request)
+                    defer.returnValue(None)
+
+            # Otherwise show the success template
+            html = self.config.email_password_reset_success_html_content
+            request.setResponseCode(200)
+        except ThreepidValidationError as e:
+            # Show a failure page with a reason
+            html = self.load_jinja2_template(
+                self.config.email_template_dir,
+                self.config.email_password_reset_failure_template,
+                template_vars={
+                    "failure_reason": e.msg,
+                }
+            )
+            request.setResponseCode(e.code)
+
+        request.write(html.encode('utf-8'))
+        finish_request(request)
+        defer.returnValue(None)
+
+    def load_jinja2_template(self, template_dir, template_filename, template_vars):
+        """Loads a jinja2 template with variables to insert
+
+        Args:
+            template_dir (str): The directory where templates are stored
+            template_filename (str): The name of the template in the template_dir
+            template_vars (dict): Mapping of template variable names to the
+                values to insert
+
+        Returns:
+            str containing the contents of the rendered template
+        """
+        loader = jinja2.FileSystemLoader(template_dir)
+        env = jinja2.Environment(loader=loader)
+
+        template = env.get_template(template_filename)
+        return template.render(**template_vars)
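+
+    # A minimal usage sketch of the helper above; the failure_reason value is
+    # illustrative:
+    #
+    #     html = self.load_jinja2_template(
+    #         self.config.email_template_dir,
+    #         self.config.email_password_reset_failure_template,
+    #         template_vars={"failure_reason": "Token has expired"},
+    #     )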
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, medium):
+        if medium != "email":
+            raise SynapseError(
+                400,
+                "This medium is currently not supported for password resets",
+            )
+
+        body = parse_json_object_from_request(request)
+        assert_params_in_dict(body, [
+            'sid', 'client_secret', 'token',
+        ])
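+        # Body shape (illustrative values):
+        #     {"sid": "1", "client_secret": "d0nttell", "token": "t0ken"}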
+
+        valid, _ = yield self.datastore.validate_threepid_validation_token(
+            body['sid'],
+            body['client_secret'],
+            body['token'],
+            self.clock.time_msec(),
+        )
+        response_code = 200 if valid else 400
+
+        defer.returnValue((response_code, {"success": valid}))
+
+
 class PasswordRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/account/password$")
+    PATTERNS = client_patterns("/account/password$")
 
     def __init__(self, hs):
         super(PasswordRestServlet, self).__init__()
@@ -144,6 +387,7 @@ class PasswordRestServlet(RestServlet):
             result, params, _ = yield self.auth_handler.check_auth(
                 [[LoginType.EMAIL_IDENTITY], [LoginType.MSISDN]],
                 body, self.hs.get_ip_from_request(request),
+                password_servlet=True,
             )
 
             if LoginType.EMAIL_IDENTITY in result:
@@ -180,7 +424,7 @@ class PasswordRestServlet(RestServlet):
 
 
 class DeactivateAccountRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/account/deactivate$")
+    PATTERNS = client_patterns("/account/deactivate$")
 
     def __init__(self, hs):
         super(DeactivateAccountRestServlet, self).__init__()
@@ -228,7 +472,7 @@ class DeactivateAccountRestServlet(RestServlet):
 
 
 class EmailThreepidRequestTokenRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/account/3pid/email/requestToken$")
+    PATTERNS = client_patterns("/account/3pid/email/requestToken$")
 
     def __init__(self, hs):
         self.hs = hs
@@ -263,7 +507,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
 
 
 class MsisdnThreepidRequestTokenRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/account/3pid/msisdn/requestToken$")
+    PATTERNS = client_patterns("/account/3pid/msisdn/requestToken$")
 
     def __init__(self, hs):
         self.hs = hs
@@ -300,7 +544,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
 
 
 class ThreepidRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/account/3pid$")
+    PATTERNS = client_patterns("/account/3pid$")
 
     def __init__(self, hs):
         super(ThreepidRestServlet, self).__init__()
@@ -364,7 +608,7 @@ class ThreepidRestServlet(RestServlet):
 
 
 class ThreepidDeleteRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/account/3pid/delete$")
+    PATTERNS = client_patterns("/account/3pid/delete$")
 
     def __init__(self, hs):
         super(ThreepidDeleteRestServlet, self).__init__()
@@ -401,7 +645,7 @@ class ThreepidDeleteRestServlet(RestServlet):
 
 
 class WhoamiRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/account/whoami$")
+    PATTERNS = client_patterns("/account/whoami$")
 
     def __init__(self, hs):
         super(WhoamiRestServlet, self).__init__()
@@ -417,6 +661,7 @@ class WhoamiRestServlet(RestServlet):
 def register_servlets(hs, http_server):
     EmailPasswordRequestTokenRestServlet(hs).register(http_server)
     MsisdnPasswordRequestTokenRestServlet(hs).register(http_server)
+    PasswordResetSubmitTokenServlet(hs).register(http_server)
     PasswordRestServlet(hs).register(http_server)
     DeactivateAccountRestServlet(hs).register(http_server)
     EmailThreepidRequestTokenRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py
index f171b8d626..574a6298ce 100644
--- a/synapse/rest/client/v2_alpha/account_data.py
+++ b/synapse/rest/client/v2_alpha/account_data.py
@@ -20,7 +20,7 @@ from twisted.internet import defer
 from synapse.api.errors import AuthError, NotFoundError, SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -30,7 +30,7 @@ class AccountDataServlet(RestServlet):
     PUT /user/{user_id}/account_data/{account_dataType} HTTP/1.1
     GET /user/{user_id}/account_data/{account_dataType} HTTP/1.1
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/user/(?P<user_id>[^/]*)/account_data/(?P<account_data_type>[^/]*)"
     )
 
@@ -79,7 +79,7 @@ class RoomAccountDataServlet(RestServlet):
     PUT /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1
     GET /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/user/(?P<user_id>[^/]*)"
         "/rooms/(?P<room_id>[^/]*)"
         "/account_data/(?P<account_data_type>[^/]*)"
diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py
new file mode 100644
index 0000000000..63bdc33564
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/account_validity.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.errors import AuthError, SynapseError
+from synapse.http.server import finish_request
+from synapse.http.servlet import RestServlet
+
+from ._base import client_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class AccountValidityRenewServlet(RestServlet):
+    PATTERNS = client_patterns("/account_validity/renew$")
+    SUCCESS_HTML = b"<html><body>Your account has been successfully renewed.</body></html>"
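+
+    # Users reach this endpoint through the link in the renewal email; with
+    # the default unstable prefix (an assumption, the exact URL depends on
+    # public_baseurl) that link looks like:
+    #     https://example.com/_matrix/client/unstable/account_validity/renew?token=<renewal_token>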
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(AccountValidityRenewServlet, self).__init__()
+
+        self.hs = hs
+        self.account_activity_handler = hs.get_account_validity_handler()
+        self.auth = hs.get_auth()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request):
+        if b"token" not in request.args:
+            raise SynapseError(400, "Missing renewal token")
+        renewal_token = request.args[b"token"][0]
+
+        yield self.account_activity_handler.renew_account(renewal_token.decode('utf8'))
+
+        request.setResponseCode(200)
+        request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+        request.setHeader(b"Content-Length", b"%d" % (
+            len(AccountValidityRenewServlet.SUCCESS_HTML),
+        ))
+        request.write(AccountValidityRenewServlet.SUCCESS_HTML)
+        finish_request(request)
+        defer.returnValue(None)
+
+
+class AccountValiditySendMailServlet(RestServlet):
+    PATTERNS = client_patterns("/account_validity/send_mail$")
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(AccountValiditySendMailServlet, self).__init__()
+
+        self.hs = hs
+        self.account_activity_handler = hs.get_account_validity_handler()
+        self.auth = hs.get_auth()
+        self.account_validity = self.hs.config.account_validity
+
+    @defer.inlineCallbacks
+    def on_POST(self, request):
+        if not self.account_validity.renew_by_email_enabled:
+            raise AuthError(403, "Account renewal via email is disabled on this server.")
+
+        requester = yield self.auth.get_user_by_req(request, allow_expired=True)
+        user_id = requester.user.to_string()
+        yield self.account_activity_handler.send_renewal_email_to_user(user_id)
+
+        defer.returnValue((200, {}))
+
+
+def register_servlets(hs, http_server):
+    AccountValidityRenewServlet(hs).register(http_server)
+    AccountValiditySendMailServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index ac035c7735..8dfe5cba02 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -19,11 +19,11 @@ from twisted.internet import defer
 
 from synapse.api.constants import LoginType
 from synapse.api.errors import SynapseError
-from synapse.api.urls import CLIENT_V2_ALPHA_PREFIX
+from synapse.api.urls import CLIENT_API_PREFIX
 from synapse.http.server import finish_request
 from synapse.http.servlet import RestServlet, parse_string
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -122,7 +122,7 @@ class AuthRestServlet(RestServlet):
     cannot be handled in the normal flow (with requests to the same endpoint).
     Current use is for web fallback auth.
     """
-    PATTERNS = client_v2_patterns(r"/auth/(?P<stagetype>[\w\.]*)/fallback/web")
+    PATTERNS = client_patterns(r"/auth/(?P<stagetype>[\w\.]*)/fallback/web")
 
     def __init__(self, hs):
         super(AuthRestServlet, self).__init__()
@@ -139,8 +139,8 @@ class AuthRestServlet(RestServlet):
         if stagetype == LoginType.RECAPTCHA:
             html = RECAPTCHA_TEMPLATE % {
                 'session': session,
-                'myurl': "%s/auth/%s/fallback/web" % (
-                    CLIENT_V2_ALPHA_PREFIX, LoginType.RECAPTCHA
+                'myurl': "%s/r0/auth/%s/fallback/web" % (
+                    CLIENT_API_PREFIX, LoginType.RECAPTCHA
                 ),
                 'sitekey': self.hs.config.recaptcha_public_key,
             }
@@ -159,8 +159,8 @@ class AuthRestServlet(RestServlet):
                     self.hs.config.public_baseurl,
                     self.hs.config.user_consent_version,
                 ),
-                'myurl': "%s/auth/%s/fallback/web" % (
-                    CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS
+                'myurl': "%s/r0/auth/%s/fallback/web" % (
+                    CLIENT_API_PREFIX, LoginType.TERMS
                 ),
             }
             html_bytes = html.encode("utf8")
@@ -203,8 +203,8 @@ class AuthRestServlet(RestServlet):
             else:
                 html = RECAPTCHA_TEMPLATE % {
                     'session': session,
-                    'myurl': "%s/auth/%s/fallback/web" % (
-                        CLIENT_V2_ALPHA_PREFIX, LoginType.RECAPTCHA
+                    'myurl': "%s/r0/auth/%s/fallback/web" % (
+                        CLIENT_API_PREFIX, LoginType.RECAPTCHA
                     ),
                     'sitekey': self.hs.config.recaptcha_public_key,
                 }
@@ -240,8 +240,8 @@ class AuthRestServlet(RestServlet):
                         self.hs.config.public_baseurl,
                         self.hs.config.user_consent_version,
                     ),
-                    'myurl': "%s/auth/%s/fallback/web" % (
-                        CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS
+                    'myurl': "%s/r0/auth/%s/fallback/web" % (
+                        CLIENT_API_PREFIX, LoginType.TERMS
                     ),
                 }
             html_bytes = html.encode("utf8")
diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py
index a868d06098..fc7e2f4dd5 100644
--- a/synapse/rest/client/v2_alpha/capabilities.py
+++ b/synapse/rest/client/v2_alpha/capabilities.py
@@ -16,10 +16,10 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.http.servlet import RestServlet
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -27,7 +27,7 @@ logger = logging.getLogger(__name__)
 class CapabilitiesRestServlet(RestServlet):
     """End point to expose the capabilities of the server."""
 
-    PATTERNS = client_v2_patterns("/capabilities$")
+    PATTERNS = client_patterns("/capabilities$")
 
     def __init__(self, hs):
         """
@@ -36,6 +36,7 @@ class CapabilitiesRestServlet(RestServlet):
         """
         super(CapabilitiesRestServlet, self).__init__()
         self.hs = hs
+        self.config = hs.config
         self.auth = hs.get_auth()
         self.store = hs.get_datastore()
 
@@ -48,7 +49,7 @@ class CapabilitiesRestServlet(RestServlet):
         response = {
             "capabilities": {
                 "m.room_versions": {
-                    "default": DEFAULT_ROOM_VERSION.identifier,
+                    "default": self.config.default_room_version.identifier,
                     "available": {
                         v.identifier: v.disposition
                         for v in KNOWN_ROOM_VERSIONS.values()
diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py
index 9b75bb1377..78665304a5 100644
--- a/synapse/rest/client/v2_alpha/devices.py
+++ b/synapse/rest/client/v2_alpha/devices.py
@@ -24,13 +24,13 @@ from synapse.http.servlet import (
     parse_json_object_from_request,
 )
 
-from ._base import client_v2_patterns, interactive_auth_handler
+from ._base import client_patterns, interactive_auth_handler
 
 logger = logging.getLogger(__name__)
 
 
 class DevicesRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/devices$", v2_alpha=False)
+    PATTERNS = client_patterns("/devices$")
 
     def __init__(self, hs):
         """
@@ -56,7 +56,7 @@ class DeleteDevicesRestServlet(RestServlet):
     API for bulk deletion of devices. Accepts a JSON object with a devices
     key which lists the device_ids to delete. Requires user interactive auth.
     """
-    PATTERNS = client_v2_patterns("/delete_devices", v2_alpha=False)
+    PATTERNS = client_patterns("/delete_devices")
 
     def __init__(self, hs):
         super(DeleteDevicesRestServlet, self).__init__()
@@ -95,7 +95,7 @@ class DeleteDevicesRestServlet(RestServlet):
 
 
 class DeviceRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/devices/(?P<device_id>[^/]*)$", v2_alpha=False)
+    PATTERNS = client_patterns("/devices/(?P<device_id>[^/]*)$")
 
     def __init__(self, hs):
         """
diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py
index ae86728879..65db48c3cc 100644
--- a/synapse/rest/client/v2_alpha/filter.py
+++ b/synapse/rest/client/v2_alpha/filter.py
@@ -21,13 +21,13 @@ from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.types import UserID
 
-from ._base import client_v2_patterns, set_timeline_upper_limit
+from ._base import client_patterns, set_timeline_upper_limit
 
 logger = logging.getLogger(__name__)
 
 
 class GetFilterRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/user/(?P<user_id>[^/]*)/filter/(?P<filter_id>[^/]*)")
+    PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/filter/(?P<filter_id>[^/]*)")
 
     def __init__(self, hs):
         super(GetFilterRestServlet, self).__init__()
@@ -63,7 +63,7 @@ class GetFilterRestServlet(RestServlet):
 
 
 class CreateFilterRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/user/(?P<user_id>[^/]*)/filter")
+    PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/filter")
 
     def __init__(self, hs):
         super(CreateFilterRestServlet, self).__init__()
diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py
index 21e02c07c0..d082385ec7 100644
--- a/synapse/rest/client/v2_alpha/groups.py
+++ b/synapse/rest/client/v2_alpha/groups.py
@@ -21,7 +21,7 @@ from twisted.internet import defer
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.types import GroupID
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -29,7 +29,7 @@ logger = logging.getLogger(__name__)
 class GroupServlet(RestServlet):
     """Get the group profile
     """
-    PATTERNS = client_v2_patterns("/groups/(?P<group_id>[^/]*)/profile$")
+    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/profile$")
 
     def __init__(self, hs):
         super(GroupServlet, self).__init__()
@@ -65,7 +65,7 @@ class GroupServlet(RestServlet):
 class GroupSummaryServlet(RestServlet):
     """Get the full group summary
     """
-    PATTERNS = client_v2_patterns("/groups/(?P<group_id>[^/]*)/summary$")
+    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/summary$")
 
     def __init__(self, hs):
         super(GroupSummaryServlet, self).__init__()
@@ -93,7 +93,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
         - /groups/:group/summary/rooms/:room_id
         - /groups/:group/summary/categories/:category/rooms/:room_id
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/summary"
         "(/categories/(?P<category_id>[^/]+))?"
         "/rooms/(?P<room_id>[^/]*)$"
@@ -137,7 +137,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
 class GroupCategoryServlet(RestServlet):
     """Get/add/update/delete a group category
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)$"
     )
 
@@ -189,7 +189,7 @@ class GroupCategoryServlet(RestServlet):
 class GroupCategoriesServlet(RestServlet):
     """Get all group categories
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/categories/$"
     )
 
@@ -214,7 +214,7 @@ class GroupCategoriesServlet(RestServlet):
 class GroupRoleServlet(RestServlet):
     """Get/add/update/delete a group role
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)$"
     )
 
@@ -266,7 +266,7 @@ class GroupRoleServlet(RestServlet):
 class GroupRolesServlet(RestServlet):
     """Get all group roles
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/roles/$"
     )
 
@@ -295,7 +295,7 @@ class GroupSummaryUsersRoleServlet(RestServlet):
         - /groups/:group/summary/users/:room_id
         - /groups/:group/summary/roles/:role/users/:user_id
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/summary"
         "(/roles/(?P<role_id>[^/]+))?"
         "/users/(?P<user_id>[^/]*)$"
@@ -339,7 +339,7 @@ class GroupSummaryUsersRoleServlet(RestServlet):
 class GroupRoomServlet(RestServlet):
     """Get all rooms in a group
     """
-    PATTERNS = client_v2_patterns("/groups/(?P<group_id>[^/]*)/rooms$")
+    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/rooms$")
 
     def __init__(self, hs):
         super(GroupRoomServlet, self).__init__()
@@ -360,7 +360,7 @@ class GroupRoomServlet(RestServlet):
 class GroupUsersServlet(RestServlet):
     """Get all users in a group
     """
-    PATTERNS = client_v2_patterns("/groups/(?P<group_id>[^/]*)/users$")
+    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/users$")
 
     def __init__(self, hs):
         super(GroupUsersServlet, self).__init__()
@@ -381,7 +381,7 @@ class GroupUsersServlet(RestServlet):
 class GroupInvitedUsersServlet(RestServlet):
     """Get users invited to a group
     """
-    PATTERNS = client_v2_patterns("/groups/(?P<group_id>[^/]*)/invited_users$")
+    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/invited_users$")
 
     def __init__(self, hs):
         super(GroupInvitedUsersServlet, self).__init__()
@@ -405,7 +405,7 @@ class GroupInvitedUsersServlet(RestServlet):
 class GroupSettingJoinPolicyServlet(RestServlet):
     """Set group join policy
     """
-    PATTERNS = client_v2_patterns("/groups/(?P<group_id>[^/]*)/settings/m.join_policy$")
+    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/settings/m.join_policy$")
 
     def __init__(self, hs):
         super(GroupSettingJoinPolicyServlet, self).__init__()
@@ -431,7 +431,7 @@ class GroupSettingJoinPolicyServlet(RestServlet):
 class GroupCreateServlet(RestServlet):
     """Create a group
     """
-    PATTERNS = client_v2_patterns("/create_group$")
+    PATTERNS = client_patterns("/create_group$")
 
     def __init__(self, hs):
         super(GroupCreateServlet, self).__init__()
@@ -462,7 +462,7 @@ class GroupCreateServlet(RestServlet):
 class GroupAdminRoomsServlet(RestServlet):
     """Add a room to the group
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/admin/rooms/(?P<room_id>[^/]*)$"
     )
 
@@ -499,7 +499,7 @@ class GroupAdminRoomsServlet(RestServlet):
 class GroupAdminRoomsConfigServlet(RestServlet):
     """Update the config of a room in a group
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/admin/rooms/(?P<room_id>[^/]*)"
         "/config/(?P<config_key>[^/]*)$"
     )
@@ -526,7 +526,7 @@ class GroupAdminRoomsConfigServlet(RestServlet):
 class GroupAdminUsersInviteServlet(RestServlet):
     """Invite a user to the group
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/admin/users/invite/(?P<user_id>[^/]*)$"
     )
 
@@ -555,7 +555,7 @@ class GroupAdminUsersInviteServlet(RestServlet):
 class GroupAdminUsersKickServlet(RestServlet):
     """Kick a user from the group
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/admin/users/remove/(?P<user_id>[^/]*)$"
     )
 
@@ -581,7 +581,7 @@ class GroupAdminUsersKickServlet(RestServlet):
 class GroupSelfLeaveServlet(RestServlet):
     """Leave a joined group
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/self/leave$"
     )
 
@@ -607,7 +607,7 @@ class GroupSelfLeaveServlet(RestServlet):
 class GroupSelfJoinServlet(RestServlet):
     """Attempt to join a group, or knock
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/self/join$"
     )
 
@@ -633,7 +633,7 @@ class GroupSelfJoinServlet(RestServlet):
 class GroupSelfAcceptInviteServlet(RestServlet):
     """Accept a group invite
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/self/accept_invite$"
     )
 
@@ -659,7 +659,7 @@ class GroupSelfAcceptInviteServlet(RestServlet):
 class GroupSelfUpdatePublicityServlet(RestServlet):
     """Update whether we publicise a users membership of a group
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/groups/(?P<group_id>[^/]*)/self/update_publicity$"
     )
 
@@ -686,7 +686,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet):
 class PublicisedGroupsForUserServlet(RestServlet):
     """Get the list of groups a user is advertising
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/publicised_groups/(?P<user_id>[^/]*)$"
     )
 
@@ -711,7 +711,7 @@ class PublicisedGroupsForUserServlet(RestServlet):
 class PublicisedGroupsForUsersServlet(RestServlet):
     """Get the list of groups a user is advertising
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/publicised_groups$"
     )
 
@@ -739,7 +739,7 @@ class PublicisedGroupsForUsersServlet(RestServlet):
 class GroupsForUserServlet(RestServlet):
     """Get all groups the logged in user is joined to
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/joined_groups$"
     )
 
diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py
index 8486086b51..4cbfbf5631 100644
--- a/synapse/rest/client/v2_alpha/keys.py
+++ b/synapse/rest/client/v2_alpha/keys.py
@@ -26,7 +26,7 @@ from synapse.http.servlet import (
 )
 from synapse.types import StreamToken
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -56,7 +56,7 @@ class KeyUploadServlet(RestServlet):
       },
     }
     """
-    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
+    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
 
     def __init__(self, hs):
         """
@@ -130,7 +130,7 @@ class KeyQueryServlet(RestServlet):
     } } } } } }
     """
 
-    PATTERNS = client_v2_patterns("/keys/query$")
+    PATTERNS = client_patterns("/keys/query$")
 
     def __init__(self, hs):
         """
@@ -159,7 +159,7 @@ class KeyChangesServlet(RestServlet):
         200 OK
         { "changed": ["@foo:example.com"] }
     """
-    PATTERNS = client_v2_patterns("/keys/changes$")
+    PATTERNS = client_patterns("/keys/changes$")
 
     def __init__(self, hs):
         """
@@ -209,7 +209,7 @@ class OneTimeKeyServlet(RestServlet):
     } } } }
 
     """
-    PATTERNS = client_v2_patterns("/keys/claim$")
+    PATTERNS = client_patterns("/keys/claim$")
 
     def __init__(self, hs):
         super(OneTimeKeyServlet, self).__init__()
diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py
index 2a6ea3df5f..53e666989b 100644
--- a/synapse/rest/client/v2_alpha/notifications.py
+++ b/synapse/rest/client/v2_alpha/notifications.py
@@ -17,25 +17,23 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.events.utils import (
-    format_event_for_client_v2_without_room_id,
-    serialize_event,
-)
+from synapse.events.utils import format_event_for_client_v2_without_room_id
 from synapse.http.servlet import RestServlet, parse_integer, parse_string
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
 
 class NotificationsServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/notifications$")
+    PATTERNS = client_patterns("/notifications$")
 
     def __init__(self, hs):
         super(NotificationsServlet, self).__init__()
         self.store = hs.get_datastore()
         self.auth = hs.get_auth()
         self.clock = hs.get_clock()
+        self._event_serializer = hs.get_event_client_serializer()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -69,11 +67,11 @@ class NotificationsServlet(RestServlet):
                 "profile_tag": pa["profile_tag"],
                 "actions": pa["actions"],
                 "ts": pa["received_ts"],
-                "event": serialize_event(
+                "event": (yield self._event_serializer.serialize_event(
                     notif_events[pa["event_id"]],
                     self.clock.time_msec(),
                     event_format=format_event_for_client_v2_without_room_id,
-                ),
+                )),
             }
 
             if pa["room_id"] not in receipts_by_room:
diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py
index 01c90aa2a3..bb927d9f9d 100644
--- a/synapse/rest/client/v2_alpha/openid.py
+++ b/synapse/rest/client/v2_alpha/openid.py
@@ -22,7 +22,7 @@ from synapse.api.errors import AuthError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.util.stringutils import random_string
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -56,7 +56,7 @@ class IdTokenServlet(RestServlet):
         "expires_in": 3600,
     }
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/user/(?P<user_id>[^/]*)/openid/request_token"
     )
 
diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py
index a6e582a5ae..f4bd0d077f 100644
--- a/synapse/rest/client/v2_alpha/read_marker.py
+++ b/synapse/rest/client/v2_alpha/read_marker.py
@@ -19,13 +19,13 @@ from twisted.internet import defer
 
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
 
 class ReadMarkerRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/rooms/(?P<room_id>[^/]*)/read_markers$")
+    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/read_markers$")
 
     def __init__(self, hs):
         super(ReadMarkerRestServlet, self).__init__()
diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py
index de370cac45..fa12ac3e4d 100644
--- a/synapse/rest/client/v2_alpha/receipts.py
+++ b/synapse/rest/client/v2_alpha/receipts.py
@@ -20,13 +20,13 @@ from twisted.internet import defer
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import RestServlet
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
 
 class ReceiptRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/rooms/(?P<room_id>[^/]*)"
         "/receipt/(?P<receipt_type>[^/]*)"
         "/(?P<event_id>[^/]*)$"
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 6d235262c8..79c085408b 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -31,6 +31,7 @@ from synapse.api.errors import (
     SynapseError,
     UnrecognizedRequestError,
 )
+from synapse.config.ratelimiting import FederationRateLimitConfig
 from synapse.config.server import is_threepid_reserved
 from synapse.http.servlet import (
     RestServlet,
@@ -42,7 +43,7 @@ from synapse.util.msisdn import phone_number_to_msisdn
 from synapse.util.ratelimitutils import FederationRateLimiter
 from synapse.util.threepids import check_3pid_allowed
 
-from ._base import client_v2_patterns, interactive_auth_handler
+from ._base import client_patterns, interactive_auth_handler
 
 # We ought to be using hmac.compare_digest() but on older pythons it doesn't
 # exist. It's a _really minor_ security flaw to use plain string comparison
@@ -59,7 +60,7 @@ logger = logging.getLogger(__name__)
 
 
 class EmailRegisterRequestTokenRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/register/email/requestToken$")
+    PATTERNS = client_patterns("/register/email/requestToken$")
 
     def __init__(self, hs):
         """
@@ -97,7 +98,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
 
 
 class MsisdnRegisterRequestTokenRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/register/msisdn/requestToken$")
+    PATTERNS = client_patterns("/register/msisdn/requestToken$")
 
     def __init__(self, hs):
         """
@@ -141,7 +142,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
 
 
 class UsernameAvailabilityRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/register/available")
+    PATTERNS = client_patterns("/register/available")
 
     def __init__(self, hs):
         """
@@ -153,16 +154,18 @@ class UsernameAvailabilityRestServlet(RestServlet):
         self.registration_handler = hs.get_registration_handler()
         self.ratelimiter = FederationRateLimiter(
             hs.get_clock(),
-            # Time window of 2s
-            window_size=2000,
-            # Artificially delay requests if rate > sleep_limit/window_size
-            sleep_limit=1,
-            # Amount of artificial delay to apply
-            sleep_msec=1000,
-            # Error with 429 if more than reject_limit requests are queued
-            reject_limit=1,
-            # Allow 1 request at a time
-            concurrent_requests=1,
+            FederationRateLimitConfig(
+                # Time window of 2s
+                window_size=2000,
+                # Artificially delay requests if rate > sleep_limit/window_size
+                sleep_limit=1,
+                # Amount of artificial delay to apply
+                sleep_msec=1000,
+                # Error with 429 if more than reject_limit requests are queued
+                reject_limit=1,
+                # Allow 1 request at a time
+                concurrent_requests=1,
+            )
         )
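+        # Net effect: at most one request is served per 2s window; a second
+        # is delayed by 1s, and anything beyond one queued request gets a 429.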
 
     @defer.inlineCallbacks
@@ -179,7 +182,7 @@ class UsernameAvailabilityRestServlet(RestServlet):
 
 
 class RegisterRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/register$")
+    PATTERNS = client_patterns("/register$")
 
     def __init__(self, hs):
         """
@@ -345,18 +348,22 @@ class RegisterRestServlet(RestServlet):
         if self.hs.config.enable_registration_captcha:
             # only support 3PIDless registration if no 3PIDs are required
             if not require_email and not require_msisdn:
-                flows.extend([[LoginType.RECAPTCHA]])
+                # Also add a dummy flow here, otherwise if a client completes
+                # recaptcha first we'll assume they were going for this flow
+                # and complete the request, when they could have been trying to
+                # complete one of the flows with email/msisdn auth.
+                flows.extend([[LoginType.RECAPTCHA, LoginType.DUMMY]])
             # only support the email-only flow if we don't require MSISDN 3PIDs
             if not require_msisdn:
-                flows.extend([[LoginType.EMAIL_IDENTITY, LoginType.RECAPTCHA]])
+                flows.extend([[LoginType.RECAPTCHA, LoginType.EMAIL_IDENTITY]])
 
             if show_msisdn:
                 # only support the MSISDN-only flow if we don't require email 3PIDs
                 if not require_email:
-                    flows.extend([[LoginType.MSISDN, LoginType.RECAPTCHA]])
+                    flows.extend([[LoginType.RECAPTCHA, LoginType.MSISDN]])
                 # always let users provide both MSISDN & email
                 flows.extend([
-                    [LoginType.MSISDN, LoginType.EMAIL_IDENTITY, LoginType.RECAPTCHA],
+                    [LoginType.RECAPTCHA, LoginType.MSISDN, LoginType.EMAIL_IDENTITY],
                 ])
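+            # Every captcha flow above starts with m.login.recaptcha, so a
+            # client that completes recaptcha first is still free to finish
+            # any of them (dummy, email, msisdn, or msisdn + email).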
         else:
             # only support 3PIDless registration if no 3PIDs are required
@@ -379,7 +386,15 @@ class RegisterRestServlet(RestServlet):
         if self.hs.config.user_consent_at_registration:
             new_flows = []
             for flow in flows:
-                flow.append(LoginType.TERMS)
+                inserted = False
+                # m.login.terms should go near the end but before msisdn or email auth
+                for i, stage in enumerate(flow):
+                    if stage in (LoginType.EMAIL_IDENTITY, LoginType.MSISDN):
+                        flow.insert(i, LoginType.TERMS)
+                        inserted = True
+                        break
+                if not inserted:
+                    flow.append(LoginType.TERMS)
             flows.extend(new_flows)
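+            # e.g. [recaptcha, email] becomes [recaptcha, terms, email], while
+            # a flow with no email/msisdn stage, such as [recaptcha, dummy],
+            # becomes [recaptcha, dummy, terms].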
 
         auth_result, params, session_id = yield self.auth_handler.check_auth(
@@ -428,6 +443,28 @@ class RegisterRestServlet(RestServlet):
             if auth_result:
                 threepid = auth_result.get(LoginType.EMAIL_IDENTITY)
 
+                # Also check that we're not trying to register a 3pid that's already
+                # been registered.
+                #
+                # This has probably happened in /register/email/requestToken as well,
+                # but if a user hits this endpoint twice then clicks on each link from
+                # the two activation emails, they would register the same 3pid twice.
+                for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]:
+                    if login_type in auth_result:
+                        medium = auth_result[login_type]['medium']
+                        address = auth_result[login_type]['address']
+
+                        existing_user_id = yield self.store.get_user_id_by_threepid(
+                            medium, address,
+                        )
+
+                        if existing_user_id is not None:
+                            raise SynapseError(
+                                400,
+                                "%s is already in use" % medium,
+                                Codes.THREEPID_IN_USE,
+                            )
+
             (registered_user_id, _) = yield self.registration_handler.register(
                 localpart=desired_username,
                 password=new_password,
diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py
new file mode 100644
index 0000000000..f8f8742bdc
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/relations.py
@@ -0,0 +1,338 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This class implements the proposed relation APIs from MSC 1849.
+
+Since the MSC has not been approved all APIs here are unstable and may change at
+any time to reflect changes in the MSC.
+"""
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, RelationTypes
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import (
+    RestServlet,
+    parse_integer,
+    parse_json_object_from_request,
+    parse_string,
+)
+from synapse.rest.client.transactions import HttpTransactionCache
+from synapse.storage.relations import AggregationPaginationToken, RelationPaginationToken
+
+from ._base import client_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class RelationSendServlet(RestServlet):
+    """Helper API for sending events that have relation data.
+
+    Example API shape to send a 👍 reaction to a room:
+
+        POST /rooms/!foo/send_relation/$bar/m.annotation/m.reaction?key=%F0%9F%91%8D
+        {}
+
+        {
+            "event_id": "$foobar"
+        }
+    """
+
+    PATTERN = (
+        "/rooms/(?P<room_id>[^/]*)/send_relation"
+        "/(?P<parent_id>[^/]*)/(?P<relation_type>[^/]*)/(?P<event_type>[^/]*)"
+    )
+
+    def __init__(self, hs):
+        super(RelationSendServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.event_creation_handler = hs.get_event_creation_handler()
+        self.txns = HttpTransactionCache(hs)
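+        # HttpTransactionCache makes PUT retries idempotent: repeated PUTs
+        # with the same txn_id return the cached response instead of sending
+        # a second event.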
+
+    def register(self, http_server):
+        http_server.register_paths(
+            "POST",
+            client_patterns(self.PATTERN + "$", releases=()),
+            self.on_PUT_or_POST,
+        )
+        http_server.register_paths(
+            "PUT",
+            client_patterns(self.PATTERN + "/(?P<txn_id>[^/]*)$", releases=()),
+            self.on_PUT,
+        )
+
+    def on_PUT(self, request, *args, **kwargs):
+        return self.txns.fetch_or_execute_request(
+            request, self.on_PUT_or_POST, request, *args, **kwargs
+        )
+
+    @defer.inlineCallbacks
+    def on_PUT_or_POST(
+        self, request, room_id, parent_id, relation_type, event_type, txn_id=None
+    ):
+        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+
+        if event_type == EventTypes.Member:
+            # Adding relations to a membership event is meaningless, so we deny it
+            # at the CS API rather than trying to handle it correctly.
+            raise SynapseError(400, "Cannot send member events with relations")
+
+        content = parse_json_object_from_request(request)
+
+        aggregation_key = parse_string(request, "key", encoding="utf-8")
+
+        content["m.relates_to"] = {
+            "event_id": parent_id,
+            "key": aggregation_key,
+            "rel_type": relation_type,
+        }
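+        # For the 👍 example in the class docstring this yields:
+        #     {"event_id": "$bar", "key": "👍", "rel_type": "m.annotation"}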
+
+        event_dict = {
+            "type": event_type,
+            "content": content,
+            "room_id": room_id,
+            "sender": requester.user.to_string(),
+        }
+
+        event = yield self.event_creation_handler.create_and_send_nonmember_event(
+            requester, event_dict=event_dict, txn_id=txn_id
+        )
+
+        defer.returnValue((200, {"event_id": event.event_id}))
+
+
+class RelationPaginationServlet(RestServlet):
+    """API to paginate relations on an event by topological ordering, optionally
+    filtered by relation type and event type.
+    """
+
+    PATTERNS = client_patterns(
+        "/rooms/(?P<room_id>[^/]*)/relations/(?P<parent_id>[^/]*)"
+        "(/(?P<relation_type>[^/]*)(/(?P<event_type>[^/]*))?)?$",
+        releases=(),
+    )
+
+    def __init__(self, hs):
+        super(RelationPaginationServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastore()
+        self.clock = hs.get_clock()
+        self._event_serializer = hs.get_event_client_serializer()
+        self.event_handler = hs.get_event_handler()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=None):
+        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+
+        yield self.auth.check_in_room_or_world_readable(
+            room_id, requester.user.to_string()
+        )
+
+        # This checks that a) the event exists and b) the user is allowed to
+        # view it.
+        yield self.event_handler.get_event(requester.user, room_id, parent_id)
+
+        limit = parse_integer(request, "limit", default=5)
+        from_token = parse_string(request, "from")
+        to_token = parse_string(request, "to")
+
+        if from_token:
+            from_token = RelationPaginationToken.from_string(from_token)
+
+        if to_token:
+            to_token = RelationPaginationToken.from_string(to_token)
+
+        result = yield self.store.get_relations_for_event(
+            event_id=parent_id,
+            relation_type=relation_type,
+            event_type=event_type,
+            limit=limit,
+            from_token=from_token,
+            to_token=to_token,
+        )
+
+        events = yield self.store.get_events_as_list(
+            [c["event_id"] for c in result.chunk]
+        )
+
+        now = self.clock.time_msec()
+        events = yield self._event_serializer.serialize_events(events, now)
+
+        return_value = result.to_dict()
+        return_value["chunk"] = events
+
+        defer.returnValue((200, return_value))
+
+
+class RelationAggregationPaginationServlet(RestServlet):
+    """API to paginate aggregation groups of relations, e.g. paginate the
+    types and counts of the reactions on the events.
+
+    Example request and response:
+
+        GET /rooms/{room_id}/aggregations/{parent_id}
+
+        {
+            chunk: [
+                {
+                    "type": "m.reaction",
+                    "key": "👍",
+                    "count": 3
+                }
+            ]
+        }
+    """
+
+    PATTERNS = client_patterns(
+        "/rooms/(?P<room_id>[^/]*)/aggregations/(?P<parent_id>[^/]*)"
+        "(/(?P<relation_type>[^/]*)(/(?P<event_type>[^/]*))?)?$",
+        releases=(),
+    )
+
+    def __init__(self, hs):
+        super(RelationAggregationPaginationServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastore()
+        self.event_handler = hs.get_event_handler()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=None):
+        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+
+        yield self.auth.check_in_room_or_world_readable(
+            room_id, requester.user.to_string()
+        )
+
+        # This checks that a) the event exists and b) the user is allowed to
+        # view it.
+        yield self.event_handler.get_event(requester.user, room_id, parent_id)
+
+        if relation_type not in (RelationTypes.ANNOTATION, None):
+            raise SynapseError(400, "Relation type must be 'annotation'")
+
+        limit = parse_integer(request, "limit", default=5)
+        from_token = parse_string(request, "from")
+        to_token = parse_string(request, "to")
+
+        if from_token:
+            from_token = AggregationPaginationToken.from_string(from_token)
+
+        if to_token:
+            to_token = AggregationPaginationToken.from_string(to_token)
+
+        res = yield self.store.get_aggregation_groups_for_event(
+            event_id=parent_id,
+            event_type=event_type,
+            limit=limit,
+            from_token=from_token,
+            to_token=to_token,
+        )
+
+        defer.returnValue((200, res.to_dict()))
+
+
+class RelationAggregationGroupPaginationServlet(RestServlet):
+    """API to paginate within an aggregation group of relations, e.g. paginate
+    all the 👍 reactions on an event.
+
+    Example request and response:
+
+        GET /rooms/{room_id}/aggregations/{parent_id}/m.annotation/m.reaction/👍
+
+        {
+            chunk: [
+                {
+                    "type": "m.reaction",
+                    "content": {
+                        "m.relates_to": {
+                            "rel_type": "m.annotation",
+                            "key": "👍"
+                        }
+                    }
+                },
+                ...
+            ]
+        }
+    """
+
+    PATTERNS = client_patterns(
+        "/rooms/(?P<room_id>[^/]*)/aggregations/(?P<parent_id>[^/]*)"
+        "/(?P<relation_type>[^/]*)/(?P<event_type>[^/]*)/(?P<key>[^/]*)$",
+        releases=(),
+    )
+
+    def __init__(self, hs):
+        super(RelationAggregationGroupPaginationServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastore()
+        self.clock = hs.get_clock()
+        self._event_serializer = hs.get_event_client_serializer()
+        self.event_handler = hs.get_event_handler()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, room_id, parent_id, relation_type, event_type, key):
+        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+
+        yield self.auth.check_in_room_or_world_readable(
+            room_id, requester.user.to_string()
+        )
+
+        # This checks that a) the event exists and b) the user is allowed to
+        # view it.
+        yield self.event_handler.get_event(requester.user, room_id, parent_id)
+
+        if relation_type != RelationTypes.ANNOTATION:
+            raise SynapseError(400, "Relation type must be 'annotation'")
+
+        limit = parse_integer(request, "limit", default=5)
+        from_token = parse_string(request, "from")
+        to_token = parse_string(request, "to")
+
+        if from_token:
+            from_token = RelationPaginationToken.from_string(from_token)
+
+        if to_token:
+            to_token = RelationPaginationToken.from_string(to_token)
+
+        result = yield self.store.get_relations_for_event(
+            event_id=parent_id,
+            relation_type=relation_type,
+            event_type=event_type,
+            aggregation_key=key,
+            limit=limit,
+            from_token=from_token,
+            to_token=to_token,
+        )
+
+        events = yield self.store.get_events_as_list(
+            [c["event_id"] for c in result.chunk]
+        )
+
+        now = self.clock.time_msec()
+        events = yield self._event_serializer.serialize_events(events, now)
+
+        return_value = result.to_dict()
+        return_value["chunk"] = events
+
+        defer.returnValue((200, return_value))
+
+
+def register_servlets(hs, http_server):
+    RelationSendServlet(hs).register(http_server)
+    RelationPaginationServlet(hs).register(http_server)
+    RelationAggregationPaginationServlet(hs).register(http_server)
+    RelationAggregationGroupPaginationServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py
index 95d2a71ec2..10198662a9 100644
--- a/synapse/rest/client/v2_alpha/report_event.py
+++ b/synapse/rest/client/v2_alpha/report_event.py
@@ -27,13 +27,13 @@ from synapse.http.servlet import (
     parse_json_object_from_request,
 )
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
 
 class ReportEventRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/rooms/(?P<room_id>[^/]*)/report/(?P<event_id>[^/]*)$"
     )
 
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index 220a0de30b..87779645f9 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -24,13 +24,13 @@ from synapse.http.servlet import (
     parse_string,
 )
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
 
 class RoomKeysServlet(RestServlet):
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/room_keys/keys(/(?P<room_id>[^/]+))?(/(?P<session_id>[^/]+))?$"
     )
 
@@ -256,7 +256,7 @@ class RoomKeysServlet(RestServlet):
 
 
 class RoomKeysNewVersionServlet(RestServlet):
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/room_keys/version$"
     )
 
@@ -314,7 +314,7 @@ class RoomKeysNewVersionServlet(RestServlet):
 
 
 class RoomKeysVersionServlet(RestServlet):
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/room_keys/version(/(?P<version>[^/]+))?$"
     )
 
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
index 3db7ff8d1b..c621a90fba 100644
--- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -25,7 +25,7 @@ from synapse.http.servlet import (
     parse_json_object_from_request,
 )
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -47,10 +47,9 @@ class RoomUpgradeRestServlet(RestServlet):
     Args:
         hs (synapse.server.HomeServer):
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         # /rooms/$roomid/upgrade
         "/rooms/(?P<room_id>[^/]*)/upgrade$",
-        v2_alpha=False,
     )
 
     def __init__(self, hs):
diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py
index a9e9a47a0b..120a713361 100644
--- a/synapse/rest/client/v2_alpha/sendtodevice.py
+++ b/synapse/rest/client/v2_alpha/sendtodevice.py
@@ -21,15 +21,14 @@ from synapse.http import servlet
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.rest.client.transactions import HttpTransactionCache
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
 
 class SendToDeviceRestServlet(servlet.RestServlet):
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/sendToDevice/(?P<message_type>[^/]*)/(?P<txn_id>[^/]*)$",
-        v2_alpha=False
     )
 
     def __init__(self, hs):
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index 39d157a44b..148fc6c985 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -26,14 +26,13 @@ from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection
 from synapse.events.utils import (
     format_event_for_client_v2_without_room_id,
     format_event_raw,
-    serialize_event,
 )
 from synapse.handlers.presence import format_user_presence_state
 from synapse.handlers.sync import SyncConfig
 from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
 from synapse.types import StreamToken
 
-from ._base import client_v2_patterns, set_timeline_upper_limit
+from ._base import client_patterns, set_timeline_upper_limit
 
 logger = logging.getLogger(__name__)
 
@@ -74,7 +73,7 @@ class SyncRestServlet(RestServlet):
         }
     """
 
-    PATTERNS = client_v2_patterns("/sync$")
+    PATTERNS = client_patterns("/sync$")
     ALLOWED_PRESENCE = set(["online", "offline", "unavailable"])
 
     def __init__(self, hs):
@@ -86,6 +85,7 @@ class SyncRestServlet(RestServlet):
         self.filtering = hs.get_filtering()
         self.presence_handler = hs.get_presence_handler()
         self._server_notices_sender = hs.get_server_notices_sender()
+        self._event_serializer = hs.get_event_client_serializer()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -168,14 +168,14 @@ class SyncRestServlet(RestServlet):
             )
 
         time_now = self.clock.time_msec()
-        response_content = self.encode_response(
+        response_content = yield self.encode_response(
             time_now, sync_result, requester.access_token_id, filter
         )
 
         defer.returnValue((200, response_content))
 
-    @staticmethod
-    def encode_response(time_now, sync_result, access_token_id, filter):
+    @defer.inlineCallbacks
+    def encode_response(self, time_now, sync_result, access_token_id, filter):
         if filter.event_format == 'client':
             event_formatter = format_event_for_client_v2_without_room_id
         elif filter.event_format == 'federation':
@@ -183,24 +183,24 @@ class SyncRestServlet(RestServlet):
         else:
             raise Exception("Unknown event format %s" % (filter.event_format, ))
 
-        joined = SyncRestServlet.encode_joined(
+        joined = yield self.encode_joined(
             sync_result.joined, time_now, access_token_id,
             filter.event_fields,
             event_formatter,
         )
 
-        invited = SyncRestServlet.encode_invited(
+        invited = yield self.encode_invited(
             sync_result.invited, time_now, access_token_id,
             event_formatter,
         )
 
-        archived = SyncRestServlet.encode_archived(
+        archived = yield self.encode_archived(
             sync_result.archived, time_now, access_token_id,
             filter.event_fields,
             event_formatter,
         )
 
-        return {
+        defer.returnValue({
             "account_data": {"events": sync_result.account_data},
             "to_device": {"events": sync_result.to_device},
             "device_lists": {
@@ -222,7 +222,7 @@ class SyncRestServlet(RestServlet):
             },
             "device_one_time_keys_count": sync_result.device_one_time_keys_count,
             "next_batch": sync_result.next_batch.to_string(),
-        }
+        })
 
     @staticmethod
     def encode_presence(events, time_now):
@@ -239,8 +239,8 @@ class SyncRestServlet(RestServlet):
             ]
         }
 
-    @staticmethod
-    def encode_joined(rooms, time_now, token_id, event_fields, event_formatter):
+    @defer.inlineCallbacks
+    def encode_joined(self, rooms, time_now, token_id, event_fields, event_formatter):
         """
         Encode the joined rooms in a sync result
 
@@ -261,15 +261,15 @@ class SyncRestServlet(RestServlet):
         """
         joined = {}
         for room in rooms:
-            joined[room.room_id] = SyncRestServlet.encode_room(
+            joined[room.room_id] = yield self.encode_room(
                 room, time_now, token_id, joined=True, only_fields=event_fields,
                 event_formatter=event_formatter,
             )
 
-        return joined
+        defer.returnValue(joined)
 
-    @staticmethod
-    def encode_invited(rooms, time_now, token_id, event_formatter):
+    @defer.inlineCallbacks
+    def encode_invited(self, rooms, time_now, token_id, event_formatter):
         """
         Encode the invited rooms in a sync result
 
@@ -289,7 +289,7 @@ class SyncRestServlet(RestServlet):
         """
         invited = {}
         for room in rooms:
-            invite = serialize_event(
+            invite = yield self._event_serializer.serialize_event(
                 room.invite, time_now, token_id=token_id,
                 event_format=event_formatter,
                 is_invite=True,
@@ -302,10 +302,10 @@ class SyncRestServlet(RestServlet):
                 "invite_state": {"events": invited_state}
             }
 
-        return invited
+        defer.returnValue(invited)
 
-    @staticmethod
-    def encode_archived(rooms, time_now, token_id, event_fields, event_formatter):
+    @defer.inlineCallbacks
+    def encode_archived(self, rooms, time_now, token_id, event_fields, event_formatter):
         """
         Encode the archived rooms in a sync result
 
@@ -326,17 +326,17 @@ class SyncRestServlet(RestServlet):
         """
         joined = {}
         for room in rooms:
-            joined[room.room_id] = SyncRestServlet.encode_room(
+            joined[room.room_id] = yield self.encode_room(
                 room, time_now, token_id, joined=False,
                 only_fields=event_fields,
                 event_formatter=event_formatter,
             )
 
-        return joined
+        defer.returnValue(joined)
 
-    @staticmethod
+    @defer.inlineCallbacks
     def encode_room(
-            room, time_now, token_id, joined,
+            self, room, time_now, token_id, joined,
             only_fields, event_formatter,
     ):
         """
@@ -355,9 +355,13 @@ class SyncRestServlet(RestServlet):
         Returns:
             dict[str, object]: the room, encoded in our response format
         """
-        def serialize(event):
-            return serialize_event(
-                event, time_now, token_id=token_id,
+        def serialize(events):
+            return self._event_serializer.serialize_events(
+                events, time_now=time_now,
+                # We don't bundle "live" events, as otherwise clients
+                # will end up double counting annotations.
+                bundle_aggregations=False,
+                token_id=token_id,
                 event_format=event_formatter,
                 only_event_fields=only_fields,
             )
@@ -376,8 +380,8 @@ class SyncRestServlet(RestServlet):
                     event.event_id, room.room_id, event.room_id,
                 )
 
-        serialized_state = [serialize(e) for e in state_events]
-        serialized_timeline = [serialize(e) for e in timeline_events]
+        serialized_state = yield serialize(state_events)
+        serialized_timeline = yield serialize(timeline_events)
 
         account_data = room.account_data
 
@@ -397,7 +401,7 @@ class SyncRestServlet(RestServlet):
             result["unread_notifications"] = room.unread_notifications
             result["summary"] = room.summary
 
-        return result
+        defer.returnValue(result)
 
 
 def register_servlets(hs, http_server):
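
The sync.py hunks above are a mechanical conversion from synchronous (mostly static) helpers to Twisted inlineCallbacks generators: callers now yield each encode_* call, and results are handed back with defer.returnValue, since a generator cannot `return value` on Python 2. A minimal self-contained sketch of the same pattern, with illustrative names:

    from twisted.internet import defer

    class ResponseEncoder(object):
        """Illustrative sketch only; not the Synapse servlet."""

        @defer.inlineCallbacks
        def encode_response(self, items):
            # The helper now returns a Deferred, so it must be yielded
            # rather than called like a plain function.
            encoded = yield self.encode_items(items)
            # inlineCallbacks generators return via defer.returnValue.
            defer.returnValue({"items": encoded})

        @defer.inlineCallbacks
        def encode_items(self, items):
            results = []
            for item in items:
                # Stands in for an asynchronous serializer call.
                serialized = yield defer.succeed({"id": item})
                results.append(serialized)
            defer.returnValue(results)
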
diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py
index 4fea614e95..ebff7cff45 100644
--- a/synapse/rest/client/v2_alpha/tags.py
+++ b/synapse/rest/client/v2_alpha/tags.py
@@ -20,7 +20,7 @@ from twisted.internet import defer
 from synapse.api.errors import AuthError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
@@ -29,7 +29,7 @@ class TagListServlet(RestServlet):
     """
     GET /user/{user_id}/rooms/{room_id}/tags HTTP/1.1
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/user/(?P<user_id>[^/]*)/rooms/(?P<room_id>[^/]*)/tags"
     )
 
@@ -54,7 +54,7 @@ class TagServlet(RestServlet):
     PUT /user/{user_id}/rooms/{room_id}/tags/{tag} HTTP/1.1
     DELETE /user/{user_id}/rooms/{room_id}/tags/{tag} HTTP/1.1
     """
-    PATTERNS = client_v2_patterns(
+    PATTERNS = client_patterns(
         "/user/(?P<user_id>[^/]*)/rooms/(?P<room_id>[^/]*)/tags/(?P<tag>[^/]*)"
     )
 
diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py
index b9b5d07677..e7a987466a 100644
--- a/synapse/rest/client/v2_alpha/thirdparty.py
+++ b/synapse/rest/client/v2_alpha/thirdparty.py
@@ -21,13 +21,13 @@ from twisted.internet import defer
 from synapse.api.constants import ThirdPartyEntityKind
 from synapse.http.servlet import RestServlet
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
 
 class ThirdPartyProtocolsServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/thirdparty/protocols")
+    PATTERNS = client_patterns("/thirdparty/protocols")
 
     def __init__(self, hs):
         super(ThirdPartyProtocolsServlet, self).__init__()
@@ -44,7 +44,7 @@ class ThirdPartyProtocolsServlet(RestServlet):
 
 
 class ThirdPartyProtocolServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/thirdparty/protocol/(?P<protocol>[^/]+)$")
+    PATTERNS = client_patterns("/thirdparty/protocol/(?P<protocol>[^/]+)$")
 
     def __init__(self, hs):
         super(ThirdPartyProtocolServlet, self).__init__()
@@ -66,7 +66,7 @@ class ThirdPartyProtocolServlet(RestServlet):
 
 
 class ThirdPartyUserServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/thirdparty/user(/(?P<protocol>[^/]+))?$")
+    PATTERNS = client_patterns("/thirdparty/user(/(?P<protocol>[^/]+))?$")
 
     def __init__(self, hs):
         super(ThirdPartyUserServlet, self).__init__()
@@ -89,7 +89,7 @@ class ThirdPartyUserServlet(RestServlet):
 
 
 class ThirdPartyLocationServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/thirdparty/location(/(?P<protocol>[^/]+))?$")
+    PATTERNS = client_patterns("/thirdparty/location(/(?P<protocol>[^/]+))?$")
 
     def __init__(self, hs):
         super(ThirdPartyLocationServlet, self).__init__()
diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py
index 6e76b9e9c2..6c366142e1 100644
--- a/synapse/rest/client/v2_alpha/tokenrefresh.py
+++ b/synapse/rest/client/v2_alpha/tokenrefresh.py
@@ -18,7 +18,7 @@ from twisted.internet import defer
 from synapse.api.errors import AuthError
 from synapse.http.servlet import RestServlet
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 
 class TokenRefreshRestServlet(RestServlet):
@@ -26,7 +26,7 @@ class TokenRefreshRestServlet(RestServlet):
     Exchanges refresh tokens for a pair of an access token and a new refresh
     token.
     """
-    PATTERNS = client_v2_patterns("/tokenrefresh")
+    PATTERNS = client_patterns("/tokenrefresh")
 
     def __init__(self, hs):
         super(TokenRefreshRestServlet, self).__init__()
diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py
index 36b02de37f..69e4efc47a 100644
--- a/synapse/rest/client/v2_alpha/user_directory.py
+++ b/synapse/rest/client/v2_alpha/user_directory.py
@@ -20,13 +20,13 @@ from twisted.internet import defer
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 
-from ._base import client_v2_patterns
+from ._base import client_patterns
 
 logger = logging.getLogger(__name__)
 
 
 class UserDirectorySearchRestServlet(RestServlet):
-    PATTERNS = client_v2_patterns("/user_directory/search$")
+    PATTERNS = client_patterns("/user_directory/search$")
 
     def __init__(self, hs):
         """
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 27e7cbf3cc..babbf6a23c 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -39,6 +39,7 @@ class VersionsRestServlet(RestServlet):
                 "r0.2.0",
                 "r0.3.0",
                 "r0.4.0",
+                "r0.5.0",
             ],
             # as per MSC1497:
             "unstable_features": {
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index eb8782aa6e..8a730bbc35 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -20,7 +20,7 @@ from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
 from synapse.api.errors import Codes, SynapseError
-from synapse.crypto.keyring import KeyLookupError
+from synapse.crypto.keyring import ServerKeyFetcher
 from synapse.http.server import respond_with_json_bytes, wrap_json_request_handler
 from synapse.http.servlet import parse_integer, parse_json_object_from_request
 
@@ -89,7 +89,7 @@ class RemoteKey(Resource):
     isLeaf = True
 
     def __init__(self, hs):
-        self.keyring = hs.get_keyring()
+        self.fetcher = ServerKeyFetcher(hs)
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
         self.federation_domain_whitelist = hs.config.federation_domain_whitelist
@@ -215,15 +215,7 @@ class RemoteKey(Resource):
                     json_results.add(bytes(result["key_json"]))
 
         if cache_misses and query_remote_on_cache_miss:
-            for server_name, key_ids in cache_misses.items():
-                try:
-                    yield self.keyring.get_server_verify_key_v2_direct(
-                        server_name, key_ids
-                    )
-                except KeyLookupError as e:
-                    logger.info("Failed to fetch key: %s", e)
-                except Exception:
-                    logger.exception("Failed to get key for %r", server_name)
+            yield self.fetcher.get_keys(cache_misses)
             yield self.query_keys(
                 request, query, query_remote_on_cache_miss=False
             )
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index 953d89bd82..2dcc8f74d6 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2019 New Vector Ltd.
+# Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -191,6 +191,10 @@ def respond_with_responder(request, responder, media_type, file_size, upload_nam
         # in that case.
         logger.warning("Failed to write to consumer: %s %s", type(e), e)
 
+        # Unregister the producer, if it has one, so Twisted doesn't complain
+        if request.producer:
+            request.unregisterProducer()
+
     finish_request(request)
 
 
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index bdffa97805..a4929dd5db 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -386,8 +386,10 @@ class MediaRepository(object):
                 raise SynapseError(502, "Failed to fetch remote media")
 
             except SynapseError:
-                logger.exception("Failed to fetch remote media %s/%s",
-                                 server_name, media_id)
+                logger.warn(
+                    "Failed to fetch remote media %s/%s",
+                    server_name, media_id,
+                )
                 raise
             except NotRetryingDestination:
                 logger.warn("Not retrying destination %r", server_name)
@@ -444,6 +446,9 @@ class MediaRepository(object):
             )
             return
 
+        if thumbnailer.transpose_method is not None:
+            m_width, m_height = thumbnailer.transpose()
+
         if t_method == "crop":
             t_byte_source = thumbnailer.crop(t_width, t_height, t_type)
         elif t_method == "scale":
@@ -578,6 +583,12 @@ class MediaRepository(object):
             )
             return
 
+        if thumbnailer.transpose_method is not None:
+            m_width, m_height = yield logcontext.defer_to_thread(
+                self.hs.get_reactor(),
+                thumbnailer.transpose
+            )
+
         # We deduplicate the thumbnail sizes by ignoring the cropped versions if
         # they have the same dimensions of a scaled one.
         thumbnails = {}
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index ba3ab1d37d..acf87709f2 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -31,6 +31,7 @@ from six.moves import urllib_parse as urlparse
 from canonicaljson import json
 
 from twisted.internet import defer
+from twisted.internet.error import DNSLookupError
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
@@ -328,9 +329,18 @@ class PreviewUrlResource(Resource):
                 # handler will return a SynapseError to the client instead of
                 # blank data or a 500.
                 raise
+            except DNSLookupError:
+                # DNS lookup returned no results
+                # Note: This will also be the case if one of the resolved IP
+                # addresses is blacklisted
+                raise SynapseError(
+                    502, "DNS resolution failure during URL preview generation",
+                    Codes.UNKNOWN
+                )
             except Exception as e:
                 # FIXME: pass through 404s and other error messages nicely
                 logger.warn("Error downloading %s: %r", url, e)
+
                 raise SynapseError(
                     500, "Failed to download content: %s" % (
                         traceback.format_exception_only(sys.exc_info()[0], e),
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index 5aa03031f6..d90cbfb56a 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -108,6 +108,7 @@ class FileStorageProviderBackend(StorageProvider):
     """
 
     def __init__(self, hs, config):
+        self.hs = hs
         self.cache_directory = hs.config.media_store_path
         self.base_directory = config
 
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index 5305e9175f..35a750923b 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -56,8 +56,8 @@ class ThumbnailResource(Resource):
     def _async_render_GET(self, request):
         set_cors_headers(request)
         server_name, media_id, _ = parse_media_id(request)
-        width = parse_integer(request, "width")
-        height = parse_integer(request, "height")
+        width = parse_integer(request, "width", required=True)
+        height = parse_integer(request, "height", required=True)
         method = parse_string(request, "method", "scale")
         m_type = parse_string(request, "type", "image/png")
 
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
index a4b26c2587..3efd0d80fc 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -20,6 +20,17 @@ import PIL.Image as Image
 
 logger = logging.getLogger(__name__)
 
+EXIF_ORIENTATION_TAG = 0x0112
+EXIF_TRANSPOSE_MAPPINGS = {
+    2: Image.FLIP_LEFT_RIGHT,
+    3: Image.ROTATE_180,
+    4: Image.FLIP_TOP_BOTTOM,
+    5: Image.TRANSPOSE,
+    6: Image.ROTATE_270,
+    7: Image.TRANSVERSE,
+    8: Image.ROTATE_90
+}
+
 
 class Thumbnailer(object):
 
@@ -31,6 +42,30 @@ class Thumbnailer(object):
     def __init__(self, input_path):
         self.image = Image.open(input_path)
         self.width, self.height = self.image.size
+        self.transpose_method = None
+        try:
+            # We don't use ImageOps.exif_transpose since it crashes on large EXIF data
+            image_exif = self.image._getexif()
+            if image_exif is not None:
+                image_orientation = image_exif.get(EXIF_ORIENTATION_TAG)
+                self.transpose_method = EXIF_TRANSPOSE_MAPPINGS.get(image_orientation)
+        except Exception as e:
+            # A lot of parsing errors can happen when parsing EXIF
+            logger.info("Error parsing image EXIF information: %s", e)
+
+    def transpose(self):
+        """Transpose the image using its EXIF Orientation tag
+
+        Returns:
+            Tuple[int, int]: (width, height) containing the new image size in pixels.
+        """
+        if self.transpose_method is not None:
+            self.image = self.image.transpose(self.transpose_method)
+            self.width, self.height = self.image.size
+            self.transpose_method = None
+            # We don't need EXIF any more
+            self.image.info["exif"] = None
+        return self.image.size
 
     def aspect(self, max_width, max_height):
         """Calculate the largest size that preserves aspect ratio which
diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py
index c0a4ae93e5..a7fa4f39af 100644
--- a/synapse/rest/well_known.py
+++ b/synapse/rest/well_known.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2018 New Vector Ltd.
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -68,6 +68,6 @@ class WellKnownResource(Resource):
             request.setHeader(b"Content-Type", b"text/plain")
             return b'.well-known not available'
 
-        logger.error("returning: %s", r)
+        logger.debug("returning: %s", r)
         request.setHeader(b"Content-Type", b"application/json")
         return json.dumps(r).encode("utf-8")
diff --git a/synapse/server.py b/synapse/server.py
index dc8f1ccb8c..a54e023cc9 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -35,6 +37,8 @@ from synapse.crypto import context_factory
 from synapse.crypto.keyring import Keyring
 from synapse.events.builder import EventBuilderFactory
 from synapse.events.spamcheck import SpamChecker
+from synapse.events.third_party_rules import ThirdPartyEventRules
+from synapse.events.utils import EventClientSerializer
 from synapse.federation.federation_client import FederationClient
 from synapse.federation.federation_server import (
     FederationHandlerRegistry,
@@ -47,6 +51,7 @@ from synapse.federation.transport.client import TransportLayerClient
 from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer
 from synapse.groups.groups_server import GroupsServerHandler
 from synapse.handlers import Handlers
+from synapse.handlers.account_validity import AccountValidityHandler
 from synapse.handlers.acme import AcmeHandler
 from synapse.handlers.appservice import ApplicationServicesHandler
 from synapse.handlers.auth import AuthHandler, MacaroonGenerator
@@ -70,6 +75,7 @@ from synapse.handlers.room_list import RoomListHandler
 from synapse.handlers.room_member import RoomMemberMasterHandler
 from synapse.handlers.room_member_worker import RoomMemberWorkerHandler
 from synapse.handlers.set_password import SetPasswordHandler
+from synapse.handlers.stats import StatsHandler
 from synapse.handlers.sync import SyncHandler
 from synapse.handlers.typing import TypingHandler
 from synapse.handlers.user_directory import UserDirectoryHandler
@@ -137,6 +143,7 @@ class HomeServer(object):
         'acme_handler',
         'auth_handler',
         'device_handler',
+        'stats_handler',
         'e2e_keys_handler',
         'e2e_room_keys_handler',
         'event_handler',
@@ -174,6 +181,7 @@ class HomeServer(object):
         'groups_attestation_renewer',
         'secrets',
         'spam_checker',
+        'third_party_event_rules',
         'room_member_handler',
         'federation_registry',
         'server_notices_manager',
@@ -183,10 +191,13 @@ class HomeServer(object):
         'room_context_handler',
         'sendmail',
         'registration_handler',
+        'account_validity_handler',
+        'event_client_serializer',
     ]
 
     REQUIRED_ON_MASTER_STARTUP = [
         "user_directory_handler",
+        "stats_handler"
     ]
 
     # This is overridden in derived application classes
@@ -470,9 +481,15 @@ class HomeServer(object):
     def build_secrets(self):
         return Secrets()
 
+    def build_stats_handler(self):
+        return StatsHandler(self)
+
     def build_spam_checker(self):
         return SpamChecker(self)
 
+    def build_third_party_event_rules(self):
+        return ThirdPartyEventRules(self)
+
     def build_room_member_handler(self):
         if self.config.worker_app:
             return RoomMemberWorkerHandler(self)
@@ -506,6 +523,12 @@ class HomeServer(object):
     def build_registration_handler(self):
         return RegistrationHandler(self)
 
+    def build_account_validity_handler(self):
+        return AccountValidityHandler(self)
+
+    def build_event_client_serializer(self):
+        return EventClientSerializer(self)
+
     def remove_pusher(self, app_id, push_key, user_id):
         return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
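
Each new entry in DEPENDENCIES above ('stats_handler', 'third_party_event_rules', 'account_validity_handler', 'event_client_serializer') is paired with a build_* method; the HomeServer machinery exposes a matching get_* accessor that builds the object on first use and caches it. A standalone sketch of that pattern, with illustrative names, not the HomeServer metaprogramming itself:

    class LazyDeps(object):
        """Sketch of the build_*/get_* caching pattern."""

        def __init__(self):
            self._cache = {}

        def _get(self, name):
            # Build on first access, then reuse the cached instance.
            if name not in self._cache:
                self._cache[name] = getattr(self, "build_" + name)()
            return self._cache[name]

        def build_event_client_serializer(self):
            return object()  # stands in for EventClientSerializer(self)

        def get_event_client_serializer(self):
            return self._get("event_client_serializer")
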
 
diff --git a/synapse/server.pyi b/synapse/server.pyi
index 3ba3a967c2..9583e82d52 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -18,7 +18,6 @@ import synapse.server_notices.server_notices_sender
 import synapse.state
 import synapse.storage
 
-
 class HomeServer(object):
     @property
     def config(self) -> synapse.config.homeserver.HomeServerConfig:
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index c432041b4e..0ca6f6121f 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -36,6 +36,7 @@ from .engines import PostgresEngine
 from .event_federation import EventFederationStore
 from .event_push_actions import EventPushActionsStore
 from .events import EventsStore
+from .events_bg_updates import EventsBackgroundUpdatesStore
 from .filtering import FilteringStore
 from .group_server import GroupServerStore
 from .keys import KeyStore
@@ -49,11 +50,13 @@ from .pusher import PusherStore
 from .receipts import ReceiptsStore
 from .registration import RegistrationStore
 from .rejections import RejectionsStore
+from .relations import RelationsStore
 from .room import RoomStore
 from .roommember import RoomMemberStore
 from .search import SearchStore
 from .signatures import SignatureStore
 from .state import StateStore
+from .stats import StatsStore
 from .stream import StreamStore
 from .tags import TagsStore
 from .transactions import TransactionStore
@@ -64,6 +67,7 @@ logger = logging.getLogger(__name__)
 
 
 class DataStore(
+    EventsBackgroundUpdatesStore,
     RoomMemberStore,
     RoomStore,
     RegistrationStore,
@@ -99,6 +103,8 @@ class DataStore(
     GroupServerStore,
     UserErasureStore,
     MonthlyActiveUsersStore,
+    StatsStore,
+    RelationsStore,
 ):
     def __init__(self, db_conn, hs):
         self.hs = hs
@@ -273,23 +279,37 @@ class DataStore(
         """
         Counts the number of users who used this homeserver in the last 24 hours.
         """
+        yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)
+        return self.runInteraction("count_daily_users", self._count_users, yesterday,)
 
-        def _count_users(txn):
-            yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)
-
-            sql = """
-                SELECT COALESCE(count(*), 0) FROM (
-                    SELECT user_id FROM user_ips
-                    WHERE last_seen > ?
-                    GROUP BY user_id
-                ) u
-            """
-
-            txn.execute(sql, (yesterday,))
-            count, = txn.fetchone()
-            return count
+    def count_monthly_users(self):
+        """
+        Counts the number of users who used this homeserver in the last 30 days.
+        Note this method is intended for phonehome metrics only and is different
+        from the MAU figure in synapse.storage.monthly_active_users which,
+        amongst other things, includes a 3 day grace period before a user counts.
+        """
+        thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
+        return self.runInteraction(
+            "count_monthly_users",
+            self._count_users,
+            thirty_days_ago,
+        )
 
-        return self.runInteraction("count_users", _count_users)
+    def _count_users(self, txn, time_from):
+        """
+        Returns the number of users seen since the given time_from timestamp (ms)
+        """
+        sql = """
+            SELECT COALESCE(count(*), 0) FROM (
+                SELECT user_id FROM user_ips
+                WHERE last_seen > ?
+                GROUP BY user_id
+            ) u
+        """
+        txn.execute(sql, (time_from,))
+        count, = txn.fetchone()
+        return count
 
     def count_r30_users(self):
         """
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 983ce026e1..941c07fce5 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,6 +16,7 @@
 # limitations under the License.
 import itertools
 import logging
+import random
 import sys
 import threading
 import time
@@ -227,6 +230,8 @@ class SQLBaseStore(object):
         # A set of tables that are not safe to use native upserts in.
         self._unsafe_to_upsert_tables = set(UNIQUE_INDEX_BACKGROUND_UPDATES.keys())
 
+        self._account_validity = self.hs.config.account_validity
+
         # We add the user_directory_search table to the blacklist on SQLite
         # because the existing search table does not have an index, making it
         # unsafe to use native upserts.
@@ -243,6 +248,16 @@ class SQLBaseStore(object):
                 self._check_safe_to_upsert,
             )
 
+        self.rand = random.SystemRandom()
+
+        if self._account_validity.enabled:
+            self._clock.call_later(
+                0.0,
+                run_as_background_process,
+                "account_validity_set_expiration_dates",
+                self._set_expiration_date_when_missing,
+            )
+
     @defer.inlineCallbacks
     def _check_safe_to_upsert(self):
         """
@@ -275,6 +290,67 @@ class SQLBaseStore(object):
                 self._check_safe_to_upsert,
             )
 
+    @defer.inlineCallbacks
+    def _set_expiration_date_when_missing(self):
+        """
+        Retrieves the list of registered users that don't have an expiration date, and
+        adds an expiration date for each of them.
+        """
+
+        def select_users_with_no_expiration_date_txn(txn):
+            """Retrieves the list of registered users with no expiration date from the
+            database, filtering out deactivated users.
+            """
+            sql = (
+                "SELECT users.name FROM users"
+                " LEFT JOIN account_validity ON (users.name = account_validity.user_id)"
+                " WHERE account_validity.user_id is NULL AND users.deactivated = 0;"
+            )
+            txn.execute(sql, [])
+
+            res = self.cursor_to_dict(txn)
+            if res:
+                for user in res:
+                    self.set_expiration_date_for_user_txn(
+                        txn,
+                        user["name"],
+                        use_delta=True,
+                    )
+
+        yield self.runInteraction(
+            "get_users_with_no_expiration_date",
+            select_users_with_no_expiration_date_txn,
+        )
+
+    def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False):
+        """Sets an expiration date to the account with the given user ID.
+
+        Args:
+             user_id (str): User ID to set an expiration date for.
+             use_delta (bool): If set to False, the expiration date for the user will be
+                now + validity period. If set to True, this expiration date will be a
+                random value in the [now + period - d, now + period] range, where d is a
+                delta equal to 10% of the validity period.
+        """
+        now_ms = self._clock.time_msec()
+        expiration_ts = now_ms + self._account_validity.period
+
+        if use_delta:
+            expiration_ts = self.rand.randrange(
+                expiration_ts - self._account_validity.startup_job_max_delta,
+                expiration_ts,
+            )
+
+        self._simple_insert_txn(
+            txn,
+            "account_validity",
+            values={
+                "user_id": user_id,
+                "expiration_ts_ms": expiration_ts,
+                "email_sent": False,
+            },
+        )
+
     def start_profiling(self):
         self._previous_loop_ts = self._clock.time_msec()
 
@@ -512,6 +588,10 @@ class SQLBaseStore(object):
         Args:
             table : string giving the table name
             values : dict of new column names and values for them
+            or_ignore : bool; if True, a row that conflicts with an existing
+                one is silently ignored and the function returns False
+                instead of raising an exception
+            desc : string giving a description of the transaction
 
         Returns:
             bool: Whether the row was inserted or not. Only useful when
@@ -1152,8 +1232,8 @@ class SQLBaseStore(object):
         )
 
         txn.execute(select_sql, list(keyvalues.values()))
-
         row = txn.fetchone()
+
         if not row:
             if allow_none:
                 return None
@@ -1203,7 +1283,8 @@ class SQLBaseStore(object):
             " AND ".join("%s = ?" % (k,) for k in keyvalues),
         )
 
-        return txn.execute(sql, list(keyvalues.values()))
+        txn.execute(sql, list(keyvalues.values()))
+        return txn.rowcount
 
     def _simple_delete_many(self, table, column, iterable, keyvalues, desc):
         return self.runInteraction(
@@ -1222,9 +1303,12 @@ class SQLBaseStore(object):
             column : column name to test for inclusion against `iterable`
             iterable : list
             keyvalues : dict of column names and values to select the rows with
+
+        Returns:
+            int: Number of rows deleted
         """
         if not iterable:
-            return
+            return 0
 
         sql = "DELETE FROM %s" % table
 
@@ -1239,7 +1323,9 @@ class SQLBaseStore(object):
 
         if clauses:
             sql = "%s WHERE %s" % (sql, " AND ".join(clauses))
-        return txn.execute(sql, values)
+        txn.execute(sql, values)
+
+        return txn.rowcount
 
     def _get_cache_dict(
         self, db_conn, table, entity_column, stream_column, max_value, limit=100000
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index 6092f600ba..eb329ebd8b 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -302,7 +302,7 @@ class ApplicationServiceTransactionWorkerStore(
 
         event_ids = json.loads(entry["event_ids"])
 
-        events = yield self._get_events(event_ids)
+        events = yield self.get_events_as_list(event_ids)
 
         defer.returnValue(
             AppServiceTransaction(service=service, id=entry["txn_id"], events=events)
@@ -358,7 +358,7 @@ class ApplicationServiceTransactionWorkerStore(
             "get_new_events_for_appservice", get_new_events_for_appservice_txn
         )
 
-        events = yield self._get_events(event_ids)
+        events = yield self.get_events_as_list(event_ids)
 
         defer.returnValue((upper_bound, events))
 
diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py
index fed4ea3610..4ea0deea4f 100644
--- a/synapse/storage/deviceinbox.py
+++ b/synapse/storage/deviceinbox.py
@@ -118,7 +118,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
         defer.returnValue(count)
 
     def get_new_device_msgs_for_remote(
-        self, destination, last_stream_id, current_stream_id, limit=100
+        self, destination, last_stream_id, current_stream_id, limit
     ):
         """
         Args:
@@ -138,6 +138,10 @@ class DeviceInboxWorkerStore(SQLBaseStore):
         if not has_changed or last_stream_id == current_stream_id:
             return defer.succeed(([], current_stream_id))
 
+        if limit <= 0:
+            # This can happen if we run out of room for EDUs in the transaction.
+            return defer.succeed(([], last_stream_id))
+
         def get_new_messages_for_remote_destination_txn(txn):
             sql = (
                 "SELECT stream_id, messages_json FROM device_federation_outbox"
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index fd869b934c..d102e07372 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 import logging
 
-from six import iteritems, itervalues
+from six import iteritems
 
 from canonicaljson import json
 
@@ -72,11 +72,14 @@ class DeviceWorkerStore(SQLBaseStore):
 
         defer.returnValue({d["device_id"]: d for d in devices})
 
-    def get_devices_by_remote(self, destination, from_stream_id):
+    @defer.inlineCallbacks
+    def get_devices_by_remote(self, destination, from_stream_id, limit):
         """Get stream of updates to send to remote servers
 
         Returns:
-            (int, list[dict]): current stream id and list of updates
+            Deferred[tuple[int, list[dict]]]:
+                current stream id (ie, the stream id of the last update included in the
+                response), and the list of updates
         """
         now_stream_id = self._device_list_id_gen.get_current_token()
 
@@ -84,55 +87,131 @@ class DeviceWorkerStore(SQLBaseStore):
             destination, int(from_stream_id)
         )
         if not has_changed:
-            return (now_stream_id, [])
-
-        return self.runInteraction(
+            defer.returnValue((now_stream_id, []))
+
+        # We retrieve n+1 devices from the list of outbound pokes where n is
+        # our outbound device update limit. We then check if the very last
+        # device has the same stream_id as the second-to-last device. If so,
+        # then we ignore all devices with that stream_id and only send the
+        # devices with a lower stream_id.
+        #
+        # If when culling the list we end up with no devices afterwards, we
+        # consider the device update to be too large, and simply skip the
+        # stream_id; the rationale being that such a large device list update
+        # is likely an error.
+        updates = yield self.runInteraction(
             "get_devices_by_remote",
             self._get_devices_by_remote_txn,
             destination,
             from_stream_id,
             now_stream_id,
+            limit + 1,
         )
 
+        # Return an empty list if there are no updates
+        if not updates:
+            defer.returnValue((now_stream_id, []))
+
+        # if we have exceeded the limit, we need to exclude any results with the
+        # same stream_id as the last row.
+        if len(updates) > limit:
+            stream_id_cutoff = updates[-1][2]
+            now_stream_id = stream_id_cutoff - 1
+        else:
+            stream_id_cutoff = None
+
+        # Perform the equivalent of a GROUP BY
+        #
+        # Iterate through the updates list and copy non-duplicate
+        # (user_id, device_id) entries into a map, with the value being
+        # the max stream_id across each set of duplicate entries
+        #
+        # maps (user_id, device_id) -> stream_id
+        # as long as their stream_id does not match that of the last row
+        query_map = {}
+        for update in updates:
+            if stream_id_cutoff is not None and update[2] >= stream_id_cutoff:
+                # Stop processing updates
+                break
+
+            key = (update[0], update[1])
+            query_map[key] = max(query_map.get(key, 0), update[2])
+
+        # If we didn't find any updates with a stream_id lower than the cutoff, it
+        # means that there are more than limit updates all of which have the same
+        # stream_id.
+
+        # That should only happen if a client is spamming the server with new
+        # devices, in which case E2E isn't going to work well anyway. We'll just
+        # skip that stream_id and return an empty list, and continue with the next
+        # stream_id next time.
+        if not query_map:
+            defer.returnValue((stream_id_cutoff, []))
+
+        results = yield self._get_device_update_edus_by_remote(
+            destination,
+            from_stream_id,
+            query_map,
+        )
+
+        defer.returnValue((now_stream_id, results))
+
     def _get_devices_by_remote_txn(
-        self, txn, destination, from_stream_id, now_stream_id
+        self, txn, destination, from_stream_id, now_stream_id, limit
     ):
+        """Return device update information for a given remote destination
+
+        Args:
+            txn (LoggingTransaction): The transaction to execute
+            destination (str): The host the device updates are intended for
+            from_stream_id (int): The minimum stream_id to filter updates by, exclusive
+            now_stream_id (int): The maximum stream_id to filter updates by, inclusive
+            limit (int): Maximum number of device updates to return
+
+        Returns:
+            List: List of device updates
+        """
         sql = """
-            SELECT user_id, device_id, max(stream_id) FROM device_lists_outbound_pokes
+            SELECT user_id, device_id, stream_id FROM device_lists_outbound_pokes
             WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ?
-            GROUP BY user_id, device_id
-            LIMIT 20
+            ORDER BY stream_id
+            LIMIT ?
         """
-        txn.execute(sql, (destination, from_stream_id, now_stream_id, False))
+        txn.execute(sql, (destination, from_stream_id, now_stream_id, False, limit))
 
-        # maps (user_id, device_id) -> stream_id
-        query_map = {(r[0], r[1]): r[2] for r in txn}
-        if not query_map:
-            return (now_stream_id, [])
+        return list(txn)
 
-        if len(query_map) >= 20:
-            now_stream_id = max(stream_id for stream_id in itervalues(query_map))
+    @defer.inlineCallbacks
+    def _get_device_update_edus_by_remote(
+        self, destination, from_stream_id, query_map,
+    ):
+        """Returns a list of device update EDUs as well as E2EE keys
 
-        devices = self._get_e2e_device_keys_txn(
-            txn,
+        Args:
+            destination (str): The host the device updates are intended for
+            from_stream_id (int): The minimum stream_id to filter updates by, exclusive
+            query_map (Dict[(str, str): int]): Dictionary mapping
+                user_id/device_id to update stream_id
+
+        Returns:
+            List[Dict]: List of objects each representing a device update EDU
+
+        """
+        devices = yield self.runInteraction(
+            "_get_e2e_device_keys_txn",
+            self._get_e2e_device_keys_txn,
             query_map.keys(),
             include_all_devices=True,
             include_deleted_devices=True,
         )
 
-        prev_sent_id_sql = """
-            SELECT coalesce(max(stream_id), 0) as stream_id
-            FROM device_lists_outbound_last_success
-            WHERE destination = ? AND user_id = ? AND stream_id <= ?
-        """
-
         results = []
         for user_id, user_devices in iteritems(devices):
             # The prev_id for the first row is always the last row before
             # `from_stream_id`
-            txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id))
-            rows = txn.fetchall()
-            prev_id = rows[0][0]
+            prev_id = yield self._get_last_device_update_for_remote_user(
+                destination, user_id, from_stream_id,
+            )
             for device_id, device in iteritems(user_devices):
                 stream_id = query_map[(user_id, device_id)]
                 result = {
@@ -156,7 +235,22 @@ class DeviceWorkerStore(SQLBaseStore):
 
                 results.append(result)
 
-        return (now_stream_id, results)
+        defer.returnValue(results)
+
+    def _get_last_device_update_for_remote_user(
+        self, destination, user_id, from_stream_id,
+    ):
+        def f(txn):
+            prev_sent_id_sql = """
+                SELECT coalesce(max(stream_id), 0) as stream_id
+                FROM device_lists_outbound_last_success
+                WHERE destination = ? AND user_id = ? AND stream_id <= ?
+            """
+            txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id))
+            rows = txn.fetchall()
+            return rows[0][0]
+
+        return self.runInteraction("get_last_device_update_for_remote_user", f)
 
     def mark_as_sent_devices_by_remote(self, destination, stream_id):
         """Mark that updates have successfully been sent to the destination.
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 1b97ee74e3..289b6bc281 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -45,6 +45,10 @@ class PostgresEngine(object):
         # together. For example, version 8.1.5 will be returned as 80105
         self._version = db_conn.server_version
 
+        # Are we on a supported PostgreSQL version?
+        if self._version < 90500:
+            raise RuntimeError("Synapse requires PostgreSQL 9.5 or above.")
+
         db_conn.set_isolation_level(
             self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
         )
@@ -64,9 +68,9 @@ class PostgresEngine(object):
     @property
     def can_native_upsert(self):
         """
-        Can we use native UPSERTs? This requires PostgreSQL 9.5+.
+        Can we use native UPSERTs?
         """
-        return self._version >= 90500
+        return True
 
     def is_deadlock(self, error):
         if isinstance(error, self.module.DatabaseError):
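
psycopg2's server_version packs two decimal digits per component for pre-10 PostgreSQL releases, which is why the 9.5 floor is the integer comparison against 90500. A sketch of that encoding (PostgreSQL 10+ reports its version with a different scheme):

    def pack_pg_version(version_string):
        """Pack 'X.Y.Z' into the pre-10 integer form, e.g. '8.1.5' -> 80105."""
        parts = [int(p) for p in version_string.split(".")] + [0, 0]
        major, minor, patch = parts[:3]
        return major * 10000 + minor * 100 + patch

    assert pack_pg_version("8.1.5") == 80105
    assert pack_pg_version("9.5") == 90500  # the minimum accepted above
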
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 956f876572..09e39c2c28 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -45,7 +45,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         """
         return self.get_auth_chain_ids(
             event_ids, include_given=include_given
-        ).addCallback(self._get_events)
+        ).addCallback(self.get_events_as_list)
 
     def get_auth_chain_ids(self, event_ids, include_given=False):
         """Get auth events for given event_ids. The events *must* be state events.
@@ -316,7 +316,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
                 event_list,
                 limit,
             )
-            .addCallback(self._get_events)
+            .addCallback(self.get_events_as_list)
             .addCallback(lambda l: sorted(l, key=lambda e: -e.depth))
         )
 
@@ -382,7 +382,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             latest_events,
             limit,
         )
-        events = yield self._get_events(ids)
+        events = yield self.get_events_as_list(ids)
         defer.returnValue(events)
 
     def _get_missing_events(self, txn, room_id, earliest_events, latest_events, limit):
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index dfda39bbe0..f631fb1733 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
+# Copyright 2018-2019 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +17,7 @@
 
 import itertools
 import logging
-from collections import OrderedDict, deque, namedtuple
+from collections import Counter as c_counter, OrderedDict, deque, namedtuple
 from functools import wraps
 
 from six import iteritems, text_type
@@ -32,6 +33,7 @@ from synapse.api.constants import EventTypes
 from synapse.api.errors import SynapseError
 from synapse.events import EventBase  # noqa: F401
 from synapse.events.snapshot import EventContext  # noqa: F401
+from synapse.metrics import BucketCollector
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.state import StateResolutionStore
 from synapse.storage.background_updates import BackgroundUpdateStore
@@ -219,42 +221,38 @@ class EventsStore(
     EventsWorkerStore,
     BackgroundUpdateStore,
 ):
-    EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
-    EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
-
     def __init__(self, db_conn, hs):
         super(EventsStore, self).__init__(db_conn, hs)
-        self.register_background_update_handler(
-            self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
-        )
-        self.register_background_update_handler(
-            self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
-            self._background_reindex_fields_sender,
-        )
 
-        self.register_background_index_update(
-            "event_contains_url_index",
-            index_name="event_contains_url_index",
-            table="events",
-            columns=["room_id", "topological_ordering", "stream_ordering"],
-            where_clause="contains_url = true AND outlier = false",
-        )
+        self._event_persist_queue = _EventPeristenceQueue()
+        self._state_resolution_handler = hs.get_state_resolution_handler()
 
-        # an event_id index on event_search is useful for the purge_history
-        # api. Plus it means we get to enforce some integrity with a UNIQUE
-        # clause
-        self.register_background_index_update(
-            "event_search_event_id_idx",
-            index_name="event_search_event_id_idx",
-            table="event_search",
-            columns=["event_id"],
-            unique=True,
-            psql_only=True,
+        # Collect metrics on the number of forward extremities that exist.
+        # Counter of number of extremities to count
+        self._current_forward_extremities_amount = c_counter()
+
+        BucketCollector(
+            "synapse_forward_extremities",
+            lambda: self._current_forward_extremities_amount,
+            buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"]
         )
 
-        self._event_persist_queue = _EventPeristenceQueue()
+        # Read the extremities every 60 minutes
+        hs.get_clock().looping_call(self._read_forward_extremities, 60 * 60 * 1000)
 
-        self._state_resolution_handler = hs.get_state_resolution_handler()
+    @defer.inlineCallbacks
+    def _read_forward_extremities(self):
+        def fetch(txn):
+            txn.execute(
+                """
+                select count(*) c from event_forward_extremities
+                group by room_id
+                """
+            )
+            return txn.fetchall()
+
+        res = yield self.runInteraction("read_forward_extremities", fetch)
+        self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))
 
     @defer.inlineCallbacks
     def persist_events(self, events_and_contexts, backfilled=False):
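
The metrics change above keeps a Counter of per-room forward-extremity counts and exposes it through BucketCollector. Conceptually the collector folds those raw counts into cumulative histogram buckets; a rough sketch (the real collector also handles metric registration and Prometheus formatting):

    from collections import Counter

    def cumulative_buckets(per_room_counts, buckets):
        """Fold per-room extremity counts into cumulative histogram buckets."""
        counter = Counter(per_room_counts)
        out = {}
        for b in buckets:
            # Number of rooms whose extremity count is <= this bucket bound.
            out[b] = sum(n for value, n in counter.items() if value <= b)
        out["+Inf"] = sum(counter.values())
        return out

    # Rooms with 1, 1, 3 and 120 extremities, against the buckets above:
    # cumulative_buckets([1, 1, 3, 120], [1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500])
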
@@ -554,10 +552,18 @@ class EventsStore(
             e_id for event in new_events for e_id in event.prev_event_ids()
         )
 
-        # Finally, remove any events which are prev_events of any existing events.
+        # Remove any events which are prev_events of any existing events.
         existing_prevs = yield self._get_events_which_are_prevs(result)
         result.difference_update(existing_prevs)
 
+        # Finally handle the case where the new events have soft-failed prev
+        # events. If they do we need to remove them and their prev events,
+        # otherwise we end up with dangling extremities.
+        existing_prevs = yield self._get_prevs_before_rejected(
+            e_id for event in new_events for e_id in event.prev_event_ids()
+        )
+        result.difference_update(existing_prevs)
+
         defer.returnValue(result)
 
     @defer.inlineCallbacks
@@ -573,12 +579,13 @@ class EventsStore(
         """
         results = []
 
-        def _get_events(txn, batch):
+        def _get_events_which_are_prevs_txn(txn, batch):
             sql = """
-            SELECT prev_event_id
+            SELECT prev_event_id, internal_metadata
             FROM event_edges
                 INNER JOIN events USING (event_id)
                 LEFT JOIN rejections USING (event_id)
+                LEFT JOIN event_json USING (event_id)
             WHERE
                 prev_event_id IN (%s)
                 AND NOT events.outlier
@@ -588,14 +595,78 @@ class EventsStore(
             )
 
             txn.execute(sql, batch)
-            results.extend(r[0] for r in txn)
+            results.extend(r[0] for r in txn if not json.loads(r[1]).get("soft_failed"))
 
         for chunk in batch_iter(event_ids, 100):
-            yield self.runInteraction("_get_events_which_are_prevs", _get_events, chunk)
+            yield self.runInteraction(
+                "_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
+            )
 
         defer.returnValue(results)
 
     @defer.inlineCallbacks
+    def _get_prevs_before_rejected(self, event_ids):
+        """Get soft-failed ancestors to remove from the extremities.
+
+        Given a set of events, find all those that have been soft-failed or
+        rejected. Returns those soft failed/rejected events and their prev
+        events (whether soft-failed/rejected or not), and recurses up the
+        prev-event graph until it finds no more soft-failed/rejected events.
+
+        This is used to find extremities that are ancestors of new events, but
+        are separated by soft failed events.
+
+        Args:
+            event_ids (Iterable[str]): Events to find prev events for. Note
+                that these must have already been persisted.
+
+        Returns:
+            Deferred[set[str]]
+        """
+
+        # The set of event_ids to return. This includes all soft-failed events
+        # and their prev events.
+        existing_prevs = set()
+
+        def _get_prevs_before_rejected_txn(txn, batch):
+            to_recursively_check = batch
+
+            while to_recursively_check:
+                sql = """
+                SELECT
+                    event_id, prev_event_id, internal_metadata,
+                    rejections.event_id IS NOT NULL
+                FROM event_edges
+                    INNER JOIN events USING (event_id)
+                    LEFT JOIN rejections USING (event_id)
+                    LEFT JOIN event_json USING (event_id)
+                WHERE
+                    event_id IN (%s)
+                    AND NOT events.outlier
+                """ % (
+                    ",".join("?" for _ in to_recursively_check),
+                )
+
+                txn.execute(sql, to_recursively_check)
+                to_recursively_check = []
+
+                for event_id, prev_event_id, metadata, rejected in txn:
+                    if prev_event_id in existing_prevs:
+                        continue
+
+                    soft_failed = json.loads(metadata).get("soft_failed")
+                    if soft_failed or rejected:
+                        to_recursively_check.append(prev_event_id)
+                        existing_prevs.add(prev_event_id)
+
+        for chunk in batch_iter(event_ids, 100):
+            yield self.runInteraction(
+                "_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
+            )
+
+        defer.returnValue(existing_prevs)
+
+    @defer.inlineCallbacks
     def _get_new_state_after_events(
         self, room_id, events_context, old_latest_event_ids, new_latest_event_ids
     ):
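
_get_prevs_before_rejected walks up the prev-event graph, collecting the prev events of every soft-failed or rejected event until only accepted events remain. An in-memory sketch of that recursion (get_event is a hypothetical lookup; the real code batches this work in SQL):

    def prevs_before_rejected(event_ids, get_event):
        """Collect prev events of soft-failed/rejected events, recursively."""
        existing_prevs = set()
        to_check = list(event_ids)
        while to_check:
            next_round = []
            for event_id in to_check:
                ev = get_event(event_id)  # object with prev_event_ids,
                                          # soft_failed, rejected; or None
                if ev is None or not (ev.soft_failed or ev.rejected):
                    continue
                # A soft-failed/rejected event's prev events are candidate
                # extremities; recurse in case they are soft-failed too.
                for prev_id in ev.prev_event_ids:
                    if prev_id not in existing_prevs:
                        existing_prevs.add(prev_id)
                        next_round.append(prev_id)
            to_check = next_round
        return existing_prevs
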
@@ -1179,14 +1250,10 @@ class EventsStore(
             "events",
             "event_auth",
             "event_json",
-            "event_content_hashes",
-            "event_destinations",
-            "event_edge_hashes",
             "event_edges",
             "event_forward_extremities",
             "event_reference_hashes",
             "event_search",
-            "event_signatures",
             "event_to_state_groups",
             "guest_access",
             "history_visibility",
@@ -1329,6 +1396,9 @@ class EventsStore(
                     txn, event.room_id, event.redacts
                 )
 
+                # Remove from relations table.
+                self._handle_redaction(txn, event.redacts)
+
         # Update the event_forward_extremities, event_backward_extremities and
         # event_edges tables.
         self._handle_mult_prev_events(
@@ -1355,6 +1425,8 @@ class EventsStore(
                 # Insert into the event_search table.
                 self._store_guest_access_txn(txn, event)
 
+            self._handle_event_relations(txn, event)
+
         # Insert into the room_memberships table.
         self._store_room_members_txn(
             txn,
@@ -1497,153 +1569,6 @@ class EventsStore(
         ret = yield self.runInteraction("count_daily_active_rooms", _count)
         defer.returnValue(ret)
 
-    @defer.inlineCallbacks
-    def _background_reindex_fields_sender(self, progress, batch_size):
-        target_min_stream_id = progress["target_min_stream_id_inclusive"]
-        max_stream_id = progress["max_stream_id_exclusive"]
-        rows_inserted = progress.get("rows_inserted", 0)
-
-        INSERT_CLUMP_SIZE = 1000
-
-        def reindex_txn(txn):
-            sql = (
-                "SELECT stream_ordering, event_id, json FROM events"
-                " INNER JOIN event_json USING (event_id)"
-                " WHERE ? <= stream_ordering AND stream_ordering < ?"
-                " ORDER BY stream_ordering DESC"
-                " LIMIT ?"
-            )
-
-            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
-
-            rows = txn.fetchall()
-            if not rows:
-                return 0
-
-            min_stream_id = rows[-1][0]
-
-            update_rows = []
-            for row in rows:
-                try:
-                    event_id = row[1]
-                    event_json = json.loads(row[2])
-                    sender = event_json["sender"]
-                    content = event_json["content"]
-
-                    contains_url = "url" in content
-                    if contains_url:
-                        contains_url &= isinstance(content["url"], text_type)
-                except (KeyError, AttributeError):
-                    # If the event is missing a necessary field then
-                    # skip over it.
-                    continue
-
-                update_rows.append((sender, contains_url, event_id))
-
-            sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?"
-
-            for index in range(0, len(update_rows), INSERT_CLUMP_SIZE):
-                clump = update_rows[index : index + INSERT_CLUMP_SIZE]
-                txn.executemany(sql, clump)
-
-            progress = {
-                "target_min_stream_id_inclusive": target_min_stream_id,
-                "max_stream_id_exclusive": min_stream_id,
-                "rows_inserted": rows_inserted + len(rows),
-            }
-
-            self._background_update_progress_txn(
-                txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
-            )
-
-            return len(rows)
-
-        result = yield self.runInteraction(
-            self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
-        )
-
-        if not result:
-            yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME)
-
-        defer.returnValue(result)
-
-    @defer.inlineCallbacks
-    def _background_reindex_origin_server_ts(self, progress, batch_size):
-        target_min_stream_id = progress["target_min_stream_id_inclusive"]
-        max_stream_id = progress["max_stream_id_exclusive"]
-        rows_inserted = progress.get("rows_inserted", 0)
-
-        INSERT_CLUMP_SIZE = 1000
-
-        def reindex_search_txn(txn):
-            sql = (
-                "SELECT stream_ordering, event_id FROM events"
-                " WHERE ? <= stream_ordering AND stream_ordering < ?"
-                " ORDER BY stream_ordering DESC"
-                " LIMIT ?"
-            )
-
-            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
-
-            rows = txn.fetchall()
-            if not rows:
-                return 0
-
-            min_stream_id = rows[-1][0]
-            event_ids = [row[1] for row in rows]
-
-            rows_to_update = []
-
-            chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)]
-            for chunk in chunks:
-                ev_rows = self._simple_select_many_txn(
-                    txn,
-                    table="event_json",
-                    column="event_id",
-                    iterable=chunk,
-                    retcols=["event_id", "json"],
-                    keyvalues={},
-                )
-
-                for row in ev_rows:
-                    event_id = row["event_id"]
-                    event_json = json.loads(row["json"])
-                    try:
-                        origin_server_ts = event_json["origin_server_ts"]
-                    except (KeyError, AttributeError):
-                        # If the event is missing a necessary field then
-                        # skip over it.
-                        continue
-
-                    rows_to_update.append((origin_server_ts, event_id))
-
-            sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?"
-
-            for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE):
-                clump = rows_to_update[index : index + INSERT_CLUMP_SIZE]
-                txn.executemany(sql, clump)
-
-            progress = {
-                "target_min_stream_id_inclusive": target_min_stream_id,
-                "max_stream_id_exclusive": min_stream_id,
-                "rows_inserted": rows_inserted + len(rows_to_update),
-            }
-
-            self._background_update_progress_txn(
-                txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
-            )
-
-            return len(rows_to_update)
-
-        result = yield self.runInteraction(
-            self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
-        )
-
-        if not result:
-            yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME)
-
-        defer.returnValue(result)
-
     def get_current_backfill_token(self):
         """The current minimum token that backfilled events have reached"""
         return -self._backfill_id_gen.get_current_token()
@@ -1659,10 +1584,11 @@ class EventsStore(
         def get_all_new_forward_event_rows(txn):
             sql = (
                 "SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
-                " state_key, redacts"
+                " state_key, redacts, relates_to_id"
                 " FROM events AS e"
                 " LEFT JOIN redactions USING (event_id)"
                 " LEFT JOIN state_events USING (event_id)"
+                " LEFT JOIN event_relations USING (event_id)"
                 " WHERE ? < stream_ordering AND stream_ordering <= ?"
                 " ORDER BY stream_ordering ASC"
                 " LIMIT ?"
@@ -1677,11 +1603,12 @@ class EventsStore(
 
             sql = (
                 "SELECT event_stream_ordering, e.event_id, e.room_id, e.type,"
-                " state_key, redacts"
+                " state_key, redacts, relates_to_id"
                 " FROM events AS e"
                 " INNER JOIN ex_outlier_stream USING (event_id)"
                 " LEFT JOIN redactions USING (event_id)"
                 " LEFT JOIN state_events USING (event_id)"
+                " LEFT JOIN event_relations USING (event_id)"
                 " WHERE ? < event_stream_ordering"
                 " AND event_stream_ordering <= ?"
                 " ORDER BY event_stream_ordering DESC"
@@ -1702,10 +1629,11 @@ class EventsStore(
         def get_all_new_backfill_event_rows(txn):
             sql = (
                 "SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
-                " state_key, redacts"
+                " state_key, redacts, relates_to_id"
                 " FROM events AS e"
                 " LEFT JOIN redactions USING (event_id)"
                 " LEFT JOIN state_events USING (event_id)"
+                " LEFT JOIN event_relations USING (event_id)"
                 " WHERE ? > stream_ordering AND stream_ordering >= ?"
                 " ORDER BY stream_ordering ASC"
                 " LIMIT ?"
@@ -1720,11 +1648,12 @@ class EventsStore(
 
             sql = (
                 "SELECT -event_stream_ordering, e.event_id, e.room_id, e.type,"
-                " state_key, redacts"
+                " state_key, redacts, relates_to_id"
                 " FROM events AS e"
                 " INNER JOIN ex_outlier_stream USING (event_id)"
                 " LEFT JOIN redactions USING (event_id)"
                 " LEFT JOIN state_events USING (event_id)"
+                " LEFT JOIN event_relations USING (event_id)"
                 " WHERE ? > event_stream_ordering"
                 " AND event_stream_ordering >= ?"
                 " ORDER BY event_stream_ordering DESC"
@@ -1857,16 +1786,12 @@ class EventsStore(
         # Tables that should be pruned:
         #     event_auth
         #     event_backward_extremities
-        #     event_content_hashes
-        #     event_destinations
-        #     event_edge_hashes
         #     event_edges
         #     event_forward_extremities
         #     event_json
         #     event_push_actions
         #     event_reference_hashes
         #     event_search
-        #     event_signatures
         #     event_to_state_groups
         #     events
         #     rejections
@@ -2065,14 +1990,10 @@ class EventsStore(
             "events",
             "event_json",
             "event_auth",
-            "event_content_hashes",
-            "event_destinations",
-            "event_edge_hashes",
             "event_edges",
             "event_forward_extremities",
             "event_reference_hashes",
             "event_search",
-            "event_signatures",
             "rejections",
         ):
             logger.info("[purge] removing events from %s", table)
diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py
new file mode 100644
index 0000000000..75c1935bf3
--- /dev/null
+++ b/synapse/storage/events_bg_updates.py
@@ -0,0 +1,401 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from six import text_type
+
+from canonicaljson import json
+
+from twisted.internet import defer
+
+from synapse.storage.background_updates import BackgroundUpdateStore
+
+logger = logging.getLogger(__name__)
+
+
+class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
+
+    EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
+    EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
+    DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities"
+
+    def __init__(self, db_conn, hs):
+        super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs)
+
+        self.register_background_update_handler(
+            self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
+        )
+        self.register_background_update_handler(
+            self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
+            self._background_reindex_fields_sender,
+        )
+
+        self.register_background_index_update(
+            "event_contains_url_index",
+            index_name="event_contains_url_index",
+            table="events",
+            columns=["room_id", "topological_ordering", "stream_ordering"],
+            where_clause="contains_url = true AND outlier = false",
+        )
+
+        # An event_id index on event_search is useful for the purge_history
+        # API. Plus it means we get to enforce some integrity with a UNIQUE
+        # index.
+        self.register_background_index_update(
+            "event_search_event_id_idx",
+            index_name="event_search_event_id_idx",
+            table="event_search",
+            columns=["event_id"],
+            unique=True,
+            psql_only=True,
+        )
+
+        self.register_background_update_handler(
+            self.DELETE_SOFT_FAILED_EXTREMITIES,
+            self._cleanup_extremities_bg_update,
+        )
+
+    @defer.inlineCallbacks
+    def _background_reindex_fields_sender(self, progress, batch_size):
+        target_min_stream_id = progress["target_min_stream_id_inclusive"]
+        max_stream_id = progress["max_stream_id_exclusive"]
+        rows_inserted = progress.get("rows_inserted", 0)
+
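+        # Number of rows to update per executemany() call.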
+        INSERT_CLUMP_SIZE = 1000
+
+        def reindex_txn(txn):
+            sql = (
+                "SELECT stream_ordering, event_id, json FROM events"
+                " INNER JOIN event_json USING (event_id)"
+                " WHERE ? <= stream_ordering AND stream_ordering < ?"
+                " ORDER BY stream_ordering DESC"
+                " LIMIT ?"
+            )
+
+            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
+
+            rows = txn.fetchall()
+            if not rows:
+                return 0
+
+            min_stream_id = rows[-1][0]
+
+            update_rows = []
+            for row in rows:
+                try:
+                    event_id = row[1]
+                    event_json = json.loads(row[2])
+                    sender = event_json["sender"]
+                    content = event_json["content"]
+
+                    contains_url = "url" in content
+                    if contains_url:
+                        contains_url &= isinstance(content["url"], text_type)
+                except (KeyError, AttributeError):
+                    # If the event is missing a necessary field then
+                    # skip over it.
+                    continue
+
+                update_rows.append((sender, contains_url, event_id))
+
+            sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?"
+
+            for index in range(0, len(update_rows), INSERT_CLUMP_SIZE):
+                clump = update_rows[index : index + INSERT_CLUMP_SIZE]
+                txn.executemany(sql, clump)
+
+            progress = {
+                "target_min_stream_id_inclusive": target_min_stream_id,
+                "max_stream_id_exclusive": min_stream_id,
+                "rows_inserted": rows_inserted + len(rows),
+            }
+
+            self._background_update_progress_txn(
+                txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
+            )
+
+            return len(rows)
+
+        result = yield self.runInteraction(
+            self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
+        )
+
+        if not result:
+            yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME)
+
+        defer.returnValue(result)
+
+    @defer.inlineCallbacks
+    def _background_reindex_origin_server_ts(self, progress, batch_size):
+        target_min_stream_id = progress["target_min_stream_id_inclusive"]
+        max_stream_id = progress["max_stream_id_exclusive"]
+        rows_inserted = progress.get("rows_inserted", 0)
+
+        INSERT_CLUMP_SIZE = 1000
+
+        def reindex_search_txn(txn):
+            sql = (
+                "SELECT stream_ordering, event_id FROM events"
+                " WHERE ? <= stream_ordering AND stream_ordering < ?"
+                " ORDER BY stream_ordering DESC"
+                " LIMIT ?"
+            )
+
+            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
+
+            rows = txn.fetchall()
+            if not rows:
+                return 0
+
+            min_stream_id = rows[-1][0]
+            event_ids = [row[1] for row in rows]
+
+            rows_to_update = []
+
+            chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)]
+            for chunk in chunks:
+                ev_rows = self._simple_select_many_txn(
+                    txn,
+                    table="event_json",
+                    column="event_id",
+                    iterable=chunk,
+                    retcols=["event_id", "json"],
+                    keyvalues={},
+                )
+
+                for row in ev_rows:
+                    event_id = row["event_id"]
+                    event_json = json.loads(row["json"])
+                    try:
+                        origin_server_ts = event_json["origin_server_ts"]
+                    except (KeyError, AttributeError):
+                        # If the event is missing a necessary field then
+                        # skip over it.
+                        continue
+
+                    rows_to_update.append((origin_server_ts, event_id))
+
+            sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?"
+
+            for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE):
+                clump = rows_to_update[index : index + INSERT_CLUMP_SIZE]
+                txn.executemany(sql, clump)
+
+            progress = {
+                "target_min_stream_id_inclusive": target_min_stream_id,
+                "max_stream_id_exclusive": min_stream_id,
+                "rows_inserted": rows_inserted + len(rows_to_update),
+            }
+
+            self._background_update_progress_txn(
+                txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
+            )
+
+            return len(rows_to_update)
+
+        result = yield self.runInteraction(
+            self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
+        )
+
+        if not result:
+            yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME)
+
+        defer.returnValue(result)
+
+    @defer.inlineCallbacks
+    def _cleanup_extremities_bg_update(self, progress, batch_size):
+        """Background update to clean out extremities that should have been
+        deleted previously.
+
+        Mainly used to deal with the aftermath of #5269.
+        """
+
+        # This works by first copying all existing forward extremities into the
+        # `_extremities_to_check` table at startup, and then checking, for each
+        # event in that table, whether it has any descendants that are not
+        # soft-failed/rejected. If it does, we delete that event from the
+        # forward extremities table.
+        #
+        # For efficiency, we do this in batches by recursively pulling out all
+        # descendants of a batch until we find the non soft-failed/rejected
+        # events, i.e. the set of descendants whose chain of prev events back
+        # to the batch of extremities are all soft-failed or rejected.
+        # Typically, we won't find any such events as extremities will rarely
+        # have any descendants, but if they do then we should delete those
+        # extremities.
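+        #
+        # For example, if extremity E has a soft-failed child C whose own
+        # child D is neither soft-failed nor rejected, then E should be
+        # removed from the forward extremities: D's chain of prev events back
+        # to E consists entirely of soft-failed events.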
+
+        def _cleanup_extremities_bg_update_txn(txn):
+            # The set of extremity event IDs that we're checking this round
+            original_set = set()
+
+            # A dict[str, set[str]] mapping each event ID to its prev events.
+            graph = {}
+
+            # The set of descendants of the original set that are neither
+            # rejected nor soft-failed. Ancestors of these events should be
+            # removed from the forward extremities table.
+            non_rejected_leaves = set()
+
+            # Set of event IDs that have been soft failed, and for which we
+            # should check if they have descendants which haven't been soft
+            # failed.
+            soft_failed_events_to_lookup = set()
+
+            # First, we get `batch_size` events from the table, pulling out
+            # their successor events, if any, and the successor events'
+            # rejection status.
+            txn.execute(
+                """SELECT prev_event_id, event_id, internal_metadata,
+                    rejections.event_id IS NOT NULL, events.outlier
+                FROM (
+                    SELECT event_id AS prev_event_id
+                    FROM _extremities_to_check
+                    LIMIT ?
+                ) AS f
+                LEFT JOIN event_edges USING (prev_event_id)
+                LEFT JOIN events USING (event_id)
+                LEFT JOIN event_json USING (event_id)
+                LEFT JOIN rejections USING (event_id)
+                """, (batch_size,)
+            )
+
+            for prev_event_id, event_id, metadata, rejected, outlier in txn:
+                original_set.add(prev_event_id)
+
+                if not event_id or outlier:
+                    # Common case where the forward extremity doesn't have any
+                    # descendants (we also skip descendants that are outliers).
+                    continue
+
+                graph.setdefault(event_id, set()).add(prev_event_id)
+
+                soft_failed = False
+                if metadata:
+                    soft_failed = json.loads(metadata).get("soft_failed")
+
+                if soft_failed or rejected:
+                    soft_failed_events_to_lookup.add(event_id)
+                else:
+                    non_rejected_leaves.add(event_id)
+
+            # Now we recursively check all the soft-failed descendants we
+            # found above in the same way, until we have nothing left to
+            # check.
+            while soft_failed_events_to_lookup:
+                # We only want to check 100 at a time, so we split the set
+                # into a batch to check now and a remainder to defer.
+                batch = list(soft_failed_events_to_lookup)
+                to_check, to_defer = batch[:100], batch[100:]
+                soft_failed_events_to_lookup = set(to_defer)
+
+                sql = """SELECT prev_event_id, event_id, internal_metadata,
+                    rejections.event_id IS NOT NULL
+                    FROM event_edges
+                    INNER JOIN events USING (event_id)
+                    INNER JOIN event_json USING (event_id)
+                    LEFT JOIN rejections USING (event_id)
+                    WHERE
+                        prev_event_id IN (%s)
+                        AND NOT events.outlier
+                """ % (
+                    ",".join("?" for _ in to_check),
+                )
+                txn.execute(sql, to_check)
+
+                for prev_event_id, event_id, metadata, rejected in txn:
+                    if event_id in graph:
+                        # Already handled this event previously, but we still
+                        # want to record the edge.
+                        graph[event_id].add(prev_event_id)
+                        continue
+
+                    graph[event_id] = {prev_event_id}
+
+                    soft_failed = json.loads(metadata).get("soft_failed")
+                    if soft_failed or rejected:
+                        soft_failed_events_to_lookup.add(event_id)
+                    else:
+                        non_rejected_leaves.add(event_id)
+
+            # We have a set of non-soft-failed descendants, so we recurse up
+            # the graph to find all ancestors and add them to the set of event
+            # IDs that we can delete from forward extremities table.
+            to_delete = set()
+            while non_rejected_leaves:
+                event_id = non_rejected_leaves.pop()
+                prev_event_ids = graph.get(event_id, set())
+                non_rejected_leaves.update(prev_event_ids)
+                to_delete.update(prev_event_ids)
+
+            to_delete.intersection_update(original_set)
+
+            deleted = self._simple_delete_many_txn(
+                txn=txn,
+                table="event_forward_extremities",
+                column="event_id",
+                iterable=to_delete,
+                keyvalues={},
+            )
+
+            logger.info(
+                "Deleted %d forward extremities of %d checked, to clean up #5269",
+                deleted,
+                len(original_set),
+            )
+
+            if deleted:
+                # We now need to invalidate the caches of these rooms
+                rows = self._simple_select_many_txn(
+                    txn,
+                    table="events",
+                    column="event_id",
+                    iterable=to_delete,
+                    keyvalues={},
+                    retcols=("room_id",)
+                )
+                room_ids = set(row["room_id"] for row in rows)
+                for room_id in room_ids:
+                    txn.call_after(
+                        self.get_latest_event_ids_in_room.invalidate,
+                        (room_id,)
+                    )
+
+            self._simple_delete_many_txn(
+                txn=txn,
+                table="_extremities_to_check",
+                column="event_id",
+                iterable=original_set,
+                keyvalues={},
+            )
+
+            return len(original_set)
+
+        num_handled = yield self.runInteraction(
+            "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn,
+        )
+
+        if not num_handled:
+            yield self._end_background_update(self.DELETE_SOFT_FAILED_EXTREMITIES)
+
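+            # The background update is complete, so the staging table can be
+            # dropped.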
+            def _drop_table_txn(txn):
+                txn.execute("DROP TABLE _extremities_to_check")
+
+            yield self.runInteraction(
+                "_cleanup_extremities_bg_update_drop_table",
+                _drop_table_txn,
+            )
+
+        defer.returnValue(num_handled)
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index 663991a9b6..cc7df5cf14 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import division
+
 import itertools
 import logging
 from collections import namedtuple
@@ -76,6 +78,43 @@ class EventsWorkerStore(SQLBaseStore):
             desc="get_received_ts",
         )
 
+    def get_received_ts_by_stream_pos(self, stream_ordering):
+        """Given a stream ordering get an approximate timestamp of when it
+        happened.
+
+        This is done by simply taking the received ts of the first event that
+        has a stream ordering greater than or equal to the given stream pos.
+        If none exists returns the current time, on the assumption that it must
+        have happened recently.
+
+        Args:
+            stream_ordering (int)
+
+        Returns:
+            Deferred[int]
+        """
+
+        def _get_approximate_received_ts_txn(txn):
+            sql = """
+                SELECT received_ts FROM events
+                WHERE stream_ordering >= ?
+                LIMIT 1
+            """
+
+            txn.execute(sql, (stream_ordering,))
+            row = txn.fetchone()
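+            # If no event has a stream ordering after the given one, assume
+            # "now": the stream position must be very recent.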
+            if row and row[0]:
+                ts = row[0]
+            else:
+                ts = self.clock.time_msec()
+
+            return ts
+
+        return self.runInteraction(
+            "get_approximate_received_ts",
+            _get_approximate_received_ts_txn,
+        )
+
     @defer.inlineCallbacks
     def get_event(
         self,
@@ -103,7 +142,7 @@ class EventsWorkerStore(SQLBaseStore):
         Returns:
             Deferred : A FrozenEvent.
         """
-        events = yield self._get_events(
+        events = yield self.get_events_as_list(
             [event_id],
             check_redacted=check_redacted,
             get_prev_content=get_prev_content,
@@ -142,7 +181,7 @@ class EventsWorkerStore(SQLBaseStore):
         Returns:
             Deferred : Dict from event_id to event.
         """
-        events = yield self._get_events(
+        events = yield self.get_events_as_list(
             event_ids,
             check_redacted=check_redacted,
             get_prev_content=get_prev_content,
@@ -152,13 +191,32 @@ class EventsWorkerStore(SQLBaseStore):
         defer.returnValue({e.event_id: e for e in events})
 
     @defer.inlineCallbacks
-    def _get_events(
+    def get_events_as_list(
         self,
         event_ids,
         check_redacted=True,
         get_prev_content=False,
         allow_rejected=False,
     ):
+        """Get events from the database and return in a list in the same order
+        as given by `event_ids` arg.
+
+        Args:
+            event_ids (list): The event_ids of the events to fetch
+            check_redacted (bool): If True, check if event has been redacted
+                and redact it.
+            get_prev_content (bool): If True and the event is a state event,
+                include the previous state event's content in the unsigned field.
+            allow_rejected (bool): If True return rejected events.
+
+        Returns:
+            Deferred[list[EventBase]]: List of events fetched from the database.
+            The events are in the same order as the `event_ids` arg.
+
+            Note that the returned list may be smaller than the list of event
+            IDs if not all events could be fetched.
+        """
+
         if not event_ids:
             defer.returnValue([])
 
@@ -202,21 +260,22 @@ class EventsWorkerStore(SQLBaseStore):
                     #
                     # The problem is that we end up at this point when an event
                     # which has been redacted is pulled out of the database by
-                    # _enqueue_events, because _enqueue_events needs to check the
-                    # redaction before it can cache the redacted event. So obviously,
-                    # calling get_event to get the redacted event out of the database
-                    # gives us an infinite loop.
+                    # _enqueue_events, because _enqueue_events needs to check
+                    # the redaction before it can cache the redacted event. So
+                    # obviously, calling get_event to get the redacted event out
+                    # of the database gives us an infinite loop.
                     #
-                    # For now (quick hack to fix during 0.99 release cycle), we just
-                    # go and fetch the relevant row from the db, but it would be nice
-                    # to think about how we can cache this rather than hit the db
-                    # every time we access a redaction event.
+                    # For now (quick hack to fix during 0.99 release cycle), we
+                    # just go and fetch the relevant row from the db, but it
+                    # would be nice to think about how we can cache this rather
+                    # than hit the db every time we access a redaction event.
                     #
                     # One thought on how to do this:
-                    #  1. split _get_events up so that it is divided into (a) get the
-                    #     rawish event from the db/cache, (b) do the redaction/rejection
-                    #     filtering
-                    #  2. have _get_event_from_row just call the first half of that
+                    #  1. split get_events_as_list up so that it is divided into
+                    #     (a) get the rawish event from the db/cache, (b) do the
+                    #     redaction/rejection filtering
+                    #  2. have _get_event_from_row just call the first half of
+                    #     that
 
                     orig_sender = yield self._simple_select_one_onecol(
                         table="events",
@@ -590,4 +649,79 @@ class EventsWorkerStore(SQLBaseStore):
 
             return res
 
-        return self.runInteraction("get_rejection_reasons", f)
+        return self.runInteraction("get_seen_events_with_rejections", f)
+
+    def _get_total_state_event_counts_txn(self, txn, room_id):
+        """
+        See get_total_state_event_counts.
+        """
+        # We join against the events table as that has an index on room_id
+        sql = """
+            SELECT COUNT(*) FROM state_events
+            INNER JOIN events USING (room_id, event_id)
+            WHERE room_id=?
+        """
+        txn.execute(sql, (room_id,))
+        row = txn.fetchone()
+        return row[0] if row else 0
+
+    def get_total_state_event_counts(self, room_id):
+        """
+        Gets the total number of state events in a room.
+
+        Args:
+            room_id (str)
+
+        Returns:
+            Deferred[int]
+        """
+        return self.runInteraction(
+            "get_total_state_event_counts",
+            self._get_total_state_event_counts_txn, room_id
+        )
+
+    def _get_current_state_event_counts_txn(self, txn, room_id):
+        """
+        See get_current_state_event_counts.
+        """
+        sql = "SELECT COUNT(*) FROM current_state_events WHERE room_id=?"
+        txn.execute(sql, (room_id,))
+        row = txn.fetchone()
+        return row[0] if row else 0
+
+    def get_current_state_event_counts(self, room_id):
+        """
+        Gets the current number of state events in a room.
+
+        Args:
+            room_id (str)
+
+        Returns:
+            Deferred[int]
+        """
+        return self.runInteraction(
+            "get_current_state_event_counts",
+            self._get_current_state_event_counts_txn, room_id
+        )
+
+    @defer.inlineCallbacks
+    def get_room_complexity(self, room_id):
+        """
+        Get a rough approximation of the complexity of the room. This is used by
+        remote servers to decide whether they wish to join the room or not.
+        A higher complexity value indicates that being in the room will consume
+        more resources.
+
+        Args:
+            room_id (str)
+
+        Returns:
+            Deferred[dict[str, float]]: map from complexity version to complexity.
+        """
+        state_events = yield self.get_current_state_event_counts(room_id)
+
+        # Call this one "v1", so we can introduce new ones as we want to develop
+        # it.
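+        # Under this scheme a room with 500 state events scores 1.0.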
+        complexity_v1 = round(state_events / 500, 2)
+
+        defer.returnValue({"v1": complexity_v1})
diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py
index 030cd1e5a3..e3655ad8d7 100644
--- a/synapse/storage/keys.py
+++ b/synapse/storage/keys.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,17 +14,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import hashlib
+import itertools
 import logging
 
 import six
 
+import attr
 from signedjson.key import decode_verify_key_bytes
 
-import OpenSSL
-from twisted.internet import defer
-
-from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.util import batch_iter
+from synapse.util.caches.descriptors import cached, cachedList
 
 from ._base import SQLBaseStore
 
@@ -37,114 +37,117 @@ else:
     db_binary_type = memoryview
 
 
+@attr.s(slots=True, frozen=True)
+class FetchKeyResult(object):
+    verify_key = attr.ib()  # VerifyKey: the key itself
+    valid_until_ts = attr.ib()  # int: how long we can use this key for
+
+
 class KeyStore(SQLBaseStore):
-    """Persistence for signature verification keys and tls X.509 certificates
+    """Persistence for signature verification keys
     """
 
-    @defer.inlineCallbacks
-    def get_server_certificate(self, server_name):
-        """Retrieve the TLS X.509 certificate for the given server
+    @cached()
+    def _get_server_verify_key(self, server_name_and_key_id):
+        raise NotImplementedError()
+
+    @cachedList(
+        cached_method_name="_get_server_verify_key", list_name="server_name_and_key_ids"
+    )
+    def get_server_verify_keys(self, server_name_and_key_ids):
+        """
         Args:
-            server_name (bytes): The name of the server.
+            server_name_and_key_ids (iterable[Tuple[str, str]]):
+                iterable of (server_name, key-id) tuples to fetch keys for
+
         Returns:
-            (OpenSSL.crypto.X509): The tls certificate.
+            Deferred: resolves to dict[Tuple[str, str], FetchKeyResult|None]:
+                map from (server_name, key_id) -> FetchKeyResult, or None if the key is
+                unknown
         """
-        tls_certificate_bytes, = yield self._simple_select_one(
-            table="server_tls_certificates",
-            keyvalues={"server_name": server_name},
-            retcols=("tls_certificate",),
-            desc="get_server_certificate",
-        )
-        tls_certificate = OpenSSL.crypto.load_certificate(
-            OpenSSL.crypto.FILETYPE_ASN1, tls_certificate_bytes
-        )
-        defer.returnValue(tls_certificate)
+        keys = {}
 
-    def store_server_certificate(
-        self, server_name, from_server, time_now_ms, tls_certificate
-    ):
-        """Stores the TLS X.509 certificate for the given server
-        Args:
-            server_name (str): The name of the server.
-            from_server (str): Where the certificate was looked up
-            time_now_ms (int): The time now in milliseconds
-            tls_certificate (OpenSSL.crypto.X509): The X.509 certificate.
-        """
-        tls_certificate_bytes = OpenSSL.crypto.dump_certificate(
-            OpenSSL.crypto.FILETYPE_ASN1, tls_certificate
-        )
-        fingerprint = hashlib.sha256(tls_certificate_bytes).hexdigest()
-        return self._simple_upsert(
-            table="server_tls_certificates",
-            keyvalues={"server_name": server_name, "fingerprint": fingerprint},
-            values={
-                "from_server": from_server,
-                "ts_added_ms": time_now_ms,
-                "tls_certificate": db_binary_type(tls_certificate_bytes),
-            },
-            desc="store_server_certificate",
-        )
+        def _get_keys(txn, batch):
+            """Processes a batch of keys to fetch, and adds the result to `keys`."""
 
-    @cachedInlineCallbacks()
-    def _get_server_verify_key(self, server_name, key_id):
-        verify_key_bytes = yield self._simple_select_one_onecol(
-            table="server_signature_keys",
-            keyvalues={"server_name": server_name, "key_id": key_id},
-            retcol="verify_key",
-            desc="_get_server_verify_key",
-            allow_none=True,
-        )
+            # batch_iter always returns tuples so it's safe to do len(batch)
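+            # The "WHERE 1=0" base clause never matches; it just lets us
+            # append one "OR (server_name=? AND key_id=?)" per key to fetch.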
+            sql = (
+                "SELECT server_name, key_id, verify_key, ts_valid_until_ms "
+                "FROM server_signature_keys WHERE 1=0"
+            ) + " OR (server_name=? AND key_id=?)" * len(batch)
 
-        if verify_key_bytes:
-            defer.returnValue(decode_verify_key_bytes(key_id, bytes(verify_key_bytes)))
+            txn.execute(sql, tuple(itertools.chain.from_iterable(batch)))
 
-    @defer.inlineCallbacks
-    def get_server_verify_keys(self, server_name, key_ids):
-        """Retrieve the NACL verification key for a given server for the given
-        key_ids
-        Args:
-            server_name (str): The name of the server.
-            key_ids (iterable[str]): key_ids to try and look up.
-        Returns:
-            Deferred: resolves to dict[str, VerifyKey]: map from
-               key_id to verification key.
-        """
-        keys = {}
-        for key_id in key_ids:
-            key = yield self._get_server_verify_key(server_name, key_id)
-            if key:
-                keys[key_id] = key
-        defer.returnValue(keys)
-
-    def store_server_verify_key(
-        self, server_name, from_server, time_now_ms, verify_key
-    ):
-        """Stores a NACL verification key for the given server.
-        Args:
-            server_name (str): The name of the server.
-            from_server (str): Where the verification key was looked up
-            time_now_ms (int): The time now in milliseconds
-            verify_key (nacl.signing.VerifyKey): The NACL verify key.
-        """
-        key_id = "%s:%s" % (verify_key.alg, verify_key.version)
+            for row in txn:
+                server_name, key_id, key_bytes, ts_valid_until_ms = row
+
+                if ts_valid_until_ms is None:
+                    # Old keys may be stored with a ts_valid_until_ms of null,
+                    # in which case we treat this as if it were set to `0`, i.e.
+                    # it won't match key requests that define a minimum
+                    # `ts_valid_until_ms`.
+                    ts_valid_until_ms = 0
+
+                res = FetchKeyResult(
+                    verify_key=decode_verify_key_bytes(key_id, bytes(key_bytes)),
+                    valid_until_ts=ts_valid_until_ms,
+                )
+                keys[(server_name, key_id)] = res
 
-        # XXX fix this to not need a lock (#3819)
         def _txn(txn):
-            self._simple_upsert_txn(
-                txn,
-                table="server_signature_keys",
-                keyvalues={"server_name": server_name, "key_id": key_id},
-                values={
-                    "from_server": from_server,
-                    "ts_added_ms": time_now_ms,
-                    "verify_key": db_binary_type(verify_key.encode()),
-                },
-            )
-            txn.call_after(
-                self._get_server_verify_key.invalidate, (server_name, key_id)
-            )
+            for batch in batch_iter(server_name_and_key_ids, 50):
+                _get_keys(txn, batch)
+            return keys
+
+        return self.runInteraction("get_server_verify_keys", _txn)
 
-        return self.runInteraction("store_server_verify_key", _txn)
+    def store_server_verify_keys(self, from_server, ts_added_ms, verify_keys):
+        """Stores NACL verification keys for remote servers.
+        Args:
+            from_server (str): Where the verification keys were looked up
+            ts_added_ms (int): The time to record that the key was added
+            verify_keys (iterable[tuple[str, str, FetchKeyResult]]):
+                keys to be stored. Each entry is a triplet of
+                (server_name, key_id, key).
+        """
+        key_values = []
+        value_values = []
+        invalidations = []
+        for server_name, key_id, fetch_result in verify_keys:
+            key_values.append((server_name, key_id))
+            value_values.append(
+                (
+                    from_server,
+                    ts_added_ms,
+                    fetch_result.valid_until_ts,
+                    db_binary_type(fetch_result.verify_key.encode()),
+                )
+            )
+            # invalidate takes a tuple corresponding to the params of
+            # _get_server_verify_key. _get_server_verify_key only takes one
+            # param, which is itself the 2-tuple (server_name, key_id).
+            invalidations.append((server_name, key_id))
+
+        def _invalidate(res):
+            f = self._get_server_verify_key.invalidate
+            for i in invalidations:
+                f((i,))
+            return res
+
+        return self.runInteraction(
+            "store_server_verify_keys",
+            self._simple_upsert_many_txn,
+            table="server_signature_keys",
+            key_names=("server_name", "key_id"),
+            key_values=key_values,
+            value_names=(
+                "from_server",
+                "ts_added_ms",
+                "ts_valid_until_ms",
+                "verify_key",
+            ),
+            value_values=value_values,
+        ).addCallback(_invalidate)
 
     def store_server_keys_json(
         self, server_name, key_id, from_server, ts_now_ms, ts_expires_ms, key_json_bytes
@@ -188,8 +191,8 @@ class KeyStore(SQLBaseStore):
         Args:
             server_keys (list): List of (server_name, key_id, source) triplets.
         Returns:
-            Dict mapping (server_name, key_id, source) triplets to dicts with
-            "ts_valid_until_ms" and "key_json" keys.
+            Deferred[dict[Tuple[str, str, str|None], list[dict]]]:
+                Dict mapping (server_name, key_id, source) triplets to lists of dicts
         """
 
         def _get_server_keys_json_txn(txn):
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index c1711bc8bd..f2c1bed487 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -20,12 +20,14 @@ import logging
 import os
 import re
 
+from synapse.storage.engines.postgres import PostgresEngine
+
 logger = logging.getLogger(__name__)
 
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 54
+SCHEMA_VERSION = 55
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
@@ -115,8 +117,16 @@ def _setup_new_database(cur, database_engine):
 
     valid_dirs = []
     pattern = re.compile(r"^\d+(\.sql)?$")
+
+    if isinstance(database_engine, PostgresEngine):
+        specific = "postgres"
+    else:
+        specific = "sqlite"
+
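+    # Also accept version directories with a database-specific suffix, e.g.
+    # "16.sql.postgres".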
+    specific_pattern = re.compile(r"^\d+(\.sql\." + specific + r")?$")
+
     for filename in directory_entries:
-        match = pattern.match(filename)
+        match = pattern.match(filename) or specific_pattern.match(filename)
         abs_path = os.path.join(current_dir, filename)
         if match and os.path.isdir(abs_path):
             ver = int(match.group(0))
@@ -136,7 +146,9 @@ def _setup_new_database(cur, database_engine):
 
     directory_entries = os.listdir(sql_dir)
 
-    for filename in fnmatch.filter(directory_entries, "*.sql"):
+    for filename in sorted(
+        fnmatch.filter(directory_entries, "*.sql")
+        + fnmatch.filter(directory_entries, "*.sql." + specific)
+    ):
         sql_loc = os.path.join(sql_dir, filename)
         logger.debug("Applying schema %s", sql_loc)
         executescript(cur, sql_loc)
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index e30b86c346..d36917e4d6 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014 - 2016 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,25 +15,32 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
 import re
 
+from six import iterkeys
 from six.moves import range
 
 from twisted.internet import defer
 
 from synapse.api.constants import UserTypes
-from synapse.api.errors import Codes, StoreError
+from synapse.api.errors import Codes, StoreError, ThreepidValidationError
 from synapse.storage import background_updates
 from synapse.storage._base import SQLBaseStore
 from synapse.types import UserID
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 
+THIRTY_MINUTES_IN_MS = 30 * 60 * 1000
+
+logger = logging.getLogger(__name__)
+
 
 class RegistrationWorkerStore(SQLBaseStore):
     def __init__(self, db_conn, hs):
         super(RegistrationWorkerStore, self).__init__(db_conn, hs)
 
         self.config = hs.config
+        self.clock = hs.get_clock()
 
     @cached()
     def get_user_by_id(self, user_id):
@@ -86,6 +95,176 @@ class RegistrationWorkerStore(SQLBaseStore):
             "get_user_by_access_token", self._query_for_auth, token
         )
 
+    @cachedInlineCallbacks()
+    def get_expiration_ts_for_user(self, user_id):
+        """Get the expiration timestamp for the account bearing a given user ID.
+
+        Args:
+            user_id (str): The ID of the user.
+        Returns:
+            Deferred[int|None]: The expiration timestamp, in milliseconds since
+                the epoch, or None if the account has no expiration timestamp.
+        """
+        res = yield self._simple_select_one_onecol(
+            table="account_validity",
+            keyvalues={"user_id": user_id},
+            retcol="expiration_ts_ms",
+            allow_none=True,
+            desc="get_expiration_ts_for_user",
+        )
+        defer.returnValue(res)
+
+    @defer.inlineCallbacks
+    def set_account_validity_for_user(self, user_id, expiration_ts, email_sent,
+                                      renewal_token=None):
+        """Updates the account validity properties of the given account, with the
+        given values.
+
+        Args:
+            user_id (str): ID of the account to update properties for.
+            expiration_ts (int): New expiration date, as a timestamp in milliseconds
+                since epoch.
+            email_sent (bool): True means a renewal email has been sent for this
+                account and there's no need to send another one for the current validity
+                period.
+            renewal_token (str): Renewal token the user can use to extend the validity
+                of their account. Defaults to no token.
+        """
+        def set_account_validity_for_user_txn(txn):
+            self._simple_update_txn(
+                txn=txn,
+                table="account_validity",
+                keyvalues={"user_id": user_id},
+                updatevalues={
+                    "expiration_ts_ms": expiration_ts,
+                    "email_sent": email_sent,
+                    "renewal_token": renewal_token,
+                },
+            )
+            self._invalidate_cache_and_stream(
+                txn, self.get_expiration_ts_for_user, (user_id,),
+            )
+
+        yield self.runInteraction(
+            "set_account_validity_for_user",
+            set_account_validity_for_user_txn,
+        )
+
+    @defer.inlineCallbacks
+    def set_renewal_token_for_user(self, user_id, renewal_token):
+        """Defines a renewal token for a given user.
+
+        Args:
+            user_id (str): ID of the user to set the renewal token for.
+            renewal_token (str): Random unique string that will be used to renew the
+                user's account.
+
+        Raises:
+            StoreError: The provided token is already set for another user.
+        """
+        yield self._simple_update_one(
+            table="account_validity",
+            keyvalues={"user_id": user_id},
+            updatevalues={"renewal_token": renewal_token},
+            desc="set_renewal_token_for_user",
+        )
+
+    @defer.inlineCallbacks
+    def get_user_from_renewal_token(self, renewal_token):
+        """Get a user ID from a renewal token.
+
+        Args:
+            renewal_token (str): The renewal token to perform the lookup with.
+
+        Returns:
+            defer.Deferred[str]: The ID of the user to which the token belongs.
+        """
+        res = yield self._simple_select_one_onecol(
+            table="account_validity",
+            keyvalues={"renewal_token": renewal_token},
+            retcol="user_id",
+            desc="get_user_from_renewal_token",
+        )
+
+        defer.returnValue(res)
+
+    @defer.inlineCallbacks
+    def get_renewal_token_for_user(self, user_id):
+        """Get the renewal token associated with a given user ID.
+
+        Args:
+            user_id (str): The user ID to lookup a token for.
+
+        Returns:
+            defer.Deferred[str]: The renewal token associated with this user ID.
+        """
+        res = yield self._simple_select_one_onecol(
+            table="account_validity",
+            keyvalues={"user_id": user_id},
+            retcol="renewal_token",
+            desc="get_renewal_token_for_user",
+        )
+
+        defer.returnValue(res)
+
+    @defer.inlineCallbacks
+    def get_users_expiring_soon(self):
+        """Selects users whose account will expire in the [now, now + renew_at] time
+        window (see configuration for account_validity for information on what renew_at
+        refers to).
+
+        Returns:
+            Deferred[list[dict]]: one dict per user, with "user_id" (str) and
+                "expiration_ts_ms" (int) keys.
+        """
+        def select_users_txn(txn, now_ms, renew_at):
+            sql = (
+                "SELECT user_id, expiration_ts_ms FROM account_validity"
+                " WHERE email_sent = ? AND (expiration_ts_ms - ?) <= ?"
+            )
+            values = [False, now_ms, renew_at]
+            txn.execute(sql, values)
+            return self.cursor_to_dict(txn)
+
+        res = yield self.runInteraction(
+            "get_users_expiring_soon",
+            select_users_txn,
+            self.clock.time_msec(), self.config.account_validity.renew_at,
+        )
+
+        defer.returnValue(res)
+
+    @defer.inlineCallbacks
+    def set_renewal_mail_status(self, user_id, email_sent):
+        """Sets or unsets the flag that indicates whether a renewal email has been sent
+        to the user (and the user hasn't renewed their account yet).
+
+        Args:
+            user_id (str): ID of the user to set/unset the flag for.
+            email_sent (bool): Flag which indicates whether a renewal email has been sent
+                to this user.
+        """
+        yield self._simple_update_one(
+            table="account_validity",
+            keyvalues={"user_id": user_id},
+            updatevalues={"email_sent": email_sent},
+            desc="set_renewal_mail_status",
+        )
+
+    @defer.inlineCallbacks
+    def delete_account_validity_for_user(self, user_id):
+        """Deletes the entry for the given user in the account validity table, removing
+        their expiration date and renewal token.
+
+        Args:
+            user_id (str): ID of the user to remove from the account validity table.
+        """
+        yield self._simple_delete_one(
+            table="account_validity",
+            keyvalues={"user_id": user_id},
+            desc="delete_account_validity_for_user",
+        )
+
     @defer.inlineCallbacks
     def is_server_admin(self, user):
         res = yield self._simple_select_one_onecol(
@@ -263,7 +442,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         defer.returnValue(None)
 
     @defer.inlineCallbacks
-    def get_user_id_by_threepid(self, medium, address):
+    def get_user_id_by_threepid(self, medium, address, require_verified=False):
         """Returns user id from threepid
 
         Args:
@@ -425,6 +604,8 @@ class RegistrationStore(
             columns=["creation_ts"],
         )
 
+        self._account_validity = hs.config.account_validity
+
         # we no longer use refresh tokens, but it's possible that some people
         # might have a background update queued to build this index. Just
         # clear the background update.
@@ -434,6 +615,77 @@ class RegistrationStore(
             "user_threepids_grandfather", self._bg_user_threepids_grandfather,
         )
 
+        self.register_background_update_handler(
+            "users_set_deactivated_flag", self._backgroud_update_set_deactivated_flag,
+        )
+
+        # Create a background job for culling expired 3PID validity tokens
+        hs.get_clock().looping_call(
+            self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS,
+        )
+
+    @defer.inlineCallbacks
+    def _background_update_set_deactivated_flag(self, progress, batch_size):
+        """Retrieves a list of all deactivated users and sets the 'deactivated' flag to 1
+        for each of them.
+        """
+
+        last_user = progress.get("user_id", "")
+
+        def _background_update_set_deactivated_flag_txn(txn):
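+            # For each candidate user, count their access tokens and threepids;
+            # only users with neither get flagged as deactivated below.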
+            txn.execute(
+                """
+                SELECT
+                    users.name,
+                    COUNT(access_tokens.token) AS count_tokens,
+                    COUNT(user_threepids.address) AS count_threepids
+                FROM users
+                    LEFT JOIN access_tokens ON (access_tokens.user_id = users.name)
+                    LEFT JOIN user_threepids ON (user_threepids.user_id = users.name)
+                WHERE (users.password_hash IS NULL OR users.password_hash = '')
+                AND (users.appservice_id IS NULL OR users.appservice_id = '')
+                AND users.is_guest = 0
+                AND users.name > ?
+                GROUP BY users.name
+                ORDER BY users.name ASC
+                LIMIT ?;
+                """,
+                (last_user, batch_size),
+            )
+
+            rows = self.cursor_to_dict(txn)
+
+            if not rows:
+                return True
+
+            rows_processed_nb = 0
+
+            for user in rows:
+                if not user["count_tokens"] and not user["count_threepids"]:
+                    self.set_user_deactivated_status_txn(txn, user["name"], True)
+                    rows_processed_nb += 1
+
+            logger.info("Marked %d rows as deactivated", rows_processed_nb)
+
+            self._background_update_progress_txn(
+                txn, "users_set_deactivated_flag", {"user_id": rows[-1]["name"]}
+            )
+
+            return batch_size > len(rows)
+
+        end = yield self.runInteraction(
+            "users_set_deactivated_flag",
+            _background_update_set_deactivated_flag_txn,
+        )
+
+        if end:
+            yield self._end_background_update("users_set_deactivated_flag")
+
+        defer.returnValue(batch_size)
+
     @defer.inlineCallbacks
     def add_access_token_to_user(self, user_id, token, device_id=None):
         """Adds an access token for the given user.
@@ -561,9 +813,13 @@ class RegistrationStore(
                         "user_type": user_type,
                     },
                 )
+
         except self.database_engine.module.IntegrityError:
             raise StoreError(400, "User ID already taken.", errcode=Codes.USER_IN_USE)
 
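+        # If account validity is enabled, give the new account an initial
+        # expiration date.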
+        if self._account_validity.enabled:
+            self.set_expiration_date_for_user_txn(txn, user_id)
+
         if token:
             # it's possible for this to get a conflict, but only for a single user
             # since tokens are namespaced based on their user ID
@@ -798,7 +1054,6 @@ class RegistrationStore(
         We do this by grandfathering in existing user threepids assuming that
         they used one of the server configured trusted identity servers.
         """
-
         id_servers = set(self.config.trusted_third_party_id_servers)
 
         def _bg_user_threepids_grandfather_txn(txn):
@@ -819,3 +1074,327 @@ class RegistrationStore(
         yield self._end_background_update("user_threepids_grandfather")
 
         defer.returnValue(1)
+
+    def get_threepid_validation_session(
+        self,
+        medium,
+        client_secret,
+        address=None,
+        sid=None,
+        validated=True,
+    ):
+        """Gets a session_id and last_send_attempt (if available) for a
+        client_secret/medium/(address|session_id) combo
+
+        Args:
+            medium (str|None): The medium of the 3PID
+            address (str|None): The address of the 3PID
+            sid (str|None): The ID of the validation session
+            client_secret (str|None): A unique string provided by the client to
+                help identify this validation attempt
+            validated (bool|None): Whether sessions should be filtered by
+                whether they have been validated already or not. None to
+                perform no filtering
+
+        Returns:
+            Deferred[dict|None]: A dict describing the latest matching
+                session, including its session_id and last_send_attempt
+                count, or None if there hasn't been a previous attempt
+        """
+        keyvalues = {
+            "medium": medium,
+            "client_secret": client_secret,
+        }
+        if address:
+            keyvalues["address"] = address
+        if sid:
+            keyvalues["session_id"] = sid
+
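+        # At least one of address or sid is needed to narrow the lookup down
+        # to a single 3PID or validation session.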
+        assert address or sid
+
+        def get_threepid_validation_session_txn(txn):
+            sql = """
+                SELECT address, session_id, medium, client_secret,
+                last_send_attempt, validated_at
+                FROM threepid_validation_session WHERE %s
+                """ % (" AND ".join("%s = ?" % k for k in iterkeys(keyvalues)),)
+
+            if validated is not None:
+                sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL")
+
+            sql += " LIMIT 1"
+
+            txn.execute(sql, list(keyvalues.values()))
+            rows = self.cursor_to_dict(txn)
+            if not rows:
+                return None
+
+            return rows[0]
+
+        return self.runInteraction(
+            "get_threepid_validation_session",
+            get_threepid_validation_session_txn,
+        )
+
+    def validate_threepid_session(
+        self,
+        session_id,
+        client_secret,
+        token,
+        current_ts,
+    ):
+        """Attempt to validate a threepid session using a token
+
+        Args:
+            session_id (str): The id of a validation session
+            client_secret (str): A unique string provided by the client to
+                help identify this validation attempt
+            token (str): A validation token
+            current_ts (int): The current unix time in milliseconds. Used for
+                checking token expiry status
+
+        Returns:
+            Deferred[str|None]: A link to redirect the user to, if there is
+            one; otherwise None.
+        """
+        # Run everything in one transaction so the checks and the update are atomic
+        def validate_threepid_session_txn(txn):
+            row = self._simple_select_one_txn(
+                txn,
+                table="threepid_validation_session",
+                keyvalues={"session_id": session_id},
+                retcols=["client_secret", "validated_at"],
+                allow_none=True,
+            )
+
+            if not row:
+                raise ThreepidValidationError(400, "Unknown session_id")
+            retrieved_client_secret = row["client_secret"]
+            validated_at = row["validated_at"]
+
+            if retrieved_client_secret != client_secret:
+                raise ThreepidValidationError(
+                    400, "This client_secret does not match the provided session_id",
+                )
+
+            row = self._simple_select_one_txn(
+                txn,
+                table="threepid_validation_token",
+                keyvalues={"session_id": session_id, "token": token},
+                retcols=["expires", "next_link"],
+                allow_none=True,
+            )
+
+            if not row:
+                raise ThreepidValidationError(
+                    400, "Validation token not found or has expired",
+                )
+            expires = row["expires"]
+            next_link = row["next_link"]
+
+            # If the session is already validated, no need to revalidate
+            if validated_at:
+                return next_link
+
+            if expires <= current_ts:
+                raise ThreepidValidationError(
+                    400, "This token has expired. Please request a new one",
+                )
+
+            # Looks good. Validate the session
+            self._simple_update_txn(
+                txn,
+                table="threepid_validation_session",
+                keyvalues={"session_id": session_id},
+                updatevalues={"validated_at": self.clock.time_msec()},
+            )
+
+            return next_link
+
+        # Return next_link if it exists
+        return self.runInteraction(
+            "validate_threepid_session_txn",
+            validate_threepid_session_txn,
+        )
+
+    def upsert_threepid_validation_session(
+        self,
+        medium,
+        address,
+        client_secret,
+        send_attempt,
+        session_id,
+        validated_at=None,
+    ):
+        """Upsert a threepid validation session
+        Args:
+            medium (str): The medium of the 3PID
+            address (str): The address of the 3PID
+            client_secret (str): A unique string provided by the client to
+                help identify this validation attempt
+            send_attempt (int): The latest send_attempt on this session
+            session_id (str): The id of this validation session
+            validated_at (int|None): The unix timestamp in milliseconds of
+                when the session was marked as valid
+        """
+        insertion_values = {
+            "medium": medium,
+            "address": address,
+            "client_secret": client_secret,
+        }
+
+        if validated_at:
+            insertion_values["validated_at"] = validated_at
+
+        return self._simple_upsert(
+            table="threepid_validation_session",
+            keyvalues={"session_id": session_id},
+            values={"last_send_attempt": send_attempt},
+            insertion_values=insertion_values,
+            desc="upsert_threepid_validation_session",
+        )
+
+    def start_or_continue_validation_session(
+        self,
+        medium,
+        address,
+        session_id,
+        client_secret,
+        send_attempt,
+        next_link,
+        token,
+        token_expires,
+    ):
+        """Creates a new threepid validation session if it does not already
+        exist and associates a new validation token with it
+
+        Args:
+            medium (str): The medium of the 3PID
+            address (str): The address of the 3PID
+            session_id (str): The id of this validation session
+            client_secret (str): A unique string provided by the client to
+                help identify this validation attempt
+            send_attempt (int): The latest send_attempt on this session
+            next_link (str|None): The link to redirect the user to upon
+                successful validation
+            token (str): The validation token
+            token_expires (int): The timestamp after which the token will no
+                longer be valid
+        """
+        def start_or_continue_validation_session_txn(txn):
+            # Create or update a validation session
+            self._simple_upsert_txn(
+                txn,
+                table="threepid_validation_session",
+                keyvalues={"session_id": session_id},
+                values={"last_send_attempt": send_attempt},
+                insertion_values={
+                    "medium": medium,
+                    "address": address,
+                    "client_secret": client_secret,
+                },
+            )
+
+            # Create a new validation token with this session ID
+            self._simple_insert_txn(
+                txn,
+                table="threepid_validation_token",
+                values={
+                    "session_id": session_id,
+                    "token": token,
+                    "next_link": next_link,
+                    "expires": token_expires,
+                },
+            )
+
+        return self.runInteraction(
+            "start_or_continue_validation_session",
+            start_or_continue_validation_session_txn,
+        )
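+
+    # An illustrative sketch of the intended validation flow (assumed usage;
+    # the actual call sites live in the handlers):
+    #
+    #   yield store.start_or_continue_validation_session(
+    #       "email", "user@example.com", session_id, client_secret,
+    #       send_attempt, next_link, token, token_expires,
+    #   )
+    #   ... send the token to the user out of band ...
+    #   next_link = yield store.validate_threepid_session(
+    #       session_id, client_secret, token, self.clock.time_msec(),
+    #   )
+    #   yield store.delete_threepid_session(session_id)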
+
+    def cull_expired_threepid_validation_tokens(self):
+        """Remove threepid validation tokens with expiry dates that have passed"""
+        def cull_expired_threepid_validation_tokens_txn(txn, ts):
+            sql = """
+            DELETE FROM threepid_validation_token WHERE
+            expires < ?
+            """
+            return txn.execute(sql, (ts,))
+
+        return self.runInteraction(
+            "cull_expired_threepid_validation_tokens",
+            cull_expired_threepid_validation_tokens_txn,
+            self.clock.time_msec(),
+        )
+
+    def delete_threepid_session(self, session_id):
+        """Removes a threepid validation session from the database. This can
+        be done after validation has been performed and whatever action was
+        waiting on it has been carried out
+
+        Args:
+            session_id (str): The ID of the session to delete
+        """
+        def delete_threepid_session_txn(txn):
+            self._simple_delete_txn(
+                txn,
+                table="threepid_validation_token",
+                keyvalues={"session_id": session_id},
+            )
+            self._simple_delete_txn(
+                txn,
+                table="threepid_validation_session",
+                keyvalues={"session_id": session_id},
+            )
+
+        return self.runInteraction(
+            "delete_threepid_session",
+            delete_threepid_session_txn,
+        )
+
+    def set_user_deactivated_status_txn(self, txn, user_id, deactivated):
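+        """Set the `deactivated` flag for a user within an existing transaction.
+
+        Args:
+            txn: The database transaction object.
+            user_id (str): The ID of the user to set the status for.
+            deactivated (bool): The value to set for `deactivated`.
+        """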
+        self._simple_update_one_txn(
+            txn=txn,
+            table="users",
+            keyvalues={"name": user_id},
+            updatevalues={"deactivated": 1 if deactivated else 0},
+        )
+        self._invalidate_cache_and_stream(
+            txn, self.get_user_deactivated_status, (user_id,),
+        )
+
+    @defer.inlineCallbacks
+    def set_user_deactivated_status(self, user_id, deactivated):
+        """Set the `deactivated` property for the provided user to the provided value.
+
+        Args:
+            user_id (str): The ID of the user to set the status for.
+            deactivated (bool): The value to set for `deactivated`.
+        """
+
+        yield self.runInteraction(
+            "set_user_deactivated_status",
+            self.set_user_deactivated_status_txn,
+            user_id, deactivated,
+        )
+
+    @cachedInlineCallbacks()
+    def get_user_deactivated_status(self, user_id):
+        """Retrieve the value for the `deactivated` property for the provided user.
+
+        Args:
+            user_id (str): The ID of the user to retrieve the status for.
+
+        Returns:
+            defer.Deferred(bool): The requested value.
+        """
+
+        res = yield self._simple_select_one_onecol(
+            table="users",
+            keyvalues={"name": user_id},
+            retcol="deactivated",
+            desc="get_user_deactivated_status",
+        )
+
+        # Convert the integer into a boolean.
+        defer.returnValue(res == 1)
diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py
new file mode 100644
index 0000000000..4c83800cca
--- /dev/null
+++ b/synapse/storage/relations.py
@@ -0,0 +1,476 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import attr
+
+from twisted.internet import defer
+
+from synapse.api.constants import RelationTypes
+from synapse.api.errors import SynapseError
+from synapse.storage._base import SQLBaseStore
+from synapse.storage.stream import generate_pagination_where_clause
+from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s
+class PaginationChunk(object):
+    """Returned by relation pagination APIs.
+
+    Attributes:
+        chunk (list): The rows returned by pagination
+        next_batch (Any|None): Token to fetch next set of results with, if
+            None then there are no more results.
+        prev_batch (Any|None): Token to fetch previous set of results with, if
+            None then there are no previous results.
+    """
+
+    chunk = attr.ib()
+    next_batch = attr.ib(default=None)
+    prev_batch = attr.ib(default=None)
+
+    def to_dict(self):
+        d = {"chunk": self.chunk}
+
+        if self.next_batch:
+            d["next_batch"] = self.next_batch.to_string()
+
+        if self.prev_batch:
+            d["prev_batch"] = self.prev_batch.to_string()
+
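+        # The resulting dict is what the pagination APIs return, e.g.
+        # {"chunk": [...], "next_batch": "3-42"} when a next page exists.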
+        return d
+
+
+@attr.s(frozen=True, slots=True)
+class RelationPaginationToken(object):
+    """Pagination token for relation pagination API.
+
+    As the results are ordered by topological ordering, we can use the
+    `topological_ordering` and `stream_ordering` fields of the events at the
+    boundaries of the chunk as pagination tokens.
+
+    Attributes:
+        topological (int): The topological ordering of the boundary event
+        stream (int): The stream ordering of the boundary event.
+    """
+
+    topological = attr.ib()
+    stream = attr.ib()
+
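+    # Tokens serialise as "<topological>-<stream>"; e.g.
+    # RelationPaginationToken(3, 42) round-trips via to_string() as "3-42".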
+    @staticmethod
+    def from_string(string):
+        try:
+            t, s = string.split("-")
+            return RelationPaginationToken(int(t), int(s))
+        except ValueError:
+            raise SynapseError(400, "Invalid token")
+
+    def to_string(self):
+        return "%d-%d" % (self.topological, self.stream)
+
+    def as_tuple(self):
+        return attr.astuple(self)
+
+
+@attr.s(frozen=True, slots=True)
+class AggregationPaginationToken(object):
+    """Pagination token for relation aggregation pagination API.
+
+    As the results are ordered by count and then MAX(stream_ordering) of the
+    aggregation groups, we can just use them as our pagination token.
+
+    Attributes:
+        count (int): The count of relations in the boundary group.
+        stream (int): The MAX stream ordering in the boundary group.
+    """
+
+    count = attr.ib()
+    stream = attr.ib()
+
+    @staticmethod
+    def from_string(string):
+        try:
+            c, s = string.split("-")
+            return AggregationPaginationToken(int(c), int(s))
+        except ValueError:
+            raise SynapseError(400, "Invalid token")
+
+    def to_string(self):
+        return "%d-%d" % (self.count, self.stream)
+
+    def as_tuple(self):
+        return attr.astuple(self)
+
+
+class RelationsWorkerStore(SQLBaseStore):
+    @cached(tree=True)
+    def get_relations_for_event(
+        self,
+        event_id,
+        relation_type=None,
+        event_type=None,
+        aggregation_key=None,
+        limit=5,
+        direction="b",
+        from_token=None,
+        to_token=None,
+    ):
+        """Get a list of relations for an event, ordered by topological ordering.
+
+        Args:
+            event_id (str): Fetch events that relate to this event ID.
+            relation_type (str|None): Only fetch events with this relation
+                type, if given.
+            event_type (str|None): Only fetch events with this event type, if
+                given.
+            aggregation_key (str|None): Only fetch events with this aggregation
+                key, if given.
+            limit (int): Only fetch the most recent `limit` events.
+            direction (str): Whether to fetch the most recent first (`"b"`) or
+                the oldest first (`"f"`).
+            from_token (RelationPaginationToken|None): Fetch rows from the given
+                token, or from the start if None.
+            to_token (RelationPaginationToken|None): Fetch rows up to the given
+                token, or up to the end if None.
+
+        Returns:
+            Deferred[PaginationChunk]: List of event IDs that match relations
+            requested. The rows are of the form `{"event_id": "..."}`.
+        """
+
+        where_clause = ["relates_to_id = ?"]
+        where_args = [event_id]
+
+        if relation_type is not None:
+            where_clause.append("relation_type = ?")
+            where_args.append(relation_type)
+
+        if event_type is not None:
+            where_clause.append("type = ?")
+            where_args.append(event_type)
+
+        if aggregation_key:
+            where_clause.append("aggregation_key = ?")
+            where_args.append(aggregation_key)
+
+        pagination_clause = generate_pagination_where_clause(
+            direction=direction,
+            column_names=("topological_ordering", "stream_ordering"),
+            from_token=attr.astuple(from_token) if from_token else None,
+            to_token=attr.astuple(to_token) if to_token else None,
+            engine=self.database_engine,
+        )
+
+        if pagination_clause:
+            where_clause.append(pagination_clause)
+
+        if direction == "b":
+            order = "DESC"
+        else:
+            order = "ASC"
+
+        sql = """
+            SELECT event_id, topological_ordering, stream_ordering
+            FROM event_relations
+            INNER JOIN events USING (event_id)
+            WHERE %s
+            ORDER BY topological_ordering %s, stream_ordering %s
+            LIMIT ?
+        """ % (
+            " AND ".join(where_clause),
+            order,
+            order,
+        )
+
+        def _get_recent_references_for_event_txn(txn):
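+            # Fetch one more row than requested so we can tell whether there
+            # is a further page and build a next_batch token if so.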
+            txn.execute(sql, where_args + [limit + 1])
+
+            last_topo_id = None
+            last_stream_id = None
+            events = []
+            for row in txn:
+                events.append({"event_id": row[0]})
+                last_topo_id = row[1]
+                last_stream_id = row[2]
+
+            next_batch = None
+            if len(events) > limit and last_topo_id and last_stream_id:
+                next_batch = RelationPaginationToken(last_topo_id, last_stream_id)
+
+            return PaginationChunk(
+                chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token
+            )
+
+        return self.runInteraction(
+            "get_recent_references_for_event", _get_recent_references_for_event_txn
+        )
+
+    @cached(tree=True)
+    def get_aggregation_groups_for_event(
+        self,
+        event_id,
+        event_type=None,
+        limit=5,
+        direction="b",
+        from_token=None,
+        to_token=None,
+    ):
+        """Get a list of annotations on the event, grouped by event type and
+        aggregation key, sorted by count.
+
+    This is used e.g. to get which reactions have happened on an event, and
+    how many of each.
+
+        Args:
+            event_id (str): Fetch events that relate to this event ID.
+            event_type (str|None): Only fetch events with this event type, if
+                given.
+            limit (int): Only fetch the `limit` groups.
+            direction (str): Whether to fetch the highest count first (`"b"`) or
+                the lowest count first (`"f"`).
+            from_token (AggregationPaginationToken|None): Fetch rows from the
+                given token, or from the start if None.
+            to_token (AggregationPaginationToken|None): Fetch rows up to the
+                given token, or up to the end if None.
+
+        Returns:
+            Deferred[PaginationChunk]: List of groups of annotations that
+            match. Each row is a dict with `type`, `key` and `count` fields.
+        """
+
+        where_clause = ["relates_to_id = ?", "relation_type = ?"]
+        where_args = [event_id, RelationTypes.ANNOTATION]
+
+        if event_type:
+            where_clause.append("type = ?")
+            where_args.append(event_type)
+
+        having_clause = generate_pagination_where_clause(
+            direction=direction,
+            column_names=("COUNT(*)", "MAX(stream_ordering)"),
+            from_token=attr.astuple(from_token) if from_token else None,
+            to_token=attr.astuple(to_token) if to_token else None,
+            engine=self.database_engine,
+        )
+
+        if direction == "b":
+            order = "DESC"
+        else:
+            order = "ASC"
+
+        if having_clause:
+            having_clause = "HAVING " + having_clause
+        else:
+            having_clause = ""
+
+        sql = """
+            SELECT type, aggregation_key, COUNT(DISTINCT sender), MAX(stream_ordering)
+            FROM event_relations
+            INNER JOIN events USING (event_id)
+            WHERE {where_clause}
+            GROUP BY relation_type, type, aggregation_key
+            {having_clause}
+            ORDER BY COUNT(*) {order}, MAX(stream_ordering) {order}
+            LIMIT ?
+        """.format(
+            where_clause=" AND ".join(where_clause),
+            order=order,
+            having_clause=having_clause,
+        )
+
+        def _get_aggregation_groups_for_event_txn(txn):
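+            # As above, over-fetch by one row so we can tell whether a next
+            # page exists.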
+            txn.execute(sql, where_args + [limit + 1])
+
+            next_batch = None
+            events = []
+            for row in txn:
+                events.append({"type": row[0], "key": row[1], "count": row[2]})
+                next_batch = AggregationPaginationToken(row[2], row[3])
+
+            if len(events) <= limit:
+                next_batch = None
+
+            return PaginationChunk(
+                chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token
+            )
+
+        return self.runInteraction(
+            "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn
+        )
+
+    @cachedInlineCallbacks()
+    def get_applicable_edit(self, event_id):
+        """Get the most recent edit (if any) that has happened for the given
+        event.
+
+        Correctly handles checking whether edits were allowed to happen.
+
+        Args:
+            event_id (str): The original event ID
+
+        Returns:
+            Deferred[EventBase|None]: Returns the most recent edit, if any.
+        """
+
+        # We only allow edits for `m.room.message` events that have the same sender
+        # and event type. We can't assert these things during regular event auth so
+        # we have to do the checks post hoc.
+
+        # Fetches latest edit that has the same type and sender as the
+        # original, and is an `m.room.message`.
+        sql = """
+            SELECT edit.event_id FROM events AS edit
+            INNER JOIN event_relations USING (event_id)
+            INNER JOIN events AS original ON
+                original.event_id = relates_to_id
+                AND edit.type = original.type
+                AND edit.sender = original.sender
+            WHERE
+                relates_to_id = ?
+                AND relation_type = ?
+                AND edit.type = 'm.room.message'
+            ORDER by edit.origin_server_ts DESC, edit.event_id DESC
+            LIMIT 1
+        """
+
+        def _get_applicable_edit_txn(txn):
+            txn.execute(sql, (event_id, RelationTypes.REPLACE))
+            row = txn.fetchone()
+            if row:
+                return row[0]
+
+        edit_id = yield self.runInteraction(
+            "get_applicable_edit", _get_applicable_edit_txn
+        )
+
+        if not edit_id:
+            return
+
+        edit_event = yield self.get_event(edit_id, allow_none=True)
+        defer.returnValue(edit_event)
+
+    def has_user_annotated_event(self, parent_id, event_type, aggregation_key, sender):
+        """Check if a user has already annotated an event with the same key
+        (e.g. already liked an event).
+
+        Args:
+            parent_id (str): The event being annotated
+            event_type (str): The event type of the annotation
+            aggregation_key (str): The aggregation key of the annotation
+            sender (str): The sender of the annotation
+
+        Returns:
+            Deferred[bool]
+        """
+
+        sql = """
+            SELECT 1 FROM event_relations
+            INNER JOIN events USING (event_id)
+            WHERE
+                relates_to_id = ?
+                AND relation_type = ?
+                AND type = ?
+                AND sender = ?
+                AND aggregation_key = ?
+            LIMIT 1;
+        """
+
+        def _get_if_user_has_annotated_event(txn):
+            txn.execute(
+                sql,
+                (
+                    parent_id,
+                    RelationTypes.ANNOTATION,
+                    event_type,
+                    sender,
+                    aggregation_key,
+                ),
+            )
+
+            return bool(txn.fetchone())
+
+        return self.runInteraction(
+            "get_if_user_has_annotated_event", _get_if_user_has_annotated_event
+        )
+
+
+class RelationsStore(RelationsWorkerStore):
+    def _handle_event_relations(self, txn, event):
+        """Handles inserting relation data during peristence of events
+
+        Args:
+            txn
+            event (EventBase)
+        """
+        relation = event.content.get("m.relates_to")
+        if not relation:
+            # No relations
+            return
+
+        rel_type = relation.get("rel_type")
+        if rel_type not in (
+            RelationTypes.ANNOTATION,
+            RelationTypes.REFERENCE,
+            RelationTypes.REPLACE,
+        ):
+            # Unknown relation type
+            return
+
+        parent_id = relation.get("event_id")
+        if not parent_id:
+            # Invalid relation
+            return
+
+        aggregation_key = relation.get("key")
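+        # For annotations this is the key being aggregated over (e.g. the
+        # emoji of a reaction); other relation types typically omit it.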
+
+        self._simple_insert_txn(
+            txn,
+            table="event_relations",
+            values={
+                "event_id": event.event_id,
+                "relates_to_id": parent_id,
+                "relation_type": rel_type,
+                "aggregation_key": aggregation_key,
+            },
+        )
+
+        txn.call_after(self.get_relations_for_event.invalidate_many, (parent_id,))
+        txn.call_after(
+            self.get_aggregation_groups_for_event.invalidate_many, (parent_id,)
+        )
+
+        if rel_type == RelationTypes.REPLACE:
+            txn.call_after(self.get_applicable_edit.invalidate, (parent_id,))
+
+    def _handle_redaction(self, txn, redacted_event_id):
+        """Handles receiving a redaction and checking whether we need to remove
+        any redacted relations from the database.
+
+        Args:
+            txn
+            redacted_event_id (str): The event that was redacted.
+        """
+
+        self._simple_delete_txn(
+            txn,
+            table="event_relations",
+            keyvalues={
+                "event_id": redacted_event_id,
+            }
+        )
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 57df17bcc2..7617913326 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -142,6 +142,27 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
         return self.runInteraction("get_room_summary", _get_room_summary_txn)
 
+    def _get_user_counts_in_room_txn(self, txn, room_id):
+        """
+        Get the number of users in a room, grouped by membership.
+
+        Args:
+            txn: The database transaction object.
+            room_id (str): The ID of the room to count users in.
+
+        Returns:
+            dict[str, int]: A mapping of membership (e.g. "join", "invite")
+                to the number of users with that membership in the room.
+        """
+        sql = """
+        SELECT m.membership, count(*) FROM room_memberships as m
+            INNER JOIN current_state_events as c USING(event_id)
+            WHERE c.type = 'm.room.member' AND c.room_id = ?
+            GROUP BY m.membership
+        """
+
+        txn.execute(sql, (room_id,))
+        return {row[0]: row[1] for row in txn}
+
     @cached()
     def get_invited_rooms_for_user(self, user_id):
         """ Get all the rooms the user is invited to
diff --git a/synapse/storage/schema/delta/13/v13.sql b/synapse/storage/schema/delta/13/v13.sql
index 5eb93b38b2..f8649e5d99 100644
--- a/synapse/storage/schema/delta/13/v13.sql
+++ b/synapse/storage/schema/delta/13/v13.sql
@@ -13,19 +13,7 @@
  * limitations under the License.
  */
 
-CREATE TABLE IF NOT EXISTS application_services(
-    id INTEGER PRIMARY KEY AUTOINCREMENT,
-    url TEXT,
-    token TEXT,
-    hs_token TEXT,
-    sender TEXT,
-    UNIQUE(token)
-);
-
-CREATE TABLE IF NOT EXISTS application_services_regex(
-    id INTEGER PRIMARY KEY AUTOINCREMENT,
-    as_id BIGINT UNSIGNED NOT NULL,
-    namespace INTEGER,  /* enum[room_id|room_alias|user_id] */
-    regex TEXT,
-    FOREIGN KEY(as_id) REFERENCES application_services(id)
-);
+/* We used to create tables called application_services and
+ * application_services_regex, but these are no longer used and are removed in
+ * delta 54.
+ */
diff --git a/synapse/storage/schema/delta/14/upgrade_appservice_db.py b/synapse/storage/schema/delta/14/upgrade_appservice_db.py
deleted file mode 100644
index 4d725b92fe..0000000000
--- a/synapse/storage/schema/delta/14/upgrade_appservice_db.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-import simplejson as json
-
-logger = logging.getLogger(__name__)
-
-
-def run_create(cur, *args, **kwargs):
-    cur.execute("SELECT id, regex FROM application_services_regex")
-    for row in cur.fetchall():
-        try:
-            logger.debug("Checking %s..." % row[0])
-            json.loads(row[1])
-        except ValueError:
-            # row isn't in json, make it so.
-            string_regex = row[1]
-            new_regex = json.dumps({
-                "regex": string_regex,
-                "exclusive": True
-            })
-            cur.execute(
-                "UPDATE application_services_regex SET regex=? WHERE id=?",
-                (new_regex, row[0])
-            )
-
-
-def run_upgrade(*args, **kwargs):
-    pass
diff --git a/synapse/storage/schema/delta/16/unique_constraints.sql b/synapse/storage/schema/delta/16/unique_constraints.sql
index fecf11118c..5b8de52c33 100644
--- a/synapse/storage/schema/delta/16/unique_constraints.sql
+++ b/synapse/storage/schema/delta/16/unique_constraints.sql
@@ -18,14 +18,6 @@ DROP INDEX IF EXISTS room_memberships_event_id;
 CREATE UNIQUE INDEX room_memberships_event_id ON room_memberships(event_id);
 
 --
-DELETE FROM feedback WHERE rowid not in (
-    SELECT MIN(rowid) FROM feedback GROUP BY event_id
-);
-
-DROP INDEX IF EXISTS feedback_event_id;
-CREATE UNIQUE INDEX feedback_event_id ON feedback(event_id);
-
---
 DELETE FROM topics WHERE rowid not in (
     SELECT MIN(rowid) FROM topics GROUP BY event_id
 );
diff --git a/synapse/storage/schema/delta/24/stats_reporting.sql b/synapse/storage/schema/delta/24/stats_reporting.sql
index 5f508af7a9..acea7483bd 100644
--- a/synapse/storage/schema/delta/24/stats_reporting.sql
+++ b/synapse/storage/schema/delta/24/stats_reporting.sql
@@ -1,4 +1,4 @@
-/* Copyright 2015, 2016 OpenMarket Ltd
+/* Copyright 2019 New Vector Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,10 +13,6 @@
  * limitations under the License.
  */
 
--- Should only ever contain one row
-CREATE TABLE IF NOT EXISTS stats_reporting(
-  -- The stream ordering token which was most recently reported as stats
-  reported_stream_token INTEGER,
-  -- The time (seconds since epoch) stats were most recently reported
-  reported_time BIGINT
-);
+/* We used to create a table called stats_reporting, but this is no longer
+ * used and is removed in delta 54.
+ */
\ No newline at end of file
diff --git a/synapse/storage/schema/delta/30/state_stream.sql b/synapse/storage/schema/delta/30/state_stream.sql
index 706fe1dcf4..e85699e82e 100644
--- a/synapse/storage/schema/delta/30/state_stream.sql
+++ b/synapse/storage/schema/delta/30/state_stream.sql
@@ -14,15 +14,10 @@
  */
 
 
-/**
- * The positions in the event stream_ordering when the current_state was
- * replaced by the state at the event.
+/* We used to create a table called current_state_resets, but this is no
+ * longer used and is removed in delta 54.
  */
 
-CREATE TABLE IF NOT EXISTS current_state_resets(
-    event_stream_ordering BIGINT PRIMARY KEY NOT NULL
-);
-
 /* The outlier events that have aquired a state group typically through
  * backfill. This is tracked separately to the events table, as assigning a
  * state group change the position of the existing event in the stream
diff --git a/synapse/storage/schema/delta/32/remove_indices.sql b/synapse/storage/schema/delta/32/remove_indices.sql
index f859be46a6..4219cdd06a 100644
--- a/synapse/storage/schema/delta/32/remove_indices.sql
+++ b/synapse/storage/schema/delta/32/remove_indices.sql
@@ -24,13 +24,9 @@ DROP INDEX IF EXISTS state_groups_id; -- Duplicate of PRIMARY KEY
 DROP INDEX IF EXISTS event_to_state_groups_id; -- Duplicate of PRIMARY KEY
 DROP INDEX IF EXISTS event_push_actions_room_id_event_id_user_id_profile_tag; -- Duplicate of UNIQUE CONSTRAINT
 
-DROP INDEX IF EXISTS event_destinations_id; -- Prefix of UNIQUE CONSTRAINT
 DROP INDEX IF EXISTS st_extrem_id; -- Prefix of UNIQUE CONSTRAINT
-DROP INDEX IF EXISTS event_content_hashes_id; -- Prefix of UNIQUE CONSTRAINT
 DROP INDEX IF EXISTS event_signatures_id; -- Prefix of UNIQUE CONSTRAINT
-DROP INDEX IF EXISTS event_edge_hashes_id; -- Prefix of UNIQUE CONSTRAINT
 DROP INDEX IF EXISTS redactions_event_id; -- Duplicate of UNIQUE CONSTRAINT
-DROP INDEX IF EXISTS room_hosts_room_id; -- Prefix of UNIQUE CONSTRAINT
 
 -- The following indices were unused
 DROP INDEX IF EXISTS remote_media_cache_thumbnails_media_id;
diff --git a/synapse/storage/schema/delta/54/account_validity_with_renewal.sql b/synapse/storage/schema/delta/54/account_validity_with_renewal.sql
new file mode 100644
index 0000000000..0adb2ad55e
--- /dev/null
+++ b/synapse/storage/schema/delta/54/account_validity_with_renewal.sql
@@ -0,0 +1,30 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- We previously changed the schema for this table without renaming the file, which means
+-- that some databases might still be using the old schema. This ensures Synapse uses the
+-- right schema for the table.
+DROP TABLE IF EXISTS account_validity;
+
+-- Track the expiration dates of user accounts, for periodic account renewal.
+CREATE TABLE IF NOT EXISTS account_validity (
+    user_id TEXT PRIMARY KEY,
+    expiration_ts_ms BIGINT NOT NULL,
+    email_sent BOOLEAN NOT NULL,
+    renewal_token TEXT
+);
+
+CREATE INDEX account_validity_email_sent_idx ON account_validity(email_sent, expiration_ts_ms);
+CREATE UNIQUE INDEX account_validity_renewal_string_idx ON account_validity(renewal_token);
diff --git a/synapse/storage/schema/full_schemas/11/room_aliases.sql b/synapse/storage/schema/delta/54/add_validity_to_server_keys.sql
index 71a91f8ec9..c01aa9d2d9 100644
--- a/synapse/storage/schema/full_schemas/11/room_aliases.sql
+++ b/synapse/storage/schema/delta/54/add_validity_to_server_keys.sql
@@ -1,4 +1,4 @@
-/* Copyright 2014-2016 OpenMarket Ltd
+/* Copyright 2019 New Vector Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,12 +13,11 @@
  * limitations under the License.
  */
 
-CREATE TABLE IF NOT EXISTS room_aliases(
-    room_alias TEXT NOT NULL,
-    room_id TEXT NOT NULL
-);
+/* The timestamp until which we can use this key, before we have to refresh it. */
+ALTER TABLE server_signature_keys ADD COLUMN ts_valid_until_ms BIGINT;
 
-CREATE TABLE IF NOT EXISTS room_alias_servers(
-    room_alias TEXT NOT NULL,
-    server TEXT NOT NULL
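+-- Backfill the new column from the key responses we have already cached in
+-- server_keys_json, taking the most recent validity period for each key.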
+UPDATE server_signature_keys SET ts_valid_until_ms = (
+    SELECT MAX(ts_valid_until_ms) FROM server_keys_json skj WHERE
+        skj.server_name = server_signature_keys.server_name AND
+        skj.key_id = server_signature_keys.key_id
 );
diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql
new file mode 100644
index 0000000000..b062ec840c
--- /dev/null
+++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql
@@ -0,0 +1,23 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Start a background job to cleanup extremities that were incorrectly added
+-- by bug #5269.
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('delete_soft_failed_extremities', '{}');
+
+DROP TABLE IF EXISTS _extremities_to_check;  -- To make this delta schema file idempotent.
+CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities;
+CREATE INDEX _extremities_to_check_id ON _extremities_to_check(event_id);
diff --git a/synapse/storage/schema/delta/54/drop_legacy_tables.sql b/synapse/storage/schema/delta/54/drop_legacy_tables.sql
new file mode 100644
index 0000000000..dbbe682697
--- /dev/null
+++ b/synapse/storage/schema/delta/54/drop_legacy_tables.sql
@@ -0,0 +1,30 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- We need to drop application_services_regex first due to foreign key constraints.
+DROP TABLE IF EXISTS application_services_regex;
+
+DROP TABLE IF EXISTS application_services;
+DROP TABLE IF EXISTS transaction_id_to_pdu;
+DROP TABLE IF EXISTS stats_reporting;
+DROP TABLE IF EXISTS current_state_resets;
+DROP TABLE IF EXISTS event_content_hashes;
+DROP TABLE IF EXISTS event_destinations;
+DROP TABLE IF EXISTS event_edge_hashes;
+DROP TABLE IF EXISTS event_signatures;
+DROP TABLE IF EXISTS feedback;
+DROP TABLE IF EXISTS room_hosts;
+DROP TABLE IF EXISTS server_tls_certificates;
+DROP TABLE IF EXISTS state_forward_extremities;
diff --git a/synapse/storage/schema/delta/54/relations.sql b/synapse/storage/schema/delta/54/relations.sql
new file mode 100644
index 0000000000..134862b870
--- /dev/null
+++ b/synapse/storage/schema/delta/54/relations.sql
@@ -0,0 +1,27 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Tracks related events, like reactions, replies, edits, etc. Note that things
+-- in this table are not necessarily "valid", e.g. it may contain edits from
+-- people who don't have power to edit other people's events.
+CREATE TABLE IF NOT EXISTS event_relations (
+    event_id TEXT NOT NULL,
+    relates_to_id TEXT NOT NULL,
+    relation_type TEXT NOT NULL,
+    aggregation_key TEXT
+);
+
+CREATE UNIQUE INDEX event_relations_id ON event_relations(event_id);
+CREATE INDEX event_relations_relates ON event_relations(relates_to_id, relation_type, aggregation_key);
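+
+-- Note that the unique index on event_id means an event can relate to at
+-- most one other event.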
diff --git a/synapse/storage/schema/delta/54/stats.sql b/synapse/storage/schema/delta/54/stats.sql
new file mode 100644
index 0000000000..652e58308e
--- /dev/null
+++ b/synapse/storage/schema/delta/54/stats.sql
@@ -0,0 +1,80 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE stats_stream_pos (
+    Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE,  -- Makes sure this table only has one row.
+    stream_id BIGINT,
+    CHECK (Lock='X')
+);
+
+INSERT INTO stats_stream_pos (stream_id) VALUES (null);
+
+CREATE TABLE user_stats (
+    user_id TEXT NOT NULL,
+    ts BIGINT NOT NULL,
+    bucket_size INT NOT NULL,
+    public_rooms INT NOT NULL,
+    private_rooms INT NOT NULL
+);
+
+CREATE UNIQUE INDEX user_stats_user_ts ON user_stats(user_id, ts);
+
+CREATE TABLE room_stats (
+    room_id TEXT NOT NULL,
+    ts BIGINT NOT NULL,
+    bucket_size INT NOT NULL,
+    current_state_events INT NOT NULL,
+    joined_members INT NOT NULL,
+    invited_members INT NOT NULL,
+    left_members INT NOT NULL,
+    banned_members INT NOT NULL,
+    state_events INT NOT NULL
+);
+
+CREATE UNIQUE INDEX room_stats_room_ts ON room_stats(room_id, ts);
+
+-- cache of current room state; useful for the publicRooms list
+CREATE TABLE room_state (
+    room_id TEXT NOT NULL,
+    join_rules TEXT,
+    history_visibility TEXT,
+    encryption TEXT,
+    name TEXT,
+    topic TEXT,
+    avatar TEXT,
+    canonical_alias TEXT
+    -- get aliases straight from the right table
+);
+
+CREATE UNIQUE INDEX room_state_room ON room_state(room_id);
+
+CREATE TABLE room_stats_earliest_token (
+    room_id TEXT NOT NULL,
+    token BIGINT NOT NULL
+);
+
+CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token(room_id);
+
+-- Set up staging tables
+INSERT INTO background_updates (update_name, progress_json) VALUES
+    ('populate_stats_createtables', '{}');
+
+-- Run through each room and update stats
+INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+    ('populate_stats_process_rooms', '{}', 'populate_stats_createtables');
+
+-- Clean up staging tables
+INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+    ('populate_stats_cleanup', '{}', 'populate_stats_process_rooms');
diff --git a/synapse/storage/schema/delta/54/stats2.sql b/synapse/storage/schema/delta/54/stats2.sql
new file mode 100644
index 0000000000..3b2d48447f
--- /dev/null
+++ b/synapse/storage/schema/delta/54/stats2.sql
@@ -0,0 +1,28 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- This delta file gets run after `54/stats.sql` delta.
+
+-- We want to add some indices to the temporary stats table, so we re-insert
+-- 'populate_stats_createtables' if we are still processing the rooms update.
+INSERT INTO background_updates (update_name, progress_json)
+    SELECT 'populate_stats_createtables', '{}'
+    WHERE
+        'populate_stats_process_rooms' IN (
+            SELECT update_name FROM background_updates
+        )
+        AND 'populate_stats_createtables' NOT IN (  -- don't insert if already exists
+            SELECT update_name FROM background_updates
+        );
diff --git a/synapse/storage/schema/delta/55/track_threepid_validations.sql b/synapse/storage/schema/delta/55/track_threepid_validations.sql
new file mode 100644
index 0000000000..a8eced2e0a
--- /dev/null
+++ b/synapse/storage/schema/delta/55/track_threepid_validations.sql
@@ -0,0 +1,31 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+CREATE TABLE IF NOT EXISTS threepid_validation_session (
+    session_id TEXT PRIMARY KEY,
+    medium TEXT NOT NULL,
+    address TEXT NOT NULL,
+    client_secret TEXT NOT NULL,
+    last_send_attempt BIGINT NOT NULL,
+    validated_at BIGINT
+);
+
+CREATE TABLE IF NOT EXISTS threepid_validation_token (
+    token TEXT PRIMARY KEY,
+    session_id TEXT NOT NULL,
+    next_link TEXT,
+    expires BIGINT NOT NULL
+);
+
+CREATE INDEX threepid_validation_token_session_id ON threepid_validation_token(session_id);
diff --git a/synapse/storage/schema/full_schemas/11/profiles.sql b/synapse/storage/schema/delta/55/users_alter_deactivated.sql
index b314e6df75..dabdde489b 100644
--- a/synapse/storage/schema/full_schemas/11/profiles.sql
+++ b/synapse/storage/schema/delta/55/users_alter_deactivated.sql
@@ -1,4 +1,4 @@
-/* Copyright 2014-2016 OpenMarket Ltd
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -12,8 +12,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-CREATE TABLE IF NOT EXISTS profiles(
-    user_id TEXT NOT NULL,
-    displayname TEXT,
-    avatar_url TEXT
-);
+
+ALTER TABLE users ADD deactivated SMALLINT DEFAULT 0 NOT NULL;
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('users_set_deactivated_flag', '{}');
diff --git a/synapse/storage/schema/full_schemas/11/event_edges.sql b/synapse/storage/schema/full_schemas/11/event_edges.sql
deleted file mode 100644
index bccd1c6f74..0000000000
--- a/synapse/storage/schema/full_schemas/11/event_edges.sql
+++ /dev/null
@@ -1,91 +0,0 @@
-/* Copyright 2014-2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-CREATE TABLE IF NOT EXISTS event_forward_extremities(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    UNIQUE (event_id, room_id)
-);
-
-CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id);
-CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id);
-
-
-CREATE TABLE IF NOT EXISTS event_backward_extremities(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    UNIQUE (event_id, room_id)
-);
-
-CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id);
-CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id);
-
-
-CREATE TABLE IF NOT EXISTS event_edges(
-    event_id TEXT NOT NULL,
-    prev_event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    -- We no longer insert prev_state into this table, so all new rows will have
-    -- is_state as false.
-    is_state BOOL NOT NULL,
-    UNIQUE (event_id, prev_event_id, room_id, is_state)
-);
-
-CREATE INDEX ev_edges_id ON event_edges(event_id);
-CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id);
-
-
-CREATE TABLE IF NOT EXISTS room_depth(
-    room_id TEXT NOT NULL,
-    min_depth INTEGER NOT NULL,
-    UNIQUE (room_id)
-);
-
-CREATE INDEX room_depth_room ON room_depth(room_id);
-
-
-create TABLE IF NOT EXISTS event_destinations(
-    event_id TEXT NOT NULL,
-    destination TEXT NOT NULL,
-    delivered_ts BIGINT DEFAULT 0, -- or 0 if not delivered
-    UNIQUE (event_id, destination)
-);
-
-CREATE INDEX event_destinations_id ON event_destinations(event_id);
-
-
-CREATE TABLE IF NOT EXISTS state_forward_extremities(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    type TEXT NOT NULL,
-    state_key TEXT NOT NULL,
-    UNIQUE (event_id, room_id)
-);
-
-CREATE INDEX st_extrem_keys ON state_forward_extremities(
-    room_id, type, state_key
-);
-CREATE INDEX st_extrem_id ON state_forward_extremities(event_id);
-
-
-CREATE TABLE IF NOT EXISTS event_auth(
-    event_id TEXT NOT NULL,
-    auth_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    UNIQUE (event_id, auth_id, room_id)
-);
-
-CREATE INDEX evauth_edges_id ON event_auth(event_id);
-CREATE INDEX evauth_edges_auth_id ON event_auth(auth_id);
diff --git a/synapse/storage/schema/full_schemas/11/event_signatures.sql b/synapse/storage/schema/full_schemas/11/event_signatures.sql
deleted file mode 100644
index 00ce85980e..0000000000
--- a/synapse/storage/schema/full_schemas/11/event_signatures.sql
+++ /dev/null
@@ -1,55 +0,0 @@
-/* Copyright 2014-2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-CREATE TABLE IF NOT EXISTS event_content_hashes (
-    event_id TEXT,
-    algorithm TEXT,
-    hash bytea,
-    UNIQUE (event_id, algorithm)
-);
-
-CREATE INDEX event_content_hashes_id ON event_content_hashes(event_id);
-
-
-CREATE TABLE IF NOT EXISTS event_reference_hashes (
-    event_id TEXT,
-    algorithm TEXT,
-    hash bytea,
-    UNIQUE (event_id, algorithm)
-);
-
-CREATE INDEX event_reference_hashes_id ON event_reference_hashes(event_id);
-
-
-CREATE TABLE IF NOT EXISTS event_signatures (
-    event_id TEXT,
-    signature_name TEXT,
-    key_id TEXT,
-    signature bytea,
-    UNIQUE (event_id, signature_name, key_id)
-);
-
-CREATE INDEX event_signatures_id ON event_signatures(event_id);
-
-
-CREATE TABLE IF NOT EXISTS event_edge_hashes(
-    event_id TEXT,
-    prev_event_id TEXT,
-    algorithm TEXT,
-    hash bytea,
-    UNIQUE (event_id, prev_event_id, algorithm)
-);
-
-CREATE INDEX event_edge_hashes_id ON event_edge_hashes(event_id);
diff --git a/synapse/storage/schema/full_schemas/11/im.sql b/synapse/storage/schema/full_schemas/11/im.sql
deleted file mode 100644
index dfbbf9fd54..0000000000
--- a/synapse/storage/schema/full_schemas/11/im.sql
+++ /dev/null
@@ -1,123 +0,0 @@
-/* Copyright 2014-2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-CREATE TABLE IF NOT EXISTS events(
-    stream_ordering INTEGER PRIMARY KEY AUTOINCREMENT,
-    topological_ordering BIGINT NOT NULL,
-    event_id TEXT NOT NULL,
-    type TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    content TEXT NOT NULL,
-    unrecognized_keys TEXT,
-    processed BOOL NOT NULL,
-    outlier BOOL NOT NULL,
-    depth BIGINT DEFAULT 0 NOT NULL,
-    UNIQUE (event_id)
-);
-
-CREATE INDEX events_stream_ordering ON events (stream_ordering);
-CREATE INDEX events_topological_ordering ON events (topological_ordering);
-CREATE INDEX events_room_id ON events (room_id);
-
-
-CREATE TABLE IF NOT EXISTS event_json(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    internal_metadata TEXT NOT NULL,
-    json TEXT NOT NULL,
-    UNIQUE (event_id)
-);
-
-CREATE INDEX event_json_room_id ON event_json(room_id);
-
-
-CREATE TABLE IF NOT EXISTS state_events(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    type TEXT NOT NULL,
-    state_key TEXT NOT NULL,
-    prev_state TEXT,
-    UNIQUE (event_id)
-);
-
-CREATE INDEX state_events_room_id ON state_events (room_id);
-CREATE INDEX state_events_type ON state_events (type);
-CREATE INDEX state_events_state_key ON state_events (state_key);
-
-
-CREATE TABLE IF NOT EXISTS current_state_events(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    type TEXT NOT NULL,
-    state_key TEXT NOT NULL,
-    UNIQUE (room_id, type, state_key)
-);
-
-CREATE INDEX curr_events_event_id ON current_state_events (event_id);
-CREATE INDEX current_state_events_room_id ON current_state_events (room_id);
-CREATE INDEX current_state_events_type ON current_state_events (type);
-CREATE INDEX current_state_events_state_key ON current_state_events (state_key);
-
-CREATE TABLE IF NOT EXISTS room_memberships(
-    event_id TEXT NOT NULL,
-    user_id TEXT NOT NULL,
-    sender TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    membership TEXT NOT NULL
-);
-
-CREATE INDEX room_memberships_event_id ON room_memberships (event_id);
-CREATE INDEX room_memberships_room_id ON room_memberships (room_id);
-CREATE INDEX room_memberships_user_id ON room_memberships (user_id);
-
-CREATE TABLE IF NOT EXISTS feedback(
-    event_id TEXT NOT NULL,
-    feedback_type TEXT,
-    target_event_id TEXT,
-    sender TEXT,
-    room_id TEXT
-);
-
-CREATE TABLE IF NOT EXISTS topics(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    topic TEXT NOT NULL
-);
-
-CREATE INDEX topics_event_id ON topics(event_id);
-CREATE INDEX topics_room_id ON topics(room_id);
-
-CREATE TABLE IF NOT EXISTS room_names(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    name TEXT NOT NULL
-);
-
-CREATE INDEX room_names_event_id ON room_names(event_id);
-CREATE INDEX room_names_room_id ON room_names(room_id);
-
-CREATE TABLE IF NOT EXISTS rooms(
-    room_id TEXT PRIMARY KEY NOT NULL,
-    is_public BOOL,
-    creator TEXT
-);
-
-CREATE TABLE IF NOT EXISTS room_hosts(
-    room_id TEXT NOT NULL,
-    host TEXT NOT NULL,
-    UNIQUE (room_id, host)
-);
-
-CREATE INDEX room_hosts_room_id ON room_hosts (room_id);
diff --git a/synapse/storage/schema/full_schemas/11/keys.sql b/synapse/storage/schema/full_schemas/11/keys.sql
deleted file mode 100644
index ca0ca1b694..0000000000
--- a/synapse/storage/schema/full_schemas/11/keys.sql
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright 2014-2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-CREATE TABLE IF NOT EXISTS server_tls_certificates(
-  server_name TEXT, -- Server name.
-  fingerprint TEXT, -- Certificate fingerprint.
-  from_server TEXT, -- Which key server the certificate was fetched from.
-  ts_added_ms BIGINT, -- When the certificate was added.
-  tls_certificate bytea, -- DER encoded x509 certificate.
-  UNIQUE (server_name, fingerprint)
-);
-
-CREATE TABLE IF NOT EXISTS server_signature_keys(
-  server_name TEXT, -- Server name.
-  key_id TEXT, -- Key version.
-  from_server TEXT, -- Which key server the key was fetched from.
-  ts_added_ms BIGINT, -- When the key was added.
-  verify_key bytea, -- NACL verification key.
-  UNIQUE (server_name, key_id)
-);
diff --git a/synapse/storage/schema/full_schemas/11/media_repository.sql b/synapse/storage/schema/full_schemas/11/media_repository.sql
deleted file mode 100644
index 9c264d6ece..0000000000
--- a/synapse/storage/schema/full_schemas/11/media_repository.sql
+++ /dev/null
@@ -1,65 +0,0 @@
-/* Copyright 2014-2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-CREATE TABLE IF NOT EXISTS local_media_repository (
-    media_id TEXT, -- The id used to refer to the media.
-    media_type TEXT, -- The MIME-type of the media.
-    media_length INTEGER, -- Length of the media in bytes.
-    created_ts BIGINT, -- When the content was uploaded in ms.
-    upload_name TEXT, -- The name the media was uploaded with.
-    user_id TEXT, -- The user who uploaded the file.
-    UNIQUE (media_id)
-);
-
-CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails (
-    media_id TEXT, -- The id used to refer to the media.
-    thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
-    thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
-    thumbnail_type TEXT, -- The MIME-type of the thumbnail.
-    thumbnail_method TEXT, -- The method used to make the thumbnail.
-    thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
-    UNIQUE (
-        media_id, thumbnail_width, thumbnail_height, thumbnail_type
-    )
-);
-
-CREATE INDEX local_media_repository_thumbnails_media_id
-    ON local_media_repository_thumbnails (media_id);
-
-CREATE TABLE IF NOT EXISTS remote_media_cache (
-    media_origin TEXT, -- The remote HS the media came from.
-    media_id TEXT, -- The id used to refer to the media on that server.
-    media_type TEXT, -- The MIME-type of the media.
-    created_ts BIGINT, -- When the content was uploaded in ms.
-    upload_name TEXT, -- The name the media was uploaded with.
-    media_length INTEGER, -- Length of the media in bytes.
-    filesystem_id TEXT, -- The name used to store the media on disk.
-    UNIQUE (media_origin, media_id)
-);
-
-CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails (
-    media_origin TEXT, -- The remote HS the media came from.
-    media_id TEXT, -- The id used to refer to the media.
-    thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
-    thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
-    thumbnail_method TEXT, -- The method used to make the thumbnail.
-    thumbnail_type TEXT, -- The MIME-type of the thumbnail.
-    thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
-    filesystem_id TEXT, -- The name used to store the media on disk.
-    UNIQUE (
-        media_origin, media_id, thumbnail_width, thumbnail_height,
-        thumbnail_type
-     )
-);
diff --git a/synapse/storage/schema/full_schemas/11/presence.sql b/synapse/storage/schema/full_schemas/11/presence.sql
deleted file mode 100644
index 492725994c..0000000000
--- a/synapse/storage/schema/full_schemas/11/presence.sql
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright 2014-2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-CREATE TABLE IF NOT EXISTS presence(
-  user_id TEXT NOT NULL,
-  state VARCHAR(20),
-  status_msg TEXT,
-  mtime BIGINT -- milliseconds since last state change
-);
-
--- For each of /my/ users which possibly-remote users are allowed to see their
--- presence state
-CREATE TABLE IF NOT EXISTS presence_allow_inbound(
-  observed_user_id TEXT NOT NULL,
-  observer_user_id TEXT NOT NULL -- a UserID,
-);
-
--- For each of /my/ users (watcher), which possibly-remote users are they
--- watching?
-CREATE TABLE IF NOT EXISTS presence_list(
-  user_id TEXT NOT NULL,
-  observed_user_id TEXT NOT NULL, -- a UserID,
-  accepted BOOLEAN NOT NULL
-);
diff --git a/synapse/storage/schema/full_schemas/11/redactions.sql b/synapse/storage/schema/full_schemas/11/redactions.sql
deleted file mode 100644
index 318f0d9aa5..0000000000
--- a/synapse/storage/schema/full_schemas/11/redactions.sql
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright 2014-2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-CREATE TABLE IF NOT EXISTS redactions (
-    event_id TEXT NOT NULL,
-    redacts TEXT NOT NULL,
-    UNIQUE (event_id)
-);
-
-CREATE INDEX redactions_event_id ON redactions (event_id);
-CREATE INDEX redactions_redacts ON redactions (redacts);
diff --git a/synapse/storage/schema/full_schemas/11/state.sql b/synapse/storage/schema/full_schemas/11/state.sql
deleted file mode 100644
index b901e0f017..0000000000
--- a/synapse/storage/schema/full_schemas/11/state.sql
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright 2014-2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-CREATE TABLE IF NOT EXISTS state_groups(
-    id INTEGER PRIMARY KEY,
-    room_id TEXT NOT NULL,
-    event_id TEXT NOT NULL
-);
-
-CREATE TABLE IF NOT EXISTS state_groups_state(
-    state_group INTEGER NOT NULL,
-    room_id TEXT NOT NULL,
-    type TEXT NOT NULL,
-    state_key TEXT NOT NULL,
-    event_id TEXT NOT NULL
-);
-
-CREATE TABLE IF NOT EXISTS event_to_state_groups(
-    event_id TEXT NOT NULL,
-    state_group INTEGER NOT NULL,
-    UNIQUE (event_id)
-);
-
-CREATE INDEX state_groups_id ON state_groups(id);
-
-CREATE INDEX state_groups_state_id ON state_groups_state(state_group);
-CREATE INDEX state_groups_state_tuple ON state_groups_state(room_id, type, state_key);
-CREATE INDEX event_to_state_groups_id ON event_to_state_groups(event_id);
diff --git a/synapse/storage/schema/full_schemas/11/transactions.sql b/synapse/storage/schema/full_schemas/11/transactions.sql
deleted file mode 100644
index f6a058832e..0000000000
--- a/synapse/storage/schema/full_schemas/11/transactions.sql
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright 2014-2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--- Stores what transaction ids we have received and what our response was
-CREATE TABLE IF NOT EXISTS received_transactions(
-    transaction_id TEXT,
-    origin TEXT,
-    ts BIGINT,
-    response_code INTEGER,
-    response_json bytea,
-    has_been_referenced SMALLINT DEFAULT 0, -- Whether this has been referenced by a prev_tx
-    UNIQUE (transaction_id, origin)
-);
-
-CREATE INDEX transactions_have_ref ON received_transactions(origin, has_been_referenced); -- WHERE has_been_referenced = 0;
-
--- For sent transactions only.
-CREATE TABLE IF NOT EXISTS transaction_id_to_pdu(
-    transaction_id INTEGER,
-    destination TEXT,
-    pdu_id TEXT,
-    pdu_origin TEXT
-);
-
-CREATE INDEX transaction_id_to_pdu_tx ON transaction_id_to_pdu(transaction_id, destination);
-CREATE INDEX transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination);
-
--- To track destination health
-CREATE TABLE IF NOT EXISTS destinations(
-    destination TEXT PRIMARY KEY,
-    retry_last_ts BIGINT,
-    retry_interval INTEGER
-);
diff --git a/synapse/storage/schema/full_schemas/11/users.sql b/synapse/storage/schema/full_schemas/11/users.sql
deleted file mode 100644
index 6c1d4c34a1..0000000000
--- a/synapse/storage/schema/full_schemas/11/users.sql
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright 2014-2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-CREATE TABLE IF NOT EXISTS users(
-    id INTEGER PRIMARY KEY AUTOINCREMENT,
-    name TEXT,
-    password_hash TEXT,
-    creation_ts BIGINT,
-    admin SMALLINT DEFAULT 0 NOT NULL,
-    UNIQUE(name)
-);
-
-CREATE TABLE IF NOT EXISTS access_tokens(
-    id INTEGER PRIMARY KEY AUTOINCREMENT,
-    user_id TEXT NOT NULL,
-    device_id TEXT,
-    token TEXT NOT NULL,
-    last_used BIGINT,
-    UNIQUE(token)
-);
-
-CREATE TABLE IF NOT EXISTS user_ips (
-    user TEXT NOT NULL,
-    access_token TEXT NOT NULL,
-    device_id TEXT,
-    ip TEXT NOT NULL,
-    user_agent TEXT NOT NULL,
-    last_seen BIGINT NOT NULL,
-    UNIQUE (user, access_token, ip, user_agent)
-);
-
-CREATE INDEX user_ips_user ON user_ips(user);
diff --git a/synapse/storage/schema/full_schemas/16/application_services.sql b/synapse/storage/schema/full_schemas/16/application_services.sql
index aee0e68473..883fcd10b2 100644
--- a/synapse/storage/schema/full_schemas/16/application_services.sql
+++ b/synapse/storage/schema/full_schemas/16/application_services.sql
@@ -13,22 +13,11 @@
  * limitations under the License.
  */
 
-CREATE TABLE IF NOT EXISTS application_services(
-    id BIGINT PRIMARY KEY,
-    url TEXT,
-    token TEXT,
-    hs_token TEXT,
-    sender TEXT,
-    UNIQUE(token)
-);
+/* We used to create tables called application_services and
+ * application_services_regex, but these are no longer used and are removed in
+ * delta 54.
+ */
 
-CREATE TABLE IF NOT EXISTS application_services_regex(
-    id BIGINT PRIMARY KEY,
-    as_id BIGINT NOT NULL,
-    namespace INTEGER,  /* enum[room_id|room_alias|user_id] */
-    regex TEXT,
-    FOREIGN KEY(as_id) REFERENCES application_services(id)
-);
 
 CREATE TABLE IF NOT EXISTS application_services_state(
     as_id TEXT PRIMARY KEY,
diff --git a/synapse/storage/schema/full_schemas/16/event_edges.sql b/synapse/storage/schema/full_schemas/16/event_edges.sql
index 6b5a5a88fa..10ce2aa7a0 100644
--- a/synapse/storage/schema/full_schemas/16/event_edges.sql
+++ b/synapse/storage/schema/full_schemas/16/event_edges.sql
@@ -13,6 +13,11 @@
  * limitations under the License.
  */
 
+/* We used to create tables called event_destinations and
+ * state_forward_extremities, but these are no longer used and are removed in
+ * delta 54.
+ */
+
 CREATE TABLE IF NOT EXISTS event_forward_extremities(
     event_id TEXT NOT NULL,
     room_id TEXT NOT NULL,
@@ -54,31 +59,6 @@ CREATE TABLE IF NOT EXISTS room_depth(
 
 CREATE INDEX room_depth_room ON room_depth(room_id);
 
-
-create TABLE IF NOT EXISTS event_destinations(
-    event_id TEXT NOT NULL,
-    destination TEXT NOT NULL,
-    delivered_ts BIGINT DEFAULT 0, -- or 0 if not delivered
-    UNIQUE (event_id, destination)
-);
-
-CREATE INDEX event_destinations_id ON event_destinations(event_id);
-
-
-CREATE TABLE IF NOT EXISTS state_forward_extremities(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    type TEXT NOT NULL,
-    state_key TEXT NOT NULL,
-    UNIQUE (event_id, room_id)
-);
-
-CREATE INDEX st_extrem_keys ON state_forward_extremities(
-    room_id, type, state_key
-);
-CREATE INDEX st_extrem_id ON state_forward_extremities(event_id);
-
-
 CREATE TABLE IF NOT EXISTS event_auth(
     event_id TEXT NOT NULL,
     auth_id TEXT NOT NULL,
diff --git a/synapse/storage/schema/full_schemas/16/event_signatures.sql b/synapse/storage/schema/full_schemas/16/event_signatures.sql
index 00ce85980e..95826da431 100644
--- a/synapse/storage/schema/full_schemas/16/event_signatures.sql
+++ b/synapse/storage/schema/full_schemas/16/event_signatures.sql
@@ -13,15 +13,9 @@
  * limitations under the License.
  */
 
-CREATE TABLE IF NOT EXISTS event_content_hashes (
-    event_id TEXT,
-    algorithm TEXT,
-    hash bytea,
-    UNIQUE (event_id, algorithm)
-);
-
-CREATE INDEX event_content_hashes_id ON event_content_hashes(event_id);
-
+/* We used to create tables called event_content_hashes and event_edge_hashes,
+ * but these are no longer used and are removed in delta 54.
+ */
 
 CREATE TABLE IF NOT EXISTS event_reference_hashes (
     event_id TEXT,
@@ -42,14 +36,3 @@ CREATE TABLE IF NOT EXISTS event_signatures (
 );
 
 CREATE INDEX event_signatures_id ON event_signatures(event_id);
-
-
-CREATE TABLE IF NOT EXISTS event_edge_hashes(
-    event_id TEXT,
-    prev_event_id TEXT,
-    algorithm TEXT,
-    hash bytea,
-    UNIQUE (event_id, prev_event_id, algorithm)
-);
-
-CREATE INDEX event_edge_hashes_id ON event_edge_hashes(event_id);
diff --git a/synapse/storage/schema/full_schemas/16/im.sql b/synapse/storage/schema/full_schemas/16/im.sql
index 5f5cb8d01d..a1a2aa8e5b 100644
--- a/synapse/storage/schema/full_schemas/16/im.sql
+++ b/synapse/storage/schema/full_schemas/16/im.sql
@@ -13,6 +13,10 @@
  * limitations under the License.
  */
 
+/* We used to create tables called room_hosts and feedback,
+ * but these are no longer used and are removed in delta 54.
+ */
+
 CREATE TABLE IF NOT EXISTS events(
     stream_ordering INTEGER PRIMARY KEY,
     topological_ordering BIGINT NOT NULL,
@@ -91,15 +95,6 @@ CREATE TABLE IF NOT EXISTS room_memberships(
 CREATE INDEX room_memberships_room_id ON room_memberships (room_id);
 CREATE INDEX room_memberships_user_id ON room_memberships (user_id);
 
-CREATE TABLE IF NOT EXISTS feedback(
-    event_id TEXT NOT NULL,
-    feedback_type TEXT,
-    target_event_id TEXT,
-    sender TEXT,
-    room_id TEXT,
-    UNIQUE (event_id)
-);
-
 CREATE TABLE IF NOT EXISTS topics(
     event_id TEXT NOT NULL,
     room_id TEXT NOT NULL,
@@ -123,11 +118,3 @@ CREATE TABLE IF NOT EXISTS rooms(
     is_public BOOL,
     creator TEXT
 );
-
-CREATE TABLE IF NOT EXISTS room_hosts(
-    room_id TEXT NOT NULL,
-    host TEXT NOT NULL,
-    UNIQUE (room_id, host)
-);
-
-CREATE INDEX room_hosts_room_id ON room_hosts (room_id);
diff --git a/synapse/storage/schema/full_schemas/16/keys.sql b/synapse/storage/schema/full_schemas/16/keys.sql
index ca0ca1b694..11cdffdbb3 100644
--- a/synapse/storage/schema/full_schemas/16/keys.sql
+++ b/synapse/storage/schema/full_schemas/16/keys.sql
@@ -12,14 +12,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-CREATE TABLE IF NOT EXISTS server_tls_certificates(
-  server_name TEXT, -- Server name.
-  fingerprint TEXT, -- Certificate fingerprint.
-  from_server TEXT, -- Which key server the certificate was fetched from.
-  ts_added_ms BIGINT, -- When the certificate was added.
-  tls_certificate bytea, -- DER encoded x509 certificate.
-  UNIQUE (server_name, fingerprint)
-);
+
+-- We used to create a table called server_tls_certificates, but this is no
+-- longer used and is removed in delta 54.
 
 CREATE TABLE IF NOT EXISTS server_signature_keys(
   server_name TEXT, -- Server name.
diff --git a/synapse/storage/schema/full_schemas/16/presence.sql b/synapse/storage/schema/full_schemas/16/presence.sql
index 0892c4cf96..01d2d8f833 100644
--- a/synapse/storage/schema/full_schemas/16/presence.sql
+++ b/synapse/storage/schema/full_schemas/16/presence.sql
@@ -28,5 +28,5 @@ CREATE TABLE IF NOT EXISTS presence_allow_inbound(
   UNIQUE (observed_user_id, observer_user_id)
 );
 
--- We used to create a table called presence_list, but this is no longer used 
+-- We used to create a table called presence_list, but this is no longer used
 -- and is removed in delta 54.
\ No newline at end of file
diff --git a/synapse/storage/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/full_schemas/54/full.sql.postgres
new file mode 100644
index 0000000000..098434356f
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/54/full.sql.postgres
@@ -0,0 +1,2052 @@
+
+
+
+
+
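+-- A note on layout: this file appears to follow the usual pg_dump
+-- --schema-only output, so all tables are declared first, and their primary
+-- keys, UNIQUE constraints and indexes are added afterwards by the
+-- ALTER TABLE / CREATE INDEX statements further down.
+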
+CREATE TABLE access_tokens (
+    id bigint NOT NULL,
+    user_id text NOT NULL,
+    device_id text,
+    token text NOT NULL,
+    last_used bigint
+);
+
+
+
+CREATE TABLE account_data (
+    user_id text NOT NULL,
+    account_data_type text NOT NULL,
+    stream_id bigint NOT NULL,
+    content text NOT NULL
+);
+
+
+
+CREATE TABLE account_data_max_stream_id (
+    lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+    stream_id bigint NOT NULL,
+    CONSTRAINT private_user_data_max_stream_id_lock_check CHECK ((lock = 'X'::bpchar))
+);
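+-- A single-row "lock" table: the CHECK constraint pins lock to 'X', and a
+-- UNIQUE constraint on it (added further down) caps the table at one row.
+-- An update would then look like (illustrative only):
+--   UPDATE account_data_max_stream_id SET stream_id = ? WHERE lock = 'X';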
+
+
+
+CREATE TABLE account_validity (
+    user_id text NOT NULL,
+    expiration_ts_ms bigint NOT NULL,
+    email_sent boolean NOT NULL,
+    renewal_token text
+);
+
+
+
+CREATE TABLE application_services_state (
+    as_id text NOT NULL,
+    state character varying(5),
+    last_txn integer
+);
+
+
+
+CREATE TABLE application_services_txns (
+    as_id text NOT NULL,
+    txn_id integer NOT NULL,
+    event_ids text NOT NULL
+);
+
+
+
+CREATE TABLE appservice_room_list (
+    appservice_id text NOT NULL,
+    network_id text NOT NULL,
+    room_id text NOT NULL
+);
+
+
+
+CREATE TABLE appservice_stream_position (
+    lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+    stream_ordering bigint,
+    CONSTRAINT appservice_stream_position_lock_check CHECK ((lock = 'X'::bpchar))
+);
+
+
+
+CREATE TABLE background_updates (
+    update_name text NOT NULL,
+    progress_json text NOT NULL,
+    depends_on text
+);
+
+
+
+CREATE TABLE blocked_rooms (
+    room_id text NOT NULL,
+    user_id text NOT NULL
+);
+
+
+
+CREATE TABLE cache_invalidation_stream (
+    stream_id bigint,
+    cache_func text,
+    keys text[],
+    invalidation_ts bigint
+);
+
+
+
+CREATE TABLE current_state_delta_stream (
+    stream_id bigint NOT NULL,
+    room_id text NOT NULL,
+    type text NOT NULL,
+    state_key text NOT NULL,
+    event_id text,
+    prev_event_id text
+);
+
+
+
+CREATE TABLE current_state_events (
+    event_id text NOT NULL,
+    room_id text NOT NULL,
+    type text NOT NULL,
+    state_key text NOT NULL
+);
+
+
+
+CREATE TABLE deleted_pushers (
+    stream_id bigint NOT NULL,
+    app_id text NOT NULL,
+    pushkey text NOT NULL,
+    user_id text NOT NULL
+);
+
+
+
+CREATE TABLE destinations (
+    destination text NOT NULL,
+    retry_last_ts bigint,
+    retry_interval integer
+);
+
+
+
+CREATE TABLE device_federation_inbox (
+    origin text NOT NULL,
+    message_id text NOT NULL,
+    received_ts bigint NOT NULL
+);
+
+
+
+CREATE TABLE device_federation_outbox (
+    destination text NOT NULL,
+    stream_id bigint NOT NULL,
+    queued_ts bigint NOT NULL,
+    messages_json text NOT NULL
+);
+
+
+
+CREATE TABLE device_inbox (
+    user_id text NOT NULL,
+    device_id text NOT NULL,
+    stream_id bigint NOT NULL,
+    message_json text NOT NULL
+);
+
+
+
+CREATE TABLE device_lists_outbound_last_success (
+    destination text NOT NULL,
+    user_id text NOT NULL,
+    stream_id bigint NOT NULL
+);
+
+
+
+CREATE TABLE device_lists_outbound_pokes (
+    destination text NOT NULL,
+    stream_id bigint NOT NULL,
+    user_id text NOT NULL,
+    device_id text NOT NULL,
+    sent boolean NOT NULL,
+    ts bigint NOT NULL
+);
+
+
+
+CREATE TABLE device_lists_remote_cache (
+    user_id text NOT NULL,
+    device_id text NOT NULL,
+    content text NOT NULL
+);
+
+
+
+CREATE TABLE device_lists_remote_extremeties (
+    user_id text NOT NULL,
+    stream_id text NOT NULL
+);
+
+
+
+CREATE TABLE device_lists_stream (
+    stream_id bigint NOT NULL,
+    user_id text NOT NULL,
+    device_id text NOT NULL
+);
+
+
+
+CREATE TABLE device_max_stream_id (
+    stream_id bigint NOT NULL
+);
+
+
+
+CREATE TABLE devices (
+    user_id text NOT NULL,
+    device_id text NOT NULL,
+    display_name text
+);
+
+
+
+CREATE TABLE e2e_device_keys_json (
+    user_id text NOT NULL,
+    device_id text NOT NULL,
+    ts_added_ms bigint NOT NULL,
+    key_json text NOT NULL
+);
+
+
+
+CREATE TABLE e2e_one_time_keys_json (
+    user_id text NOT NULL,
+    device_id text NOT NULL,
+    algorithm text NOT NULL,
+    key_id text NOT NULL,
+    ts_added_ms bigint NOT NULL,
+    key_json text NOT NULL
+);
+
+
+
+CREATE TABLE e2e_room_keys (
+    user_id text NOT NULL,
+    room_id text NOT NULL,
+    session_id text NOT NULL,
+    version bigint NOT NULL,
+    first_message_index integer,
+    forwarded_count integer,
+    is_verified boolean,
+    session_data text NOT NULL
+);
+
+
+
+CREATE TABLE e2e_room_keys_versions (
+    user_id text NOT NULL,
+    version bigint NOT NULL,
+    algorithm text NOT NULL,
+    auth_data text NOT NULL,
+    deleted smallint DEFAULT 0 NOT NULL
+);
+
+
+
+CREATE TABLE erased_users (
+    user_id text NOT NULL
+);
+
+
+
+CREATE TABLE event_auth (
+    event_id text NOT NULL,
+    auth_id text NOT NULL,
+    room_id text NOT NULL
+);
+
+
+
+CREATE TABLE event_backward_extremities (
+    event_id text NOT NULL,
+    room_id text NOT NULL
+);
+
+
+
+CREATE TABLE event_edges (
+    event_id text NOT NULL,
+    prev_event_id text NOT NULL,
+    room_id text NOT NULL,
+    is_state boolean NOT NULL
+);
+
+
+
+CREATE TABLE event_forward_extremities (
+    event_id text NOT NULL,
+    room_id text NOT NULL
+);
+
+
+
+CREATE TABLE event_json (
+    event_id text NOT NULL,
+    room_id text NOT NULL,
+    internal_metadata text NOT NULL,
+    json text NOT NULL,
+    format_version integer
+);
+
+
+
+CREATE TABLE event_push_actions (
+    room_id text NOT NULL,
+    event_id text NOT NULL,
+    user_id text NOT NULL,
+    profile_tag character varying(32),
+    actions text NOT NULL,
+    topological_ordering bigint,
+    stream_ordering bigint,
+    notif smallint,
+    highlight smallint
+);
+
+
+
+CREATE TABLE event_push_actions_staging (
+    event_id text NOT NULL,
+    user_id text NOT NULL,
+    actions text NOT NULL,
+    notif smallint NOT NULL,
+    highlight smallint NOT NULL
+);
+
+
+
+CREATE TABLE event_push_summary (
+    user_id text NOT NULL,
+    room_id text NOT NULL,
+    notif_count bigint NOT NULL,
+    stream_ordering bigint NOT NULL
+);
+
+
+
+CREATE TABLE event_push_summary_stream_ordering (
+    lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+    stream_ordering bigint NOT NULL,
+    CONSTRAINT event_push_summary_stream_ordering_lock_check CHECK ((lock = 'X'::bpchar))
+);
+
+
+
+CREATE TABLE event_reference_hashes (
+    event_id text,
+    algorithm text,
+    hash bytea
+);
+
+
+
+CREATE TABLE event_relations (
+    event_id text NOT NULL,
+    relates_to_id text NOT NULL,
+    relation_type text NOT NULL,
+    aggregation_key text
+);
+
+
+
+CREATE TABLE event_reports (
+    id bigint NOT NULL,
+    received_ts bigint NOT NULL,
+    room_id text NOT NULL,
+    event_id text NOT NULL,
+    user_id text NOT NULL,
+    reason text,
+    content text
+);
+
+
+
+CREATE TABLE event_search (
+    event_id text,
+    room_id text,
+    sender text,
+    key text,
+    vector tsvector,
+    origin_server_ts bigint,
+    stream_ordering bigint
+);
+
+
+
+CREATE TABLE event_to_state_groups (
+    event_id text NOT NULL,
+    state_group bigint NOT NULL
+);
+
+
+
+CREATE TABLE events (
+    stream_ordering integer NOT NULL,
+    topological_ordering bigint NOT NULL,
+    event_id text NOT NULL,
+    type text NOT NULL,
+    room_id text NOT NULL,
+    content text,
+    unrecognized_keys text,
+    processed boolean NOT NULL,
+    outlier boolean NOT NULL,
+    depth bigint DEFAULT 0 NOT NULL,
+    origin_server_ts bigint,
+    received_ts bigint,
+    sender text,
+    contains_url boolean
+);
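+-- Note that stream_ordering no longer has an AUTOINCREMENT or sequence
+-- attached; the ids are presumably allocated by the application rather than
+-- by the database.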
+
+
+
+CREATE TABLE ex_outlier_stream (
+    event_stream_ordering bigint NOT NULL,
+    event_id text NOT NULL,
+    state_group bigint NOT NULL
+);
+
+
+
+CREATE TABLE federation_stream_position (
+    type text NOT NULL,
+    stream_id integer NOT NULL
+);
+
+
+
+CREATE TABLE group_attestations_remote (
+    group_id text NOT NULL,
+    user_id text NOT NULL,
+    valid_until_ms bigint NOT NULL,
+    attestation_json text NOT NULL
+);
+
+
+
+CREATE TABLE group_attestations_renewals (
+    group_id text NOT NULL,
+    user_id text NOT NULL,
+    valid_until_ms bigint NOT NULL
+);
+
+
+
+CREATE TABLE group_invites (
+    group_id text NOT NULL,
+    user_id text NOT NULL
+);
+
+
+
+CREATE TABLE group_roles (
+    group_id text NOT NULL,
+    role_id text NOT NULL,
+    profile text NOT NULL,
+    is_public boolean NOT NULL
+);
+
+
+
+CREATE TABLE group_room_categories (
+    group_id text NOT NULL,
+    category_id text NOT NULL,
+    profile text NOT NULL,
+    is_public boolean NOT NULL
+);
+
+
+
+CREATE TABLE group_rooms (
+    group_id text NOT NULL,
+    room_id text NOT NULL,
+    is_public boolean NOT NULL
+);
+
+
+
+CREATE TABLE group_summary_roles (
+    group_id text NOT NULL,
+    role_id text NOT NULL,
+    role_order bigint NOT NULL,
+    CONSTRAINT group_summary_roles_role_order_check CHECK ((role_order > 0))
+);
+
+
+
+CREATE TABLE group_summary_room_categories (
+    group_id text NOT NULL,
+    category_id text NOT NULL,
+    cat_order bigint NOT NULL,
+    CONSTRAINT group_summary_room_categories_cat_order_check CHECK ((cat_order > 0))
+);
+
+
+
+CREATE TABLE group_summary_rooms (
+    group_id text NOT NULL,
+    room_id text NOT NULL,
+    category_id text NOT NULL,
+    room_order bigint NOT NULL,
+    is_public boolean NOT NULL,
+    CONSTRAINT group_summary_rooms_room_order_check CHECK ((room_order > 0))
+);
+
+
+
+CREATE TABLE group_summary_users (
+    group_id text NOT NULL,
+    user_id text NOT NULL,
+    role_id text NOT NULL,
+    user_order bigint NOT NULL,
+    is_public boolean NOT NULL
+);
+
+
+
+CREATE TABLE group_users (
+    group_id text NOT NULL,
+    user_id text NOT NULL,
+    is_admin boolean NOT NULL,
+    is_public boolean NOT NULL
+);
+
+
+
+CREATE TABLE groups (
+    group_id text NOT NULL,
+    name text,
+    avatar_url text,
+    short_description text,
+    long_description text,
+    is_public boolean NOT NULL,
+    join_policy text DEFAULT 'invite'::text NOT NULL
+);
+
+
+
+CREATE TABLE guest_access (
+    event_id text NOT NULL,
+    room_id text NOT NULL,
+    guest_access text NOT NULL
+);
+
+
+
+CREATE TABLE history_visibility (
+    event_id text NOT NULL,
+    room_id text NOT NULL,
+    history_visibility text NOT NULL
+);
+
+
+
+CREATE TABLE local_group_membership (
+    group_id text NOT NULL,
+    user_id text NOT NULL,
+    is_admin boolean NOT NULL,
+    membership text NOT NULL,
+    is_publicised boolean NOT NULL,
+    content text NOT NULL
+);
+
+
+
+CREATE TABLE local_group_updates (
+    stream_id bigint NOT NULL,
+    group_id text NOT NULL,
+    user_id text NOT NULL,
+    type text NOT NULL,
+    content text NOT NULL
+);
+
+
+
+CREATE TABLE local_invites (
+    stream_id bigint NOT NULL,
+    inviter text NOT NULL,
+    invitee text NOT NULL,
+    event_id text NOT NULL,
+    room_id text NOT NULL,
+    locally_rejected text,
+    replaced_by text
+);
+
+
+
+CREATE TABLE local_media_repository (
+    media_id text,
+    media_type text,
+    media_length integer,
+    created_ts bigint,
+    upload_name text,
+    user_id text,
+    quarantined_by text,
+    url_cache text,
+    last_access_ts bigint
+);
+
+
+
+CREATE TABLE local_media_repository_thumbnails (
+    media_id text,
+    thumbnail_width integer,
+    thumbnail_height integer,
+    thumbnail_type text,
+    thumbnail_method text,
+    thumbnail_length integer
+);
+
+
+
+CREATE TABLE local_media_repository_url_cache (
+    url text,
+    response_code integer,
+    etag text,
+    expires_ts bigint,
+    og text,
+    media_id text,
+    download_ts bigint
+);
+
+
+
+CREATE TABLE monthly_active_users (
+    user_id text NOT NULL,
+    "timestamp" bigint NOT NULL
+);
+
+
+
+CREATE TABLE open_id_tokens (
+    token text NOT NULL,
+    ts_valid_until_ms bigint NOT NULL,
+    user_id text NOT NULL
+);
+
+
+
+CREATE TABLE presence (
+    user_id text NOT NULL,
+    state character varying(20),
+    status_msg text,
+    mtime bigint
+);
+
+
+
+CREATE TABLE presence_allow_inbound (
+    observed_user_id text NOT NULL,
+    observer_user_id text NOT NULL
+);
+
+
+
+CREATE TABLE presence_stream (
+    stream_id bigint,
+    user_id text,
+    state text,
+    last_active_ts bigint,
+    last_federation_update_ts bigint,
+    last_user_sync_ts bigint,
+    status_msg text,
+    currently_active boolean
+);
+
+
+
+CREATE TABLE profiles (
+    user_id text NOT NULL,
+    displayname text,
+    avatar_url text
+);
+
+
+
+CREATE TABLE public_room_list_stream (
+    stream_id bigint NOT NULL,
+    room_id text NOT NULL,
+    visibility boolean NOT NULL,
+    appservice_id text,
+    network_id text
+);
+
+
+
+CREATE TABLE push_rules (
+    id bigint NOT NULL,
+    user_name text NOT NULL,
+    rule_id text NOT NULL,
+    priority_class smallint NOT NULL,
+    priority integer DEFAULT 0 NOT NULL,
+    conditions text NOT NULL,
+    actions text NOT NULL
+);
+
+
+
+CREATE TABLE push_rules_enable (
+    id bigint NOT NULL,
+    user_name text NOT NULL,
+    rule_id text NOT NULL,
+    enabled smallint
+);
+
+
+
+CREATE TABLE push_rules_stream (
+    stream_id bigint NOT NULL,
+    event_stream_ordering bigint NOT NULL,
+    user_id text NOT NULL,
+    rule_id text NOT NULL,
+    op text NOT NULL,
+    priority_class smallint,
+    priority integer,
+    conditions text,
+    actions text
+);
+
+
+
+CREATE TABLE pusher_throttle (
+    pusher bigint NOT NULL,
+    room_id text NOT NULL,
+    last_sent_ts bigint,
+    throttle_ms bigint
+);
+
+
+
+CREATE TABLE pushers (
+    id bigint NOT NULL,
+    user_name text NOT NULL,
+    access_token bigint,
+    profile_tag text NOT NULL,
+    kind text NOT NULL,
+    app_id text NOT NULL,
+    app_display_name text NOT NULL,
+    device_display_name text NOT NULL,
+    pushkey text NOT NULL,
+    ts bigint NOT NULL,
+    lang text,
+    data text,
+    last_stream_ordering integer,
+    last_success bigint,
+    failing_since bigint
+);
+
+
+
+CREATE TABLE ratelimit_override (
+    user_id text NOT NULL,
+    messages_per_second bigint,
+    burst_count bigint
+);
+
+
+
+CREATE TABLE receipts_graph (
+    room_id text NOT NULL,
+    receipt_type text NOT NULL,
+    user_id text NOT NULL,
+    event_ids text NOT NULL,
+    data text NOT NULL
+);
+
+
+
+CREATE TABLE receipts_linearized (
+    stream_id bigint NOT NULL,
+    room_id text NOT NULL,
+    receipt_type text NOT NULL,
+    user_id text NOT NULL,
+    event_id text NOT NULL,
+    data text NOT NULL
+);
+
+
+
+CREATE TABLE received_transactions (
+    transaction_id text,
+    origin text,
+    ts bigint,
+    response_code integer,
+    response_json bytea,
+    has_been_referenced smallint DEFAULT 0
+);
+
+
+
+CREATE TABLE redactions (
+    event_id text NOT NULL,
+    redacts text NOT NULL
+);
+
+
+
+CREATE TABLE rejections (
+    event_id text NOT NULL,
+    reason text NOT NULL,
+    last_check text NOT NULL
+);
+
+
+
+CREATE TABLE remote_media_cache (
+    media_origin text,
+    media_id text,
+    media_type text,
+    created_ts bigint,
+    upload_name text,
+    media_length integer,
+    filesystem_id text,
+    last_access_ts bigint,
+    quarantined_by text
+);
+
+
+
+CREATE TABLE remote_media_cache_thumbnails (
+    media_origin text,
+    media_id text,
+    thumbnail_width integer,
+    thumbnail_height integer,
+    thumbnail_method text,
+    thumbnail_type text,
+    thumbnail_length integer,
+    filesystem_id text
+);
+
+
+
+CREATE TABLE remote_profile_cache (
+    user_id text NOT NULL,
+    displayname text,
+    avatar_url text,
+    last_check bigint NOT NULL
+);
+
+
+
+CREATE TABLE room_account_data (
+    user_id text NOT NULL,
+    room_id text NOT NULL,
+    account_data_type text NOT NULL,
+    stream_id bigint NOT NULL,
+    content text NOT NULL
+);
+
+
+
+CREATE TABLE room_alias_servers (
+    room_alias text NOT NULL,
+    server text NOT NULL
+);
+
+
+
+CREATE TABLE room_aliases (
+    room_alias text NOT NULL,
+    room_id text NOT NULL,
+    creator text
+);
+
+
+
+CREATE TABLE room_depth (
+    room_id text NOT NULL,
+    min_depth integer NOT NULL
+);
+
+
+
+CREATE TABLE room_memberships (
+    event_id text NOT NULL,
+    user_id text NOT NULL,
+    sender text NOT NULL,
+    room_id text NOT NULL,
+    membership text NOT NULL,
+    forgotten integer DEFAULT 0,
+    display_name text,
+    avatar_url text
+);
+
+
+
+CREATE TABLE room_names (
+    event_id text NOT NULL,
+    room_id text NOT NULL,
+    name text NOT NULL
+);
+
+
+
+CREATE TABLE room_state (
+    room_id text NOT NULL,
+    join_rules text,
+    history_visibility text,
+    encryption text,
+    name text,
+    topic text,
+    avatar text,
+    canonical_alias text
+);
+
+
+
+CREATE TABLE room_stats (
+    room_id text NOT NULL,
+    ts bigint NOT NULL,
+    bucket_size integer NOT NULL,
+    current_state_events integer NOT NULL,
+    joined_members integer NOT NULL,
+    invited_members integer NOT NULL,
+    left_members integer NOT NULL,
+    banned_members integer NOT NULL,
+    state_events integer NOT NULL
+);
+
+
+
+CREATE TABLE room_stats_earliest_token (
+    room_id text NOT NULL,
+    token bigint NOT NULL
+);
+
+
+
+CREATE TABLE room_tags (
+    user_id text NOT NULL,
+    room_id text NOT NULL,
+    tag text NOT NULL,
+    content text NOT NULL
+);
+
+
+
+CREATE TABLE room_tags_revisions (
+    user_id text NOT NULL,
+    room_id text NOT NULL,
+    stream_id bigint NOT NULL
+);
+
+
+
+CREATE TABLE rooms (
+    room_id text NOT NULL,
+    is_public boolean,
+    creator text
+);
+
+
+
+CREATE TABLE server_keys_json (
+    server_name text NOT NULL,
+    key_id text NOT NULL,
+    from_server text NOT NULL,
+    ts_added_ms bigint NOT NULL,
+    ts_valid_until_ms bigint NOT NULL,
+    key_json bytea NOT NULL
+);
+
+
+
+CREATE TABLE server_signature_keys (
+    server_name text,
+    key_id text,
+    from_server text,
+    ts_added_ms bigint,
+    verify_key bytea,
+    ts_valid_until_ms bigint
+);
+
+
+
+CREATE TABLE state_events (
+    event_id text NOT NULL,
+    room_id text NOT NULL,
+    type text NOT NULL,
+    state_key text NOT NULL,
+    prev_state text
+);
+
+
+
+CREATE TABLE state_group_edges (
+    state_group bigint NOT NULL,
+    prev_state_group bigint NOT NULL
+);
+
+
+
+CREATE SEQUENCE state_group_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
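+-- This sequence presumably backs allocation of state_groups.id values; the
+-- next id would be fetched with (illustrative only):
+--   SELECT nextval('state_group_id_seq');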
+
+
+
+CREATE TABLE state_groups (
+    id bigint NOT NULL,
+    room_id text NOT NULL,
+    event_id text NOT NULL
+);
+
+
+
+CREATE TABLE state_groups_state (
+    state_group bigint NOT NULL,
+    room_id text NOT NULL,
+    type text NOT NULL,
+    state_key text NOT NULL,
+    event_id text NOT NULL
+);
+
+
+
+CREATE TABLE stats_stream_pos (
+    lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+    stream_id bigint,
+    CONSTRAINT stats_stream_pos_lock_check CHECK ((lock = 'X'::bpchar))
+);
+
+
+
+CREATE TABLE stream_ordering_to_exterm (
+    stream_ordering bigint NOT NULL,
+    room_id text NOT NULL,
+    event_id text NOT NULL
+);
+
+
+
+CREATE TABLE threepid_guest_access_tokens (
+    medium text,
+    address text,
+    guest_access_token text,
+    first_inviter text
+);
+
+
+
+CREATE TABLE topics (
+    event_id text NOT NULL,
+    room_id text NOT NULL,
+    topic text NOT NULL
+);
+
+
+
+CREATE TABLE user_daily_visits (
+    user_id text NOT NULL,
+    device_id text,
+    "timestamp" bigint NOT NULL
+);
+
+
+
+CREATE TABLE user_directory (
+    user_id text NOT NULL,
+    room_id text,
+    display_name text,
+    avatar_url text
+);
+
+
+
+CREATE TABLE user_directory_search (
+    user_id text NOT NULL,
+    vector tsvector
+);
+
+
+
+CREATE TABLE user_directory_stream_pos (
+    lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+    stream_id bigint,
+    CONSTRAINT user_directory_stream_pos_lock_check CHECK ((lock = 'X'::bpchar))
+);
+
+
+
+CREATE TABLE user_filters (
+    user_id text,
+    filter_id bigint,
+    filter_json bytea
+);
+
+
+
+CREATE TABLE user_ips (
+    user_id text NOT NULL,
+    access_token text NOT NULL,
+    device_id text,
+    ip text NOT NULL,
+    user_agent text NOT NULL,
+    last_seen bigint NOT NULL
+);
+
+
+
+CREATE TABLE user_stats (
+    user_id text NOT NULL,
+    ts bigint NOT NULL,
+    bucket_size integer NOT NULL,
+    public_rooms integer NOT NULL,
+    private_rooms integer NOT NULL
+);
+
+
+
+CREATE TABLE user_threepid_id_server (
+    user_id text NOT NULL,
+    medium text NOT NULL,
+    address text NOT NULL,
+    id_server text NOT NULL
+);
+
+
+
+CREATE TABLE user_threepids (
+    user_id text NOT NULL,
+    medium text NOT NULL,
+    address text NOT NULL,
+    validated_at bigint NOT NULL,
+    added_at bigint NOT NULL
+);
+
+
+
+CREATE TABLE users (
+    name text,
+    password_hash text,
+    creation_ts bigint,
+    admin smallint DEFAULT 0 NOT NULL,
+    upgrade_ts bigint,
+    is_guest smallint DEFAULT 0 NOT NULL,
+    appservice_id text,
+    consent_version text,
+    consent_server_notice_sent text,
+    user_type text
+);
+
+
+
+CREATE TABLE users_in_public_rooms (
+    user_id text NOT NULL,
+    room_id text NOT NULL
+);
+
+
+
+CREATE TABLE users_pending_deactivation (
+    user_id text NOT NULL
+);
+
+
+
+CREATE TABLE users_who_share_private_rooms (
+    user_id text NOT NULL,
+    other_user_id text NOT NULL,
+    room_id text NOT NULL
+);
+
+
+
+ALTER TABLE ONLY access_tokens
+    ADD CONSTRAINT access_tokens_pkey PRIMARY KEY (id);
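+-- "ALTER TABLE ONLY" is standard pg_dump output: ONLY keeps the change from
+-- recursing into child (inherited) tables, of which these tables have none.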
+
+
+
+ALTER TABLE ONLY access_tokens
+    ADD CONSTRAINT access_tokens_token_key UNIQUE (token);
+
+
+
+ALTER TABLE ONLY account_data
+    ADD CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type);
+
+
+
+ALTER TABLE ONLY account_validity
+    ADD CONSTRAINT account_validity_pkey PRIMARY KEY (user_id);
+
+
+
+ALTER TABLE ONLY application_services_state
+    ADD CONSTRAINT application_services_state_pkey PRIMARY KEY (as_id);
+
+
+
+ALTER TABLE ONLY application_services_txns
+    ADD CONSTRAINT application_services_txns_as_id_txn_id_key UNIQUE (as_id, txn_id);
+
+
+
+ALTER TABLE ONLY appservice_stream_position
+    ADD CONSTRAINT appservice_stream_position_lock_key UNIQUE (lock);
+
+
+
+ALTER TABLE ONLY background_updates
+    ADD CONSTRAINT background_updates_uniqueness UNIQUE (update_name);
+
+
+
+ALTER TABLE ONLY current_state_events
+    ADD CONSTRAINT current_state_events_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY current_state_events
+    ADD CONSTRAINT current_state_events_room_id_type_state_key_key UNIQUE (room_id, type, state_key);
+
+
+
+ALTER TABLE ONLY destinations
+    ADD CONSTRAINT destinations_pkey PRIMARY KEY (destination);
+
+
+
+ALTER TABLE ONLY devices
+    ADD CONSTRAINT device_uniqueness UNIQUE (user_id, device_id);
+
+
+
+ALTER TABLE ONLY e2e_device_keys_json
+    ADD CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id);
+
+
+
+ALTER TABLE ONLY e2e_one_time_keys_json
+    ADD CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id);
+
+
+
+ALTER TABLE ONLY event_backward_extremities
+    ADD CONSTRAINT event_backward_extremities_event_id_room_id_key UNIQUE (event_id, room_id);
+
+
+
+ALTER TABLE ONLY event_edges
+    ADD CONSTRAINT event_edges_event_id_prev_event_id_room_id_is_state_key UNIQUE (event_id, prev_event_id, room_id, is_state);
+
+
+
+ALTER TABLE ONLY event_forward_extremities
+    ADD CONSTRAINT event_forward_extremities_event_id_room_id_key UNIQUE (event_id, room_id);
+
+
+
+ALTER TABLE ONLY event_push_actions
+    ADD CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag);
+
+
+
+ALTER TABLE ONLY event_json
+    ADD CONSTRAINT event_json_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY event_push_summary_stream_ordering
+    ADD CONSTRAINT event_push_summary_stream_ordering_lock_key UNIQUE (lock);
+
+
+
+ALTER TABLE ONLY event_reference_hashes
+    ADD CONSTRAINT event_reference_hashes_event_id_algorithm_key UNIQUE (event_id, algorithm);
+
+
+
+ALTER TABLE ONLY event_reports
+    ADD CONSTRAINT event_reports_pkey PRIMARY KEY (id);
+
+
+
+ALTER TABLE ONLY event_to_state_groups
+    ADD CONSTRAINT event_to_state_groups_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY events
+    ADD CONSTRAINT events_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY events
+    ADD CONSTRAINT events_pkey PRIMARY KEY (stream_ordering);
+
+
+
+ALTER TABLE ONLY ex_outlier_stream
+    ADD CONSTRAINT ex_outlier_stream_pkey PRIMARY KEY (event_stream_ordering);
+
+
+
+ALTER TABLE ONLY group_roles
+    ADD CONSTRAINT group_roles_group_id_role_id_key UNIQUE (group_id, role_id);
+
+
+
+ALTER TABLE ONLY group_room_categories
+    ADD CONSTRAINT group_room_categories_group_id_category_id_key UNIQUE (group_id, category_id);
+
+
+
+ALTER TABLE ONLY group_summary_roles
+    ADD CONSTRAINT group_summary_roles_group_id_role_id_role_order_key UNIQUE (group_id, role_id, role_order);
+
+
+
+ALTER TABLE ONLY group_summary_room_categories
+    ADD CONSTRAINT group_summary_room_categories_group_id_category_id_cat_orde_key UNIQUE (group_id, category_id, cat_order);
+
+
+
+ALTER TABLE ONLY group_summary_rooms
+    ADD CONSTRAINT group_summary_rooms_group_id_category_id_room_id_room_order_key UNIQUE (group_id, category_id, room_id, room_order);
+
+
+
+ALTER TABLE ONLY guest_access
+    ADD CONSTRAINT guest_access_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY history_visibility
+    ADD CONSTRAINT history_visibility_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY local_media_repository
+    ADD CONSTRAINT local_media_repository_media_id_key UNIQUE (media_id);
+
+
+
+ALTER TABLE ONLY local_media_repository_thumbnails
+    ADD CONSTRAINT local_media_repository_thumbn_media_id_thumbnail_width_thum_key UNIQUE (media_id, thumbnail_width, thumbnail_height, thumbnail_type);
+
+
+
+ALTER TABLE ONLY user_threepids
+    ADD CONSTRAINT medium_address UNIQUE (medium, address);
+
+
+
+ALTER TABLE ONLY open_id_tokens
+    ADD CONSTRAINT open_id_tokens_pkey PRIMARY KEY (token);
+
+
+
+ALTER TABLE ONLY presence_allow_inbound
+    ADD CONSTRAINT presence_allow_inbound_observed_user_id_observer_user_id_key UNIQUE (observed_user_id, observer_user_id);
+
+
+
+ALTER TABLE ONLY presence
+    ADD CONSTRAINT presence_user_id_key UNIQUE (user_id);
+
+
+
+ALTER TABLE ONLY account_data_max_stream_id
+    ADD CONSTRAINT private_user_data_max_stream_id_lock_key UNIQUE (lock);
+
+
+
+ALTER TABLE ONLY profiles
+    ADD CONSTRAINT profiles_user_id_key UNIQUE (user_id);
+
+
+
+ALTER TABLE ONLY push_rules_enable
+    ADD CONSTRAINT push_rules_enable_pkey PRIMARY KEY (id);
+
+
+
+ALTER TABLE ONLY push_rules_enable
+    ADD CONSTRAINT push_rules_enable_user_name_rule_id_key UNIQUE (user_name, rule_id);
+
+
+
+ALTER TABLE ONLY push_rules
+    ADD CONSTRAINT push_rules_pkey PRIMARY KEY (id);
+
+
+
+ALTER TABLE ONLY push_rules
+    ADD CONSTRAINT push_rules_user_name_rule_id_key UNIQUE (user_name, rule_id);
+
+
+
+ALTER TABLE ONLY pusher_throttle
+    ADD CONSTRAINT pusher_throttle_pkey PRIMARY KEY (pusher, room_id);
+
+
+
+ALTER TABLE ONLY pushers
+    ADD CONSTRAINT pushers2_app_id_pushkey_user_name_key UNIQUE (app_id, pushkey, user_name);
+
+
+
+ALTER TABLE ONLY pushers
+    ADD CONSTRAINT pushers2_pkey PRIMARY KEY (id);
+
+
+
+ALTER TABLE ONLY receipts_graph
+    ADD CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id);
+
+
+
+ALTER TABLE ONLY receipts_linearized
+    ADD CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id);
+
+
+
+ALTER TABLE ONLY received_transactions
+    ADD CONSTRAINT received_transactions_transaction_id_origin_key UNIQUE (transaction_id, origin);
+
+
+
+ALTER TABLE ONLY redactions
+    ADD CONSTRAINT redactions_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY rejections
+    ADD CONSTRAINT rejections_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY remote_media_cache
+    ADD CONSTRAINT remote_media_cache_media_origin_media_id_key UNIQUE (media_origin, media_id);
+
+
+
+ALTER TABLE ONLY remote_media_cache_thumbnails
+    ADD CONSTRAINT remote_media_cache_thumbnails_media_origin_media_id_thumbna_key UNIQUE (media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type);
+
+
+
+ALTER TABLE ONLY room_account_data
+    ADD CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type);
+
+
+
+ALTER TABLE ONLY room_aliases
+    ADD CONSTRAINT room_aliases_room_alias_key UNIQUE (room_alias);
+
+
+
+ALTER TABLE ONLY room_depth
+    ADD CONSTRAINT room_depth_room_id_key UNIQUE (room_id);
+
+
+
+ALTER TABLE ONLY room_memberships
+    ADD CONSTRAINT room_memberships_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY room_names
+    ADD CONSTRAINT room_names_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY room_tags_revisions
+    ADD CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id);
+
+
+
+ALTER TABLE ONLY room_tags
+    ADD CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag);
+
+
+
+ALTER TABLE ONLY rooms
+    ADD CONSTRAINT rooms_pkey PRIMARY KEY (room_id);
+
+
+
+ALTER TABLE ONLY server_keys_json
+    ADD CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server);
+
+
+
+ALTER TABLE ONLY server_signature_keys
+    ADD CONSTRAINT server_signature_keys_server_name_key_id_key UNIQUE (server_name, key_id);
+
+
+
+ALTER TABLE ONLY state_events
+    ADD CONSTRAINT state_events_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY state_groups
+    ADD CONSTRAINT state_groups_pkey PRIMARY KEY (id);
+
+
+
+ALTER TABLE ONLY stats_stream_pos
+    ADD CONSTRAINT stats_stream_pos_lock_key UNIQUE (lock);
+
+
+
+ALTER TABLE ONLY topics
+    ADD CONSTRAINT topics_event_id_key UNIQUE (event_id);
+
+
+
+ALTER TABLE ONLY user_directory_stream_pos
+    ADD CONSTRAINT user_directory_stream_pos_lock_key UNIQUE (lock);
+
+
+
+ALTER TABLE ONLY users
+    ADD CONSTRAINT users_name_key UNIQUE (name);
+
+
+
+CREATE INDEX access_tokens_device_id ON access_tokens USING btree (user_id, device_id);
+
+
+
+CREATE INDEX account_data_stream_id ON account_data USING btree (user_id, stream_id);
+
+
+
+CREATE INDEX application_services_txns_id ON application_services_txns USING btree (as_id);
+
+
+
+CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list USING btree (appservice_id, network_id, room_id);
+
+
+
+CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms USING btree (room_id);
+
+
+
+CREATE INDEX cache_invalidation_stream_id ON cache_invalidation_stream USING btree (stream_id);
+
+
+
+CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream USING btree (stream_id);
+
+
+
+CREATE INDEX current_state_events_member_index ON current_state_events USING btree (state_key) WHERE (type = 'm.room.member'::text);
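+-- A partial index: it only covers m.room.member rows, keeping it small while
+-- serving membership lookups such as (user id is illustrative only):
+--   SELECT room_id FROM current_state_events
+--   WHERE type = 'm.room.member' AND state_key = '@alice:example.com';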
+
+
+
+CREATE INDEX deleted_pushers_stream_id ON deleted_pushers USING btree (stream_id);
+
+
+
+CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox USING btree (origin, message_id);
+
+
+
+CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox USING btree (destination, stream_id);
+
+
+
+CREATE INDEX device_federation_outbox_id ON device_federation_outbox USING btree (stream_id);
+
+
+
+CREATE INDEX device_inbox_stream_id_user_id ON device_inbox USING btree (stream_id, user_id);
+
+
+
+CREATE INDEX device_inbox_user_stream_id ON device_inbox USING btree (user_id, device_id, stream_id);
+
+
+
+CREATE INDEX device_lists_outbound_last_success_idx ON device_lists_outbound_last_success USING btree (destination, user_id, stream_id);
+
+
+
+CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes USING btree (destination, stream_id);
+
+
+
+CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes USING btree (stream_id);
+
+
+
+CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes USING btree (destination, user_id);
+
+
+
+CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache USING btree (user_id, device_id);
+
+
+
+CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties USING btree (user_id);
+
+
+
+CREATE INDEX device_lists_stream_id ON device_lists_stream USING btree (stream_id, user_id);
+
+
+
+CREATE INDEX device_lists_stream_user_id ON device_lists_stream USING btree (user_id, device_id);
+
+
+
+CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys USING btree (user_id, room_id, session_id);
+
+
+
+CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions USING btree (user_id, version);
+
+
+
+CREATE UNIQUE INDEX erased_users_user ON erased_users USING btree (user_id);
+
+
+
+CREATE INDEX ev_b_extrem_id ON event_backward_extremities USING btree (event_id);
+
+
+
+CREATE INDEX ev_b_extrem_room ON event_backward_extremities USING btree (room_id);
+
+
+
+CREATE INDEX ev_edges_id ON event_edges USING btree (event_id);
+
+
+
+CREATE INDEX ev_edges_prev_id ON event_edges USING btree (prev_event_id);
+
+
+
+CREATE INDEX ev_extrem_id ON event_forward_extremities USING btree (event_id);
+
+
+
+CREATE INDEX ev_extrem_room ON event_forward_extremities USING btree (room_id);
+
+
+
+CREATE INDEX evauth_edges_id ON event_auth USING btree (event_id);
+
+
+
+CREATE INDEX event_contains_url_index ON events USING btree (room_id, topological_ordering, stream_ordering) WHERE ((contains_url = true) AND (outlier = false));
+
+
+
+CREATE INDEX event_json_room_id ON event_json USING btree (room_id);
+
+
+
+CREATE INDEX event_push_actions_highlights_index ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering) WHERE (highlight = 1);
+
+
+
+CREATE INDEX event_push_actions_rm_tokens ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering);
+
+
+
+CREATE INDEX event_push_actions_room_id_user_id ON event_push_actions USING btree (room_id, user_id);
+
+
+
+CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging USING btree (event_id);
+
+
+
+CREATE INDEX event_push_actions_stream_ordering ON event_push_actions USING btree (stream_ordering, user_id);
+
+
+
+CREATE INDEX event_push_actions_u_highlight ON event_push_actions USING btree (user_id, stream_ordering);
+
+
+
+CREATE INDEX event_push_summary_user_rm ON event_push_summary USING btree (user_id, room_id);
+
+
+
+CREATE INDEX event_reference_hashes_id ON event_reference_hashes USING btree (event_id);
+
+
+
+CREATE UNIQUE INDEX event_relations_id ON event_relations USING btree (event_id);
+
+
+
+CREATE INDEX event_relations_relates ON event_relations USING btree (relates_to_id, relation_type, aggregation_key);
+
+
+
+CREATE INDEX event_search_ev_ridx ON event_search USING btree (room_id);
+
+
+
+CREATE UNIQUE INDEX event_search_event_id_idx ON event_search USING btree (event_id);
+
+
+
+CREATE INDEX event_search_fts_idx ON event_search USING gin (vector);
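+-- GIN index backing full-text search over event_search.vector; a query would
+-- match against it with the tsquery operator (illustrative only):
+--   SELECT event_id FROM event_search
+--   WHERE vector @@ plainto_tsquery('english', 'some search terms');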
+
+
+
+CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups USING btree (state_group);
+
+
+
+CREATE INDEX events_order_room ON events USING btree (room_id, topological_ordering, stream_ordering);
+
+
+
+CREATE INDEX events_room_stream ON events USING btree (room_id, stream_ordering);
+
+
+
+CREATE INDEX events_ts ON events USING btree (origin_server_ts, stream_ordering);
+
+
+
+CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote USING btree (group_id, user_id);
+
+
+
+CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote USING btree (user_id);
+
+
+
+CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote USING btree (valid_until_ms);
+
+
+
+CREATE INDEX group_attestations_renewals_g_idx ON group_attestations_renewals USING btree (group_id, user_id);
+
+
+
+CREATE INDEX group_attestations_renewals_u_idx ON group_attestations_renewals USING btree (user_id);
+
+
+
+CREATE INDEX group_attestations_renewals_v_idx ON group_attestations_renewals USING btree (valid_until_ms);
+
+
+
+CREATE UNIQUE INDEX group_invites_g_idx ON group_invites USING btree (group_id, user_id);
+
+
+
+CREATE INDEX group_invites_u_idx ON group_invites USING btree (user_id);
+
+
+
+CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms USING btree (group_id, room_id);
+
+
+
+CREATE INDEX group_rooms_r_idx ON group_rooms USING btree (room_id);
+
+
+
+CREATE UNIQUE INDEX group_summary_rooms_g_idx ON group_summary_rooms USING btree (group_id, room_id, category_id);
+
+
+
+CREATE INDEX group_summary_users_g_idx ON group_summary_users USING btree (group_id);
+
+
+
+CREATE UNIQUE INDEX group_users_g_idx ON group_users USING btree (group_id, user_id);
+
+
+
+CREATE INDEX group_users_u_idx ON group_users USING btree (user_id);
+
+
+
+CREATE UNIQUE INDEX groups_idx ON groups USING btree (group_id);
+
+
+
+CREATE INDEX local_group_membership_g_idx ON local_group_membership USING btree (group_id);
+
+
+
+CREATE INDEX local_group_membership_u_idx ON local_group_membership USING btree (user_id, group_id);
+
+
+
+CREATE INDEX local_invites_for_user_idx ON local_invites USING btree (invitee, locally_rejected, replaced_by, room_id);
+
+
+
+CREATE INDEX local_invites_id ON local_invites USING btree (stream_id);
+
+
+
+CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails USING btree (media_id);
+
+
+
+CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache USING btree (url, download_ts);
+
+
+
+CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache USING btree (expires_ts);
+
+
+
+CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache USING btree (media_id);
+
+
+
+CREATE INDEX local_media_repository_url_idx ON local_media_repository USING btree (created_ts) WHERE (url_cache IS NOT NULL);
+
+
+
+CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users USING btree ("timestamp");
+
+
+
+CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users USING btree (user_id);
+
+
+
+CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens USING btree (ts_valid_until_ms);
+
+
+
+CREATE INDEX presence_stream_id ON presence_stream USING btree (stream_id, user_id);
+
+
+
+CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id);
+
+
+
+CREATE INDEX public_room_index ON rooms USING btree (is_public);
+
+
+
+CREATE INDEX public_room_list_stream_idx ON public_room_list_stream USING btree (stream_id);
+
+
+
+CREATE INDEX public_room_list_stream_rm_idx ON public_room_list_stream USING btree (room_id, stream_id);
+
+
+
+CREATE INDEX push_rules_enable_user_name ON push_rules_enable USING btree (user_name);
+
+
+
+CREATE INDEX push_rules_stream_id ON push_rules_stream USING btree (stream_id);
+
+
+
+CREATE INDEX push_rules_stream_user_stream_id ON push_rules_stream USING btree (user_id, stream_id);
+
+
+
+CREATE INDEX push_rules_user_name ON push_rules USING btree (user_name);
+
+
+
+CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override USING btree (user_id);
+
+
+
+CREATE INDEX receipts_linearized_id ON receipts_linearized USING btree (stream_id);
+
+
+
+CREATE INDEX receipts_linearized_room_stream ON receipts_linearized USING btree (room_id, stream_id);
+
+
+
+CREATE INDEX receipts_linearized_user ON receipts_linearized USING btree (user_id);
+
+
+
+CREATE INDEX received_transactions_ts ON received_transactions USING btree (ts);
+
+
+
+CREATE INDEX redactions_redacts ON redactions USING btree (redacts);
+
+
+
+CREATE INDEX remote_profile_cache_time ON remote_profile_cache USING btree (last_check);
+
+
+
+CREATE UNIQUE INDEX remote_profile_cache_user_id ON remote_profile_cache USING btree (user_id);
+
+
+
+CREATE INDEX room_account_data_stream_id ON room_account_data USING btree (user_id, stream_id);
+
+
+
+CREATE INDEX room_alias_servers_alias ON room_alias_servers USING btree (room_alias);
+
+
+
+CREATE INDEX room_aliases_id ON room_aliases USING btree (room_id);
+
+
+
+CREATE INDEX room_depth_room ON room_depth USING btree (room_id);
+
+
+
+CREATE INDEX room_memberships_room_id ON room_memberships USING btree (room_id);
+
+
+
+CREATE INDEX room_memberships_user_id ON room_memberships USING btree (user_id);
+
+
+
+CREATE INDEX room_names_room_id ON room_names USING btree (room_id);
+
+
+
+CREATE UNIQUE INDEX room_state_room ON room_state USING btree (room_id);
+
+
+
+CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token USING btree (room_id);
+
+
+
+CREATE UNIQUE INDEX room_stats_room_ts ON room_stats USING btree (room_id, ts);
+
+
+
+CREATE INDEX state_group_edges_idx ON state_group_edges USING btree (state_group);
+
+
+
+CREATE INDEX state_group_edges_prev_idx ON state_group_edges USING btree (prev_state_group);
+
+
+
+CREATE INDEX state_groups_state_type_idx ON state_groups_state USING btree (state_group, type, state_key);
+
+
+
+CREATE INDEX stream_ordering_to_exterm_idx ON stream_ordering_to_exterm USING btree (stream_ordering);
+
+
+
+CREATE INDEX stream_ordering_to_exterm_rm_idx ON stream_ordering_to_exterm USING btree (room_id, stream_ordering);
+
+
+
+CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens USING btree (medium, address);
+
+
+
+CREATE INDEX topics_room_id ON topics USING btree (room_id);
+
+
+
+CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits USING btree ("timestamp");
+
+
+
+CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits USING btree (user_id, "timestamp");
+
+
+
+CREATE INDEX user_directory_room_idx ON user_directory USING btree (room_id);
+
+
+
+CREATE INDEX user_directory_search_fts_idx ON user_directory_search USING gin (vector);
+
+
+
+CREATE UNIQUE INDEX user_directory_search_user_idx ON user_directory_search USING btree (user_id);
+
+
+
+CREATE UNIQUE INDEX user_directory_user_idx ON user_directory USING btree (user_id);
+
+
+
+CREATE INDEX user_filters_by_user_id_filter_id ON user_filters USING btree (user_id, filter_id);
+
+
+
+CREATE INDEX user_ips_device_id ON user_ips USING btree (user_id, device_id, last_seen);
+
+
+
+CREATE INDEX user_ips_last_seen ON user_ips USING btree (user_id, last_seen);
+
+
+
+CREATE INDEX user_ips_last_seen_only ON user_ips USING btree (last_seen);
+
+
+
+CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips USING btree (user_id, access_token, ip);
+
+
+
+CREATE UNIQUE INDEX user_stats_user_ts ON user_stats USING btree (user_id, ts);
+
+
+
+CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server USING btree (user_id, medium, address, id_server);
+
+
+
+CREATE INDEX user_threepids_medium_address ON user_threepids USING btree (medium, address);
+
+
+
+CREATE INDEX user_threepids_user_id ON user_threepids USING btree (user_id);
+
+
+
+CREATE INDEX users_creation_ts ON users USING btree (creation_ts);
+
+
+
+CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms USING btree (user_id, room_id);
+
+
+
+CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms USING btree (other_user_id);
+
+
+
+CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms USING btree (room_id);
+
+
+
+CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms USING btree (user_id, other_user_id, room_id);
+
+
+
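Several of the indexes above are partial indexes (event_contains_url_index, event_push_actions_highlights_index, local_media_repository_url_idx): only rows matching the WHERE predicate are indexed, which keeps the index small for the one query it serves. A minimal sketch of the same idea, using Python's bundled sqlite3 module (which also supports partial indexes) instead of a live postgres server; the table is cut down and the row is made up:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.execute(
        "CREATE TABLE events (room_id TEXT, topological_ordering BIGINT,"
        " stream_ordering BIGINT, contains_url BOOLEAN, outlier BOOLEAN)"
    )
    # Mirrors event_contains_url_index: only rows satisfying the predicate
    # enter the index.
    cur.execute(
        "CREATE INDEX event_contains_url_index ON events"
        " (room_id, topological_ordering, stream_ordering)"
        " WHERE contains_url = 1 AND outlier = 0"
    )
    cur.execute("INSERT INTO events VALUES ('!r:hs', 1, 1, 1, 0)")
    # The planner can use the partial index because the query repeats the
    # index predicate verbatim.
    for row in cur.execute(
        "EXPLAIN QUERY PLAN SELECT * FROM events"
        " WHERE room_id = '!r:hs' AND contains_url = 1 AND outlier = 0"
    ):
        print(row)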
diff --git a/synapse/storage/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/schema/full_schemas/54/full.sql.sqlite
new file mode 100644
index 0000000000..be9295e4c9
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/54/full.sql.sqlite
@@ -0,0 +1,260 @@
+CREATE TABLE application_services_state( as_id TEXT PRIMARY KEY, state VARCHAR(5), last_txn INTEGER );
+CREATE TABLE application_services_txns( as_id TEXT NOT NULL, txn_id INTEGER NOT NULL, event_ids TEXT NOT NULL, UNIQUE(as_id, txn_id) );
+CREATE INDEX application_services_txns_id ON application_services_txns ( as_id );
+CREATE TABLE presence( user_id TEXT NOT NULL, state VARCHAR(20), status_msg TEXT, mtime BIGINT, UNIQUE (user_id) );
+CREATE TABLE presence_allow_inbound( observed_user_id TEXT NOT NULL, observer_user_id TEXT NOT NULL, UNIQUE (observed_user_id, observer_user_id) );
+CREATE TABLE users( name TEXT, password_hash TEXT, creation_ts BIGINT, admin SMALLINT DEFAULT 0 NOT NULL, upgrade_ts BIGINT, is_guest SMALLINT DEFAULT 0 NOT NULL, appservice_id TEXT, consent_version TEXT, consent_server_notice_sent TEXT, user_type TEXT DEFAULT NULL, UNIQUE(name) );
+CREATE TABLE access_tokens( id BIGINT PRIMARY KEY, user_id TEXT NOT NULL, device_id TEXT, token TEXT NOT NULL, last_used BIGINT, UNIQUE(token) );
+CREATE TABLE user_ips ( user_id TEXT NOT NULL, access_token TEXT NOT NULL, device_id TEXT, ip TEXT NOT NULL, user_agent TEXT NOT NULL, last_seen BIGINT NOT NULL );
+CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, UNIQUE(user_id) );
+CREATE TABLE received_transactions( transaction_id TEXT, origin TEXT, ts BIGINT, response_code INTEGER, response_json bytea, has_been_referenced smallint default 0, UNIQUE (transaction_id, origin) );
+CREATE TABLE destinations( destination TEXT PRIMARY KEY, retry_last_ts BIGINT, retry_interval INTEGER );
+CREATE TABLE events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, event_id TEXT NOT NULL, type TEXT NOT NULL, room_id TEXT NOT NULL, content TEXT, unrecognized_keys TEXT, processed BOOL NOT NULL, outlier BOOL NOT NULL, depth BIGINT DEFAULT 0 NOT NULL, origin_server_ts BIGINT, received_ts BIGINT, sender TEXT, contains_url BOOLEAN, UNIQUE (event_id) );
+CREATE INDEX events_order_room ON events ( room_id, topological_ordering, stream_ordering );
+CREATE TABLE event_json( event_id TEXT NOT NULL, room_id TEXT NOT NULL, internal_metadata TEXT NOT NULL, json TEXT NOT NULL, format_version INTEGER, UNIQUE (event_id) );
+CREATE INDEX event_json_room_id ON event_json(room_id);
+CREATE TABLE state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, prev_state TEXT, UNIQUE (event_id) );
+CREATE TABLE current_state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, UNIQUE (event_id), UNIQUE (room_id, type, state_key) );
+CREATE TABLE room_memberships( event_id TEXT NOT NULL, user_id TEXT NOT NULL, sender TEXT NOT NULL, room_id TEXT NOT NULL, membership TEXT NOT NULL, forgotten INTEGER DEFAULT 0, display_name TEXT, avatar_url TEXT, UNIQUE (event_id) );
+CREATE INDEX room_memberships_room_id ON room_memberships (room_id);
+CREATE INDEX room_memberships_user_id ON room_memberships (user_id);
+CREATE TABLE topics( event_id TEXT NOT NULL, room_id TEXT NOT NULL, topic TEXT NOT NULL, UNIQUE (event_id) );
+CREATE INDEX topics_room_id ON topics(room_id);
+CREATE TABLE room_names( event_id TEXT NOT NULL, room_id TEXT NOT NULL, name TEXT NOT NULL, UNIQUE (event_id) );
+CREATE INDEX room_names_room_id ON room_names(room_id);
+CREATE TABLE rooms( room_id TEXT PRIMARY KEY NOT NULL, is_public BOOL, creator TEXT );
+CREATE TABLE server_signature_keys( server_name TEXT, key_id TEXT, from_server TEXT, ts_added_ms BIGINT, verify_key bytea, ts_valid_until_ms BIGINT, UNIQUE (server_name, key_id) );
+CREATE TABLE rejections( event_id TEXT NOT NULL, reason TEXT NOT NULL, last_check TEXT NOT NULL, UNIQUE (event_id) );
+CREATE TABLE push_rules ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, priority_class SMALLINT NOT NULL, priority INTEGER NOT NULL DEFAULT 0, conditions TEXT NOT NULL, actions TEXT NOT NULL, UNIQUE(user_name, rule_id) );
+CREATE INDEX push_rules_user_name on push_rules (user_name);
+CREATE TABLE user_filters( user_id TEXT, filter_id BIGINT, filter_json bytea );
+CREATE INDEX user_filters_by_user_id_filter_id ON user_filters( user_id, filter_id );
+CREATE TABLE push_rules_enable ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, enabled SMALLINT, UNIQUE(user_name, rule_id) );
+CREATE INDEX push_rules_enable_user_name on push_rules_enable (user_name);
+CREATE TABLE event_forward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) );
+CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id);
+CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id);
+CREATE TABLE event_backward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) );
+CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id);
+CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id);
+CREATE TABLE event_edges( event_id TEXT NOT NULL, prev_event_id TEXT NOT NULL, room_id TEXT NOT NULL, is_state BOOL NOT NULL, UNIQUE (event_id, prev_event_id, room_id, is_state) );
+CREATE INDEX ev_edges_id ON event_edges(event_id);
+CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id);
+CREATE TABLE room_depth( room_id TEXT NOT NULL, min_depth INTEGER NOT NULL, UNIQUE (room_id) );
+CREATE INDEX room_depth_room ON room_depth(room_id);
+CREATE TABLE state_groups( id BIGINT PRIMARY KEY, room_id TEXT NOT NULL, event_id TEXT NOT NULL );
+CREATE TABLE state_groups_state( state_group BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT NOT NULL );
+CREATE TABLE event_to_state_groups( event_id TEXT NOT NULL, state_group BIGINT NOT NULL, UNIQUE (event_id) );
+CREATE TABLE local_media_repository ( media_id TEXT, media_type TEXT, media_length INTEGER, created_ts BIGINT, upload_name TEXT, user_id TEXT, quarantined_by TEXT, url_cache TEXT, last_access_ts BIGINT, UNIQUE (media_id) );
+CREATE TABLE local_media_repository_thumbnails ( media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_type TEXT, thumbnail_method TEXT, thumbnail_length INTEGER, UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type ) );
+CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails (media_id);
+CREATE TABLE remote_media_cache ( media_origin TEXT, media_id TEXT, media_type TEXT, created_ts BIGINT, upload_name TEXT, media_length INTEGER, filesystem_id TEXT, last_access_ts BIGINT, quarantined_by TEXT, UNIQUE (media_origin, media_id) );
+CREATE TABLE remote_media_cache_thumbnails ( media_origin TEXT, media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_method TEXT, thumbnail_type TEXT, thumbnail_length INTEGER, filesystem_id TEXT, UNIQUE ( media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type ) );
+CREATE TABLE redactions ( event_id TEXT NOT NULL, redacts TEXT NOT NULL, UNIQUE (event_id) );
+CREATE INDEX redactions_redacts ON redactions (redacts);
+CREATE TABLE room_aliases( room_alias TEXT NOT NULL, room_id TEXT NOT NULL, creator TEXT, UNIQUE (room_alias) );
+CREATE INDEX room_aliases_id ON room_aliases(room_id);
+CREATE TABLE room_alias_servers( room_alias TEXT NOT NULL, server TEXT NOT NULL );
+CREATE INDEX room_alias_servers_alias ON room_alias_servers(room_alias);
+CREATE TABLE event_reference_hashes ( event_id TEXT, algorithm TEXT, hash bytea, UNIQUE (event_id, algorithm) );
+CREATE INDEX event_reference_hashes_id ON event_reference_hashes(event_id);
+CREATE TABLE IF NOT EXISTS "server_keys_json" ( server_name TEXT NOT NULL, key_id TEXT NOT NULL, from_server TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, ts_valid_until_ms BIGINT NOT NULL, key_json bytea NOT NULL, CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server) );
+CREATE TABLE e2e_device_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id) );
+CREATE TABLE e2e_one_time_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, algorithm TEXT NOT NULL, key_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id) );
+CREATE TABLE receipts_graph( room_id TEXT NOT NULL, receipt_type TEXT NOT NULL, user_id TEXT NOT NULL, event_ids TEXT NOT NULL, data TEXT NOT NULL, CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id) );
+CREATE TABLE receipts_linearized ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, receipt_type TEXT NOT NULL, user_id TEXT NOT NULL, event_id TEXT NOT NULL, data TEXT NOT NULL, CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id) );
+CREATE INDEX receipts_linearized_id ON receipts_linearized( stream_id );
+CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( room_id, stream_id );
+CREATE TABLE IF NOT EXISTS "user_threepids" ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, validated_at BIGINT NOT NULL, added_at BIGINT NOT NULL, CONSTRAINT medium_address UNIQUE (medium, address) );
+CREATE INDEX user_threepids_user_id ON user_threepids(user_id);
+CREATE TABLE background_updates( update_name TEXT NOT NULL, progress_json TEXT NOT NULL, depends_on TEXT, CONSTRAINT background_updates_uniqueness UNIQUE (update_name) );
+CREATE VIRTUAL TABLE event_search USING fts4 ( event_id, room_id, sender, key, value )
+/* event_search(event_id,room_id,sender,"key",value) */;
+CREATE TABLE IF NOT EXISTS 'event_search_content'(docid INTEGER PRIMARY KEY, 'c0event_id', 'c1room_id', 'c2sender', 'c3key', 'c4value');
+CREATE TABLE IF NOT EXISTS 'event_search_segments'(blockid INTEGER PRIMARY KEY, block BLOB);
+CREATE TABLE IF NOT EXISTS 'event_search_segdir'(level INTEGER,idx INTEGER,start_block INTEGER,leaves_end_block INTEGER,end_block INTEGER,root BLOB,PRIMARY KEY(level, idx));
+CREATE TABLE IF NOT EXISTS 'event_search_docsize'(docid INTEGER PRIMARY KEY, size BLOB);
+CREATE TABLE IF NOT EXISTS 'event_search_stat'(id INTEGER PRIMARY KEY, value BLOB);
+CREATE TABLE guest_access( event_id TEXT NOT NULL, room_id TEXT NOT NULL, guest_access TEXT NOT NULL, UNIQUE (event_id) );
+CREATE TABLE history_visibility( event_id TEXT NOT NULL, room_id TEXT NOT NULL, history_visibility TEXT NOT NULL, UNIQUE (event_id) );
+CREATE TABLE room_tags( user_id TEXT NOT NULL, room_id TEXT NOT NULL, tag     TEXT NOT NULL, content TEXT NOT NULL, CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag) );
+CREATE TABLE room_tags_revisions ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, stream_id BIGINT NOT NULL, CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id) );
+CREATE TABLE IF NOT EXISTS "account_data_max_stream_id"( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id  BIGINT NOT NULL, CHECK (Lock='X') );
+CREATE TABLE account_data( user_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type) );
+CREATE TABLE room_account_data( user_id TEXT NOT NULL, room_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type) );
+CREATE INDEX account_data_stream_id on account_data(user_id, stream_id);
+CREATE INDEX room_account_data_stream_id on room_account_data(user_id, stream_id);
+CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering);
+CREATE TABLE event_push_actions( room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, profile_tag VARCHAR(32), actions TEXT NOT NULL, topological_ordering BIGINT, stream_ordering BIGINT, notif SMALLINT, highlight SMALLINT, CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag) );
+CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id);
+CREATE INDEX events_room_stream on events(room_id, stream_ordering);
+CREATE INDEX public_room_index on rooms(is_public);
+CREATE INDEX receipts_linearized_user ON receipts_linearized( user_id );
+CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering );
+CREATE TABLE presence_stream( stream_id BIGINT, user_id TEXT, state TEXT, last_active_ts BIGINT, last_federation_update_ts BIGINT, last_user_sync_ts BIGINT, status_msg TEXT, currently_active BOOLEAN );
+CREATE INDEX presence_stream_id ON presence_stream(stream_id, user_id);
+CREATE INDEX presence_stream_user_id ON presence_stream(user_id);
+CREATE TABLE push_rules_stream( stream_id BIGINT NOT NULL, event_stream_ordering BIGINT NOT NULL, user_id TEXT NOT NULL, rule_id TEXT NOT NULL, op TEXT NOT NULL, priority_class SMALLINT, priority INTEGER, conditions TEXT, actions TEXT );
+CREATE INDEX push_rules_stream_id ON push_rules_stream(stream_id);
+CREATE INDEX push_rules_stream_user_stream_id on push_rules_stream(user_id, stream_id);
+CREATE TABLE ex_outlier_stream( event_stream_ordering BIGINT PRIMARY KEY NOT NULL, event_id TEXT NOT NULL, state_group BIGINT NOT NULL );
+CREATE TABLE threepid_guest_access_tokens( medium TEXT, address TEXT, guest_access_token TEXT, first_inviter TEXT );
+CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens(medium, address);
+CREATE TABLE local_invites( stream_id BIGINT NOT NULL, inviter TEXT NOT NULL, invitee TEXT NOT NULL, event_id TEXT NOT NULL, room_id TEXT NOT NULL, locally_rejected TEXT, replaced_by TEXT );
+CREATE INDEX local_invites_id ON local_invites(stream_id);
+CREATE INDEX local_invites_for_user_idx ON local_invites(invitee, locally_rejected, replaced_by, room_id);
+CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id );
+CREATE TABLE open_id_tokens ( token TEXT NOT NULL PRIMARY KEY, ts_valid_until_ms bigint NOT NULL, user_id TEXT NOT NULL, UNIQUE (token) );
+CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens(ts_valid_until_ms);
+CREATE TABLE pusher_throttle( pusher BIGINT NOT NULL, room_id TEXT NOT NULL, last_sent_ts BIGINT, throttle_ms BIGINT, PRIMARY KEY (pusher, room_id) );
+CREATE TABLE event_reports( id BIGINT NOT NULL PRIMARY KEY, received_ts BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, reason TEXT, content TEXT );
+CREATE TABLE devices ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, display_name TEXT, CONSTRAINT device_uniqueness UNIQUE (user_id, device_id) );
+CREATE TABLE appservice_stream_position( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT, CHECK (Lock='X') );
+CREATE TABLE device_inbox ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, stream_id BIGINT NOT NULL, message_json TEXT NOT NULL );
+CREATE INDEX device_inbox_user_stream_id ON device_inbox(user_id, device_id, stream_id);
+CREATE INDEX received_transactions_ts ON received_transactions(ts);
+CREATE TABLE device_federation_outbox ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, queued_ts BIGINT NOT NULL, messages_json TEXT NOT NULL );
+CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox(destination, stream_id);
+CREATE TABLE device_federation_inbox ( origin TEXT NOT NULL, message_id TEXT NOT NULL, received_ts BIGINT NOT NULL );
+CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox(origin, message_id);
+CREATE TABLE device_max_stream_id ( stream_id BIGINT NOT NULL );
+CREATE TABLE public_room_list_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, visibility BOOLEAN NOT NULL , appservice_id TEXT, network_id TEXT);
+CREATE INDEX public_room_list_stream_idx on public_room_list_stream( stream_id );
+CREATE INDEX public_room_list_stream_rm_idx on public_room_list_stream( room_id, stream_id );
+CREATE TABLE state_group_edges( state_group BIGINT NOT NULL, prev_state_group BIGINT NOT NULL );
+CREATE INDEX state_group_edges_idx ON state_group_edges(state_group);
+CREATE INDEX state_group_edges_prev_idx ON state_group_edges(prev_state_group);
+CREATE TABLE stream_ordering_to_exterm ( stream_ordering BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL );
+CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm( stream_ordering );
+CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm( room_id, stream_ordering );
+CREATE TABLE IF NOT EXISTS "event_auth"( event_id TEXT NOT NULL, auth_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE INDEX evauth_edges_id ON event_auth(event_id);
+CREATE INDEX user_threepids_medium_address on user_threepids (medium, address);
+CREATE TABLE appservice_room_list( appservice_id TEXT NOT NULL, network_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list( appservice_id, network_id, room_id );
+CREATE INDEX device_federation_outbox_id ON device_federation_outbox(stream_id);
+CREATE TABLE federation_stream_position( type TEXT NOT NULL, stream_id INTEGER NOT NULL );
+CREATE TABLE device_lists_remote_cache ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, content TEXT NOT NULL );
+CREATE TABLE device_lists_remote_extremeties ( user_id TEXT NOT NULL, stream_id TEXT NOT NULL );
+CREATE TABLE device_lists_stream ( stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL );
+CREATE INDEX device_lists_stream_id ON device_lists_stream(stream_id, user_id);
+CREATE TABLE device_lists_outbound_pokes ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL, sent BOOLEAN NOT NULL, ts BIGINT NOT NULL );
+CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes(destination, stream_id);
+CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes(destination, user_id);
+CREATE TABLE event_push_summary ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, notif_count BIGINT NOT NULL, stream_ordering BIGINT NOT NULL );
+CREATE INDEX event_push_summary_user_rm ON event_push_summary(user_id, room_id);
+CREATE TABLE event_push_summary_stream_ordering ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT NOT NULL, CHECK (Lock='X') );
+CREATE TABLE IF NOT EXISTS "pushers" ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, access_token BIGINT DEFAULT NULL, profile_tag TEXT NOT NULL, kind TEXT NOT NULL, app_id TEXT NOT NULL, app_display_name TEXT NOT NULL, device_display_name TEXT NOT NULL, pushkey TEXT NOT NULL, ts BIGINT NOT NULL, lang TEXT, data TEXT, last_stream_ordering INTEGER, last_success BIGINT, failing_since BIGINT, UNIQUE (app_id, pushkey, user_name) );
+CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes(stream_id);
+CREATE TABLE ratelimit_override ( user_id TEXT NOT NULL, messages_per_second BIGINT, burst_count BIGINT );
+CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override(user_id);
+CREATE TABLE current_state_delta_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT, prev_event_id TEXT );
+CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream(stream_id);
+CREATE TABLE device_lists_outbound_last_success ( destination TEXT NOT NULL, user_id TEXT NOT NULL, stream_id BIGINT NOT NULL );
+CREATE INDEX device_lists_outbound_last_success_idx ON device_lists_outbound_last_success( destination, user_id, stream_id );
+CREATE TABLE user_directory_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') );
+CREATE VIRTUAL TABLE user_directory_search USING fts4 ( user_id, value )
+/* user_directory_search(user_id,value) */;
+CREATE TABLE IF NOT EXISTS 'user_directory_search_content'(docid INTEGER PRIMARY KEY, 'c0user_id', 'c1value');
+CREATE TABLE IF NOT EXISTS 'user_directory_search_segments'(blockid INTEGER PRIMARY KEY, block BLOB);
+CREATE TABLE IF NOT EXISTS 'user_directory_search_segdir'(level INTEGER,idx INTEGER,start_block INTEGER,leaves_end_block INTEGER,end_block INTEGER,root BLOB,PRIMARY KEY(level, idx));
+CREATE TABLE IF NOT EXISTS 'user_directory_search_docsize'(docid INTEGER PRIMARY KEY, size BLOB);
+CREATE TABLE IF NOT EXISTS 'user_directory_search_stat'(id INTEGER PRIMARY KEY, value BLOB);
+CREATE TABLE blocked_rooms ( room_id TEXT NOT NULL, user_id TEXT NOT NULL );
+CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms(room_id);
+CREATE TABLE IF NOT EXISTS "local_media_repository_url_cache"( url TEXT, response_code INTEGER, etag TEXT, expires_ts BIGINT, og TEXT, media_id TEXT, download_ts BIGINT );
+CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache(expires_ts);
+CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache(url, download_ts);
+CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache(media_id);
+CREATE TABLE group_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, is_public BOOLEAN NOT NULL );
+CREATE TABLE group_invites ( group_id TEXT NOT NULL, user_id TEXT NOT NULL );
+CREATE TABLE group_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, is_public BOOLEAN NOT NULL );
+CREATE TABLE group_summary_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, category_id TEXT NOT NULL, room_order BIGINT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, category_id, room_id, room_order), CHECK (room_order > 0) );
+CREATE UNIQUE INDEX group_summary_rooms_g_idx ON group_summary_rooms(group_id, room_id, category_id);
+CREATE TABLE group_summary_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, cat_order BIGINT NOT NULL, UNIQUE (group_id, category_id, cat_order), CHECK (cat_order > 0) );
+CREATE TABLE group_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, profile TEXT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, category_id) );
+CREATE TABLE group_summary_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, role_id TEXT NOT NULL, user_order BIGINT NOT NULL, is_public BOOLEAN NOT NULL );
+CREATE INDEX group_summary_users_g_idx ON group_summary_users(group_id);
+CREATE TABLE group_summary_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, role_order BIGINT NOT NULL, UNIQUE (group_id, role_id, role_order), CHECK (role_order > 0) );
+CREATE TABLE group_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, profile TEXT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, role_id) );
+CREATE TABLE group_attestations_renewals ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, valid_until_ms BIGINT NOT NULL );
+CREATE INDEX group_attestations_renewals_g_idx ON group_attestations_renewals(group_id, user_id);
+CREATE INDEX group_attestations_renewals_u_idx ON group_attestations_renewals(user_id);
+CREATE INDEX group_attestations_renewals_v_idx ON group_attestations_renewals(valid_until_ms);
+CREATE TABLE group_attestations_remote ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, valid_until_ms BIGINT NOT NULL, attestation_json TEXT NOT NULL );
+CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote(group_id, user_id);
+CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote(user_id);
+CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote(valid_until_ms);
+CREATE TABLE local_group_membership ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, membership TEXT NOT NULL, is_publicised BOOLEAN NOT NULL, content TEXT NOT NULL );
+CREATE INDEX local_group_membership_u_idx ON local_group_membership(user_id, group_id);
+CREATE INDEX local_group_membership_g_idx ON local_group_membership(group_id);
+CREATE TABLE local_group_updates ( stream_id BIGINT NOT NULL, group_id TEXT NOT NULL, user_id TEXT NOT NULL, type TEXT NOT NULL, content TEXT NOT NULL );
+CREATE TABLE remote_profile_cache ( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, last_check BIGINT NOT NULL );
+CREATE UNIQUE INDEX remote_profile_cache_user_id ON remote_profile_cache(user_id);
+CREATE INDEX remote_profile_cache_time ON remote_profile_cache(last_check);
+CREATE TABLE IF NOT EXISTS "deleted_pushers" ( stream_id BIGINT NOT NULL, app_id TEXT NOT NULL, pushkey TEXT NOT NULL, user_id TEXT NOT NULL );
+CREATE INDEX deleted_pushers_stream_id ON deleted_pushers (stream_id);
+CREATE TABLE IF NOT EXISTS "groups" ( group_id TEXT NOT NULL, name TEXT, avatar_url TEXT, short_description TEXT, long_description TEXT, is_public BOOL NOT NULL , join_policy TEXT NOT NULL DEFAULT 'invite');
+CREATE UNIQUE INDEX groups_idx ON groups(group_id);
+CREATE TABLE IF NOT EXISTS "user_directory" ( user_id TEXT NOT NULL, room_id TEXT, display_name TEXT, avatar_url TEXT );
+CREATE INDEX user_directory_room_idx ON user_directory(room_id);
+CREATE UNIQUE INDEX user_directory_user_idx ON user_directory(user_id);
+CREATE TABLE event_push_actions_staging ( event_id TEXT NOT NULL, user_id TEXT NOT NULL, actions TEXT NOT NULL, notif SMALLINT NOT NULL, highlight SMALLINT NOT NULL );
+CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id);
+CREATE TABLE users_pending_deactivation ( user_id TEXT NOT NULL );
+CREATE UNIQUE INDEX group_invites_g_idx ON group_invites(group_id, user_id);
+CREATE UNIQUE INDEX group_users_g_idx ON group_users(group_id, user_id);
+CREATE INDEX group_users_u_idx ON group_users(user_id);
+CREATE INDEX group_invites_u_idx ON group_invites(user_id);
+CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms(group_id, room_id);
+CREATE INDEX group_rooms_r_idx ON group_rooms(room_id);
+CREATE TABLE user_daily_visits ( user_id TEXT NOT NULL, device_id TEXT, timestamp BIGINT NOT NULL );
+CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits(user_id, timestamp);
+CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits(timestamp);
+CREATE TABLE erased_users ( user_id TEXT NOT NULL );
+CREATE UNIQUE INDEX erased_users_user ON erased_users(user_id);
+CREATE TABLE monthly_active_users ( user_id TEXT NOT NULL, timestamp BIGINT NOT NULL );
+CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users(user_id);
+CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users(timestamp);
+CREATE TABLE IF NOT EXISTS "e2e_room_keys_versions" ( user_id TEXT NOT NULL, version BIGINT NOT NULL, algorithm TEXT NOT NULL, auth_data TEXT NOT NULL, deleted SMALLINT DEFAULT 0 NOT NULL );
+CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version);
+CREATE TABLE IF NOT EXISTS "e2e_room_keys" ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, session_id TEXT NOT NULL, version BIGINT NOT NULL, first_message_index INT, forwarded_count INT, is_verified BOOLEAN, session_data TEXT NOT NULL );
+CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id);
+CREATE TABLE users_who_share_private_rooms ( user_id TEXT NOT NULL, other_user_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms(user_id, other_user_id, room_id);
+CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms(room_id);
+CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms(other_user_id);
+CREATE TABLE user_threepid_id_server ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, id_server TEXT NOT NULL );
+CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server( user_id, medium, address, id_server );
+CREATE TABLE users_in_public_rooms ( user_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms(user_id, room_id);
+CREATE TABLE account_validity ( user_id TEXT PRIMARY KEY, expiration_ts_ms BIGINT NOT NULL, email_sent BOOLEAN NOT NULL, renewal_token TEXT );
+CREATE TABLE event_relations ( event_id TEXT NOT NULL, relates_to_id TEXT NOT NULL, relation_type TEXT NOT NULL, aggregation_key TEXT );
+CREATE UNIQUE INDEX event_relations_id ON event_relations(event_id);
+CREATE INDEX event_relations_relates ON event_relations(relates_to_id, relation_type, aggregation_key);
+CREATE TABLE stats_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') );
+CREATE TABLE user_stats ( user_id TEXT NOT NULL, ts BIGINT NOT NULL, bucket_size INT NOT NULL, public_rooms INT NOT NULL, private_rooms INT NOT NULL );
+CREATE UNIQUE INDEX user_stats_user_ts ON user_stats(user_id, ts);
+CREATE TABLE room_stats ( room_id TEXT NOT NULL, ts BIGINT NOT NULL, bucket_size INT NOT NULL, current_state_events INT NOT NULL, joined_members INT NOT NULL, invited_members INT NOT NULL, left_members INT NOT NULL, banned_members INT NOT NULL, state_events INT NOT NULL );
+CREATE UNIQUE INDEX room_stats_room_ts ON room_stats(room_id, ts);
+CREATE TABLE room_state ( room_id TEXT NOT NULL, join_rules TEXT, history_visibility TEXT, encryption TEXT, name TEXT, topic TEXT, avatar TEXT, canonical_alias TEXT );
+CREATE UNIQUE INDEX room_state_room ON room_state(room_id);
+CREATE TABLE room_stats_earliest_token ( room_id TEXT NOT NULL, token BIGINT NOT NULL );
+CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token(room_id);
+CREATE INDEX access_tokens_device_id ON access_tokens (user_id, device_id);
+CREATE INDEX user_ips_device_id ON user_ips (user_id, device_id, last_seen);
+CREATE INDEX event_contains_url_index ON events (room_id, topological_ordering, stream_ordering);
+CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering);
+CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering);
+CREATE INDEX current_state_events_member_index ON current_state_events (state_key);
+CREATE INDEX device_inbox_stream_id_user_id ON device_inbox (stream_id, user_id);
+CREATE INDEX device_lists_stream_user_id ON device_lists_stream (user_id, device_id);
+CREATE INDEX local_media_repository_url_idx ON local_media_repository (created_ts);
+CREATE INDEX user_ips_last_seen ON user_ips (user_id, last_seen);
+CREATE INDEX user_ips_last_seen_only ON user_ips (last_seen);
+CREATE INDEX users_creation_ts ON users (creation_ts);
+CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups (state_group);
+CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache (user_id, device_id);
+CREATE INDEX state_groups_state_type_idx ON state_groups_state(state_group, type, state_key);
+CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties (user_id);
+CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips (user_id, access_token, ip);
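event_search and user_directory_search above are FTS4 virtual tables; the quoted event_search_content, _segments, _segdir, _docsize and _stat tables are the shadow tables SQLite creates to back them, and appear in the dump as a side effect of .schema. A short sketch of how the virtual table behaves, assuming an SQLite build with FTS4 compiled in; the event is invented:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.execute(
        "CREATE VIRTUAL TABLE event_search USING fts4"
        " (event_id, room_id, sender, key, value)"
    )
    cur.execute(
        "INSERT INTO event_search (event_id, room_id, sender, key, value)"
        " VALUES (?, ?, ?, ?, ?)",
        ("$ev1", "!room:hs", "@alice:hs", "content.body", "hello matrix world"),
    )
    # MATCH is the full-text lookup used when searching message bodies.
    rows = cur.execute(
        "SELECT event_id FROM event_search WHERE value MATCH ?", ("matrix",)
    ).fetchall()
    print(rows)  # [('$ev1',)]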
diff --git a/synapse/storage/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/full_schemas/54/stream_positions.sql
new file mode 100644
index 0000000000..c265fd20e2
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/54/stream_positions.sql
@@ -0,0 +1,7 @@
+
+INSERT INTO appservice_stream_position (stream_ordering) SELECT COALESCE(MAX(stream_ordering), 0) FROM events;
+INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', -1);
+INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events;
+INSERT INTO user_directory_stream_pos (stream_id) VALUES (0);
+INSERT INTO stats_stream_pos (stream_id) VALUES (0);
+INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0);
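Each statement seeds a stream-position table from the current contents of events, with COALESCE supplying a sentinel when there are no events yet. A minimal sketch of that behaviour against an empty in-memory SQLite database, creating only the two tables the example needs:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.executescript(
        "CREATE TABLE events (stream_ordering INTEGER PRIMARY KEY);"
        "CREATE TABLE federation_stream_position"
        " (type TEXT NOT NULL, stream_id INTEGER NOT NULL);"
    )
    # With no events, COALESCE(MAX(...), -1) falls back to the sentinel -1.
    cur.execute(
        "INSERT INTO federation_stream_position (type, stream_id)"
        " SELECT 'events', COALESCE(MAX(stream_ordering), -1) FROM events"
    )
    print(cur.execute("SELECT * FROM federation_stream_position").fetchall())
    # [('events', -1)]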
diff --git a/synapse/storage/schema/full_schemas/README.txt b/synapse/storage/schema/full_schemas/README.txt
new file mode 100644
index 0000000000..d3f6401344
--- /dev/null
+++ b/synapse/storage/schema/full_schemas/README.txt
@@ -0,0 +1,19 @@
+Building full schema dumps
+==========================
+
+These schemas need to be made from a database that has had all background updates run.
+
+Postgres
+--------
+
+$ pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner $DATABASE_NAME | sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > full.sql.postgres
+
+SQLite
+------
+
+$ sqlite3 $DATABASE_FILE ".schema" > full.sql.sqlite
+
+After
+-----
+
+Delete the CREATE statements for "sqlite_stat1", "schema_version", "applied_schema_deltas", and "applied_module_schemas".
\ No newline at end of file
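The "After" step is manual; a hedged sketch of automating it in Python, assuming each CREATE statement in the dump starts on its own line (as sqlite3's .schema output does). The filename and table names come from the README; nothing here is part of the change itself:

    UNWANTED = (
        "sqlite_stat1",
        "schema_version",
        "applied_schema_deltas",
        "applied_module_schemas",
    )

    with open("full.sql.sqlite") as f:
        lines = f.readlines()

    with open("full.sql.sqlite", "w") as f:
        for line in lines:
            # Drop the CREATE statements the README says to delete.
            if line.startswith("CREATE") and any(t in line for t in UNWANTED):
                continue
            f.write(line)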
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index 226f8f1b7e..10a27c207a 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -341,29 +341,7 @@ class SearchStore(BackgroundUpdateStore):
                 for entry in entries
             )
 
-            # inserts to a GIN index are normally batched up into a pending
-            # list, and then all committed together once the list gets to a
-            # certain size. The trouble with that is that postgres (pre-9.5)
-            # uses work_mem to determine the length of the list, and work_mem
-            # is typically very large.
-            #
-            # We therefore reduce work_mem while we do the insert.
-            #
-            # (postgres 9.5 uses the separate gin_pending_list_limit setting,
-            # so doesn't suffer the same problem, but changing work_mem will
-            # be harmless)
-            #
-            # Note that we don't need to worry about restoring it on
-            # exception, because exceptions will cause the transaction to be
-            # rolled back, including the effects of the SET command.
-            #
-            # Also: we use SET rather than SET LOCAL because there's lots of
-            # other stuff going on in this transaction, which want to have the
-            # normal work_mem setting.
-
-            txn.execute("SET work_mem='256kB'")
             txn.executemany(sql, args)
-            txn.execute("RESET work_mem")
 
         elif isinstance(self.database_engine, Sqlite3Engine):
             sql = (
@@ -460,7 +438,7 @@ class SearchStore(BackgroundUpdateStore):
 
         results = list(filter(lambda row: row["room_id"] in room_ids, results))
 
-        events = yield self._get_events([r["event_id"] for r in results])
+        events = yield self.get_events_as_list([r["event_id"] for r in results])
 
         event_map = {ev.event_id: ev for ev in events}
 
@@ -605,7 +583,7 @@ class SearchStore(BackgroundUpdateStore):
 
         results = list(filter(lambda row: row["room_id"] in room_ids, results))
 
-        events = yield self._get_events([r["event_id"] for r in results])
+        events = yield self.get_events_as_list([r["event_id"] for r in results])
 
         event_map = {ev.event_id: ev for ev in events}
 
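The deleted comment explained that pre-9.5 postgres sized the GIN pending list from work_mem, hence the temporary SET. On postgres 9.5 and later the dedicated gin_pending_list_limit setting applies instead; a hedged sketch of scoping it to one transaction with psycopg2, with a placeholder DSN and an invented row:

    import psycopg2

    conn = psycopg2.connect("dbname=synapse")  # placeholder connection details
    with conn, conn.cursor() as cur:
        # SET LOCAL reverts automatically when the transaction ends, so the
        # rest of the connection keeps the server default.
        cur.execute("SET LOCAL gin_pending_list_limit = '256kB'")
        cur.executemany(
            "INSERT INTO event_search (event_id, room_id, key, vector)"
            " VALUES (%s, %s, %s, to_tsvector('english', %s))",
            [("$ev1", "!room:hs", "content.body", "hello world")],
        )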
diff --git a/synapse/storage/state_deltas.py b/synapse/storage/state_deltas.py
index 56e42f583d..5fdb442104 100644
--- a/synapse/storage/state_deltas.py
+++ b/synapse/storage/state_deltas.py
@@ -22,6 +22,24 @@ logger = logging.getLogger(__name__)
 
 class StateDeltasStore(SQLBaseStore):
     def get_current_state_deltas(self, prev_stream_id):
+        """Fetch a list of room state changes since the given stream id
+
+        Each entry in the result contains the following fields:
+            - stream_id (int)
+            - room_id (str)
+            - type (str): event type
+            - state_key (str): state key of the event
+            - event_id (str|None): new event_id for this state key. None if the
+                state has been deleted.
+            - prev_event_id (str|None): previous event_id for this state key. None
+                if it's new state.
+
+        Args:
+            prev_stream_id (int): point to get changes since (exclusive)
+
+        Returns:
+            Deferred[list[dict]]: results
+        """
         prev_stream_id = int(prev_stream_id)
         if not self._curr_state_delta_stream_cache.has_any_entity_changed(
             prev_stream_id
@@ -66,10 +84,16 @@ class StateDeltasStore(SQLBaseStore):
             "get_current_state_deltas", get_current_state_deltas_txn
         )
 
-    def get_max_stream_id_in_current_state_deltas(self):
-        return self._simple_select_one_onecol(
+    def _get_max_stream_id_in_current_state_deltas_txn(self, txn):
+        return self._simple_select_one_onecol_txn(
+            txn,
             table="current_state_delta_stream",
             keyvalues={},
             retcol="COALESCE(MAX(stream_id), -1)",
-            desc="get_max_stream_id_in_current_state_deltas",
+        )
+
+    def get_max_stream_id_in_current_state_deltas(self):
+        return self.runInteraction(
+            "get_max_stream_id_in_current_state_deltas",
+            self._get_max_stream_id_in_current_state_deltas_txn,
         )
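A small sketch of consuming the row shape documented in the new docstring: whether event_id or prev_event_id is None distinguishes deleted, new and replaced state. The rows are invented sample data, not output from a real store:

    def classify_delta(row):
        # Matches the docstring: event_id None means the state was deleted,
        # prev_event_id None means brand-new state, otherwise it was replaced.
        if row["event_id"] is None:
            return "deleted"
        if row["prev_event_id"] is None:
            return "new"
        return "replaced"

    deltas = [
        {"stream_id": 7, "room_id": "!a:hs", "type": "m.room.name",
         "state_key": "", "event_id": "$new", "prev_event_id": None},
        {"stream_id": 8, "room_id": "!a:hs", "type": "m.room.topic",
         "state_key": "", "event_id": None, "prev_event_id": "$old"},
    ]
    for d in deltas:
        print(d["type"], classify_delta(d))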
diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py
new file mode 100644
index 0000000000..ff266b09b0
--- /dev/null
+++ b/synapse/storage/stats.py
@@ -0,0 +1,468 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018, 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, Membership
+from synapse.storage.prepare_database import get_statements
+from synapse.storage.state_deltas import StateDeltasStore
+from synapse.util.caches.descriptors import cached
+
+logger = logging.getLogger(__name__)
+
+# these fields track absolutes (e.g. total number of rooms on the server)
+ABSOLUTE_STATS_FIELDS = {
+    "room": (
+        "current_state_events",
+        "joined_members",
+        "invited_members",
+        "left_members",
+        "banned_members",
+        "state_events",
+    ),
+    "user": ("public_rooms", "private_rooms"),
+}
+
+TYPE_TO_ROOM = {"room": ("room_stats", "room_id"), "user": ("user_stats", "user_id")}
+
+TEMP_TABLE = "_temp_populate_stats"
+
+
+class StatsStore(StateDeltasStore):
+    def __init__(self, db_conn, hs):
+        super(StatsStore, self).__init__(db_conn, hs)
+
+        self.server_name = hs.hostname
+        self.clock = self.hs.get_clock()
+        self.stats_enabled = hs.config.stats_enabled
+        self.stats_bucket_size = hs.config.stats_bucket_size
+
+        self.register_background_update_handler(
+            "populate_stats_createtables", self._populate_stats_createtables
+        )
+        self.register_background_update_handler(
+            "populate_stats_process_rooms", self._populate_stats_process_rooms
+        )
+        self.register_background_update_handler(
+            "populate_stats_cleanup", self._populate_stats_cleanup
+        )
+
+    @defer.inlineCallbacks
+    def _populate_stats_createtables(self, progress, batch_size):
+
+        if not self.stats_enabled:
+            yield self._end_background_update("populate_stats_createtables")
+            defer.returnValue(1)
+
+        # Get all the rooms that we want to process.
+        def _make_staging_area(txn):
+            # Create the temporary tables
+            stmts = get_statements("""
+                -- We just recreate the table, we'll be reinserting the
+                -- correct entries again later anyway.
+                DROP TABLE IF EXISTS {temp}_rooms;
+
+                CREATE TABLE IF NOT EXISTS {temp}_rooms(
+                    room_id TEXT NOT NULL,
+                    events BIGINT NOT NULL
+                );
+
+                CREATE INDEX {temp}_rooms_events
+                    ON {temp}_rooms(events);
+                CREATE INDEX {temp}_rooms_id
+                    ON {temp}_rooms(room_id);
+            """.format(temp=TEMP_TABLE).splitlines())
+
+            for statement in stmts:
+                txn.execute(statement)
+
+            sql = (
+                "CREATE TABLE IF NOT EXISTS "
+                + TEMP_TABLE
+                + "_position(position TEXT NOT NULL)"
+            )
+            txn.execute(sql)
+
+            # Get the rooms we want to process, adding only those we haven't
+            # already processed (i.e. those not in room_stats_earliest_token).
+            sql = """
+                INSERT INTO %s_rooms (room_id, events)
+                SELECT c.room_id, count(*) FROM current_state_events AS c
+                LEFT JOIN room_stats_earliest_token AS t USING (room_id)
+                WHERE t.room_id IS NULL
+                GROUP BY c.room_id
+            """ % (TEMP_TABLE,)
+            txn.execute(sql)
+
+        new_pos = yield self.get_max_stream_id_in_current_state_deltas()
+        yield self.runInteraction("populate_stats_temp_build", _make_staging_area)
+        yield self._simple_insert(TEMP_TABLE + "_position", {"position": new_pos})
+        self.get_earliest_token_for_room_stats.invalidate_all()
+
+        yield self._end_background_update("populate_stats_createtables")
+        defer.returnValue(1)
+
+    @defer.inlineCallbacks
+    def _populate_stats_cleanup(self, progress, batch_size):
+        """
+        Update the stats stream position, then clean up the temporary staging tables.
+        """
+        if not self.stats_enabled:
+            yield self._end_background_update("populate_stats_cleanup")
+            defer.returnValue(1)
+
+        position = yield self._simple_select_one_onecol(
+            TEMP_TABLE + "_position", None, "position"
+        )
+        yield self.update_stats_stream_pos(position)
+
+        def _delete_staging_area(txn):
+            txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_rooms")
+            txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_position")
+
+        yield self.runInteraction("populate_stats_cleanup", _delete_staging_area)
+
+        yield self._end_background_update("populate_stats_cleanup")
+        defer.returnValue(1)
+
+    @defer.inlineCallbacks
+    def _populate_stats_process_rooms(self, progress, batch_size):
+
+        if not self.stats_enabled:
+            yield self._end_background_update("populate_stats_process_rooms")
+            defer.returnValue(1)
+
+        # If we don't have any stored progress, delete everything and start afresh.
+        if not progress:
+            yield self.delete_all_stats()
+
+        def _get_next_batch(txn):
+            # Only fetch 250 rooms, so we don't fetch too many at once, even
+            # if those 250 rooms have fewer than batch_size state events.
+            sql = """
+                SELECT room_id, events FROM %s_rooms
+                ORDER BY events DESC
+                LIMIT 250
+            """ % (
+                TEMP_TABLE,
+            )
+            txn.execute(sql)
+            rooms_to_work_on = txn.fetchall()
+
+            if not rooms_to_work_on:
+                return None
+
+            # Get how many rooms are left to process, so we can report how far
+            # through we are.
+            txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms")
+            progress["remaining"] = txn.fetchone()[0]
+
+            return rooms_to_work_on
+
+        rooms_to_work_on = yield self.runInteraction(
+            "populate_stats_temp_read", _get_next_batch
+        )
+
+        # No more rooms to process: mark the background update as complete.
+        if not rooms_to_work_on:
+            yield self._end_background_update("populate_stats_process_rooms")
+            defer.returnValue(1)
+
+        logger.info(
+            "Processing the next %d rooms of %d remaining",
+            len(rooms_to_work_on), progress["remaining"],
+        )
+
+        # Number of state events we've processed by going through each room
+        processed_event_count = 0
+
+        for room_id, event_count in rooms_to_work_on:
+
+            current_state_ids = yield self.get_current_state_ids(room_id)
+
+            join_rules_id = current_state_ids.get((EventTypes.JoinRules, ""))
+            history_visibility_id = current_state_ids.get(
+                (EventTypes.RoomHistoryVisibility, "")
+            )
+            encryption_id = current_state_ids.get((EventTypes.RoomEncryption, ""))
+            name_id = current_state_ids.get((EventTypes.Name, ""))
+            topic_id = current_state_ids.get((EventTypes.Topic, ""))
+            avatar_id = current_state_ids.get((EventTypes.RoomAvatar, ""))
+            canonical_alias_id = current_state_ids.get((EventTypes.CanonicalAlias, ""))
+
+            state_events = yield self.get_events([
+                join_rules_id, history_visibility_id, encryption_id, name_id,
+                topic_id, avatar_id, canonical_alias_id,
+            ])
+
+            def _get_or_none(event_id, arg):
+                event = state_events.get(event_id)
+                if event:
+                    return event.content.get(arg)
+                return None
+
+            yield self.update_room_state(
+                room_id,
+                {
+                    "join_rules": _get_or_none(join_rules_id, "join_rule"),
+                    "history_visibility": _get_or_none(
+                        history_visibility_id, "history_visibility"
+                    ),
+                    "encryption": _get_or_none(encryption_id, "algorithm"),
+                    "name": _get_or_none(name_id, "name"),
+                    "topic": _get_or_none(topic_id, "topic"),
+                    "avatar": _get_or_none(avatar_id, "url"),
+                    "canonical_alias": _get_or_none(canonical_alias_id, "alias"),
+                },
+            )
+
+            now = self.hs.get_reactor().seconds()
+
+            # quantise time to the nearest bucket
+            now = (now // self.stats_bucket_size) * self.stats_bucket_size
+
+            def _fetch_data(txn):
+
+                # Get the current token of the room
+                current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn)
+
+                current_state_events = len(current_state_ids)
+
+                membership_counts = self._get_user_counts_in_room_txn(txn, room_id)
+
+                total_state_events = self._get_total_state_event_counts_txn(
+                    txn, room_id
+                )
+
+                self._update_stats_txn(
+                    txn,
+                    "room",
+                    room_id,
+                    now,
+                    {
+                        "bucket_size": self.stats_bucket_size,
+                        "current_state_events": current_state_events,
+                        "joined_members": membership_counts.get(Membership.JOIN, 0),
+                        "invited_members": membership_counts.get(Membership.INVITE, 0),
+                        "left_members": membership_counts.get(Membership.LEAVE, 0),
+                        "banned_members": membership_counts.get(Membership.BAN, 0),
+                        "state_events": total_state_events,
+                    },
+                )
+                self._simple_insert_txn(
+                    txn,
+                    "room_stats_earliest_token",
+                    {"room_id": room_id, "token": current_token},
+                )
+
+                # We've finished a room. Delete it from the table.
+                self._simple_delete_one_txn(
+                    txn, TEMP_TABLE + "_rooms", {"room_id": room_id},
+                )
+
+            yield self.runInteraction("update_room_stats", _fetch_data)
+
+            # Update the remaining counter.
+            progress["remaining"] -= 1
+            yield self.runInteraction(
+                "populate_stats",
+                self._background_update_progress_txn,
+                "populate_stats_process_rooms",
+                progress,
+            )
+
+            processed_event_count += event_count
+
+            if processed_event_count > batch_size:
+                # Don't process any more rooms, we've hit our batch size.
+                defer.returnValue(processed_event_count)
+
+        defer.returnValue(processed_event_count)
+
+    def delete_all_stats(self):
+        """
+        Delete all statistics records.
+        """
+
+        def _delete_all_stats_txn(txn):
+            txn.execute("DELETE FROM room_state")
+            txn.execute("DELETE FROM room_stats")
+            txn.execute("DELETE FROM room_stats_earliest_token")
+            txn.execute("DELETE FROM user_stats")
+
+        return self.runInteraction("delete_all_stats", _delete_all_stats_txn)
+
+    def get_stats_stream_pos(self):
+        return self._simple_select_one_onecol(
+            table="stats_stream_pos",
+            keyvalues={},
+            retcol="stream_id",
+            desc="stats_stream_pos",
+        )
+
+    def update_stats_stream_pos(self, stream_id):
+        return self._simple_update_one(
+            table="stats_stream_pos",
+            keyvalues={},
+            updatevalues={"stream_id": stream_id},
+            desc="update_stats_stream_pos",
+        )
+
+    def update_room_state(self, room_id, fields):
+        """
+        Args:
+            room_id (str)
+            fields (dict[str, Any])
+        """
+
+        # Some of the fields may contain null bytes, which Postgres rejects
+        # in text columns, so we replace such fields with NULL.
+        for col in (
+            "join_rules",
+            "history_visibility",
+            "encryption",
+            "name",
+            "topic",
+            "avatar",
+            "canonical_alias"
+        ):
+            field = fields.get(col)
+            if field and "\0" in field:
+                fields[col] = None
+
+        return self._simple_upsert(
+            table="room_state",
+            keyvalues={"room_id": room_id},
+            values=fields,
+            desc="update_room_state",
+        )
+
+    def get_deltas_for_room(self, room_id, start, size=100):
+        """
+        Get statistics deltas for a given room.
+
+        Args:
+            room_id (str)
+            start (int): Pagination start. Number of entries, not timestamp.
+            size (int): How many entries to return.
+
+        Returns:
+            Deferred[list[dict]], where the dict has the keys of
+            ABSOLUTE_STATS_FIELDS["room"] and "ts".
+        """
+        return self._simple_select_list_paginate(
+            "room_stats",
+            {"room_id": room_id},
+            "ts",
+            start,
+            size,
+            retcols=(list(ABSOLUTE_STATS_FIELDS["room"]) + ["ts"]),
+            order_direction="DESC",
+        )
+
+    def get_all_room_state(self):
+        return self._simple_select_list(
+            "room_state", None, retcols=("name", "topic", "canonical_alias")
+        )
+
+    @cached()
+    def get_earliest_token_for_room_stats(self, room_id):
+        """
+        Fetch the "earliest token". This is used by the room stats delta
+        processor to ignore deltas that have been processed between the
+        start of the background task and any particular room's stats
+        being calculated.
+
+        Returns:
+            Deferred[int]
+        """
+        return self._simple_select_one_onecol(
+            "room_stats_earliest_token",
+            {"room_id": room_id},
+            retcol="token",
+            allow_none=True,
+        )
+
+    def update_stats(self, stats_type, stats_id, ts, fields):
+        table, id_col = TYPE_TO_ROOM[stats_type]
+        return self._simple_upsert(
+            table=table,
+            keyvalues={id_col: stats_id, "ts": ts},
+            values=fields,
+            desc="update_stats",
+        )
+
+    def _update_stats_txn(self, txn, stats_type, stats_id, ts, fields):
+        table, id_col = TYPE_TO_ROOM[stats_type]
+        return self._simple_upsert_txn(
+            txn, table=table, keyvalues={id_col: stats_id, "ts": ts}, values=fields
+        )
+
+    def update_stats_delta(self, ts, stats_type, stats_id, field, value):
+        def _update_stats_delta(txn):
+            table, id_col = TYPE_TO_ROOM[stats_type]
+
+            sql = (
+                "SELECT * FROM %s"
+                " WHERE %s=? and ts=("
+                "  SELECT MAX(ts) FROM %s"
+                "  WHERE %s=?"
+                ")"
+            ) % (table, id_col, table, id_col)
+            txn.execute(sql, (stats_id, stats_id))
+            rows = self.cursor_to_dict(txn)
+            if len(rows) == 0:
+                # silently skip as we don't have anything to apply a delta to yet.
+                # this tries to minimise any race between the initial sync and
+                # subsequent deltas arriving.
+                return
+
+            current_ts = ts
+            latest_ts = rows[0]["ts"]
+            if current_ts < latest_ts:
+                # This one is in the past, but we're just encountering it now.
+                # Mark it as part of the current bucket.
+                current_ts = latest_ts
+            elif ts != latest_ts:
+                # we have to copy our absolute counters over to the new entry.
+                values = {
+                    key: rows[0][key] for key in ABSOLUTE_STATS_FIELDS[stats_type]
+                }
+                values[id_col] = stats_id
+                values["ts"] = ts
+                values["bucket_size"] = self.stats_bucket_size
+
+                self._simple_insert_txn(txn, table=table, values=values)
+
+            # Actually update the new value: absolute fields are overwritten,
+            # while relative (per-bucket) fields are incremented.
+            if field in ABSOLUTE_STATS_FIELDS[stats_type]:
+                self._simple_update_txn(
+                    txn,
+                    table=table,
+                    keyvalues={id_col: stats_id, "ts": current_ts},
+                    updatevalues={field: value},
+                )
+            else:
+                sql = ("UPDATE %s SET %s=%s+? WHERE %s=? AND ts=?") % (
+                    table,
+                    field,
+                    field,
+                    id_col,
+                )
+                txn.execute(sql, (value, stats_id, current_ts))
+
+        return self.runInteraction("update_stats_delta", _update_stats_delta)
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 9cd1e0f9fe..6f7f65d96b 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -64,59 +64,135 @@ _EventDictReturn = namedtuple(
 )
 
 
-def lower_bound(token, engine, inclusive=False):
-    inclusive = "=" if inclusive else ""
-    if token.topological is None:
-        return "(%d <%s %s)" % (token.stream, inclusive, "stream_ordering")
-    else:
-        if isinstance(engine, PostgresEngine):
-            # Postgres doesn't optimise ``(x < a) OR (x=a AND y<b)`` as well
-            # as it optimises ``(x,y) < (a,b)`` on multicolumn indexes. So we
-            # use the later form when running against postgres.
-            return "((%d,%d) <%s (%s,%s))" % (
-                token.topological,
-                token.stream,
-                inclusive,
-                "topological_ordering",
-                "stream_ordering",
+def generate_pagination_where_clause(
+    direction, column_names, from_token, to_token, engine,
+):
+    """Creates an SQL expression to bound the columns by the pagination
+    tokens.
+
+    For example, an SQL expression like:
+
+        (6, 7) >= (topological_ordering, stream_ordering)
+        AND (5, 3) < (topological_ordering, stream_ordering)
+
+    would be generated for direction="b", from_token=(6, 7) and to_token=(5, 3).
+
+    Note that tokens are considered to be after the row they are in, e.g. if
+    a row A has a token T, then we consider A to be before T. This convention
+    is important when figuring out inequalities for the generated SQL, and
+    produces the following result:
+        - If paginating forwards then we exclude any rows matching the from
+          token, but include those that match the to token.
+        - If paginating backwards then we include any rows matching the from
+          token, but exclude those that match the to token.
+
+    Args:
+        direction (str): Whether we're paginating backwards ("b") or
+            forwards ("f").
+        column_names (tuple[str, str]): The column names to bound. Must *not*
+            be user defined as these get inserted directly into the SQL
+            statement without escapes.
+        from_token (tuple[int, int]|None): The start point for the pagination.
+            This is an exclusive minimum bound if direction is "f", and an
+            inclusive maximum bound if direction is "b".
+        to_token (tuple[int, int]|None): The end point for the pagination.
+            This is an inclusive maximum bound if direction is "f", and an
+            exclusive minimum bound if direction is "b".
+        engine: The database engine to generate the clauses for
+
+    Returns:
+        str: The sql expression
+    """
+    assert direction in ("b", "f")
+
+    where_clause = []
+    if from_token:
+        where_clause.append(
+            _make_generic_sql_bound(
+                bound=">=" if direction == "b" else "<",
+                column_names=column_names,
+                values=from_token,
+                engine=engine,
             )
-        return "(%d < %s OR (%d = %s AND %d <%s %s))" % (
-            token.topological,
-            "topological_ordering",
-            token.topological,
-            "topological_ordering",
-            token.stream,
-            inclusive,
-            "stream_ordering",
-        )
-
-
-def upper_bound(token, engine, inclusive=True):
-    inclusive = "=" if inclusive else ""
-    if token.topological is None:
-        return "(%d >%s %s)" % (token.stream, inclusive, "stream_ordering")
-    else:
-        if isinstance(engine, PostgresEngine):
-            # Postgres doesn't optimise ``(x > a) OR (x=a AND y>b)`` as well
-            # as it optimises ``(x,y) > (a,b)`` on multicolumn indexes. So we
-            # use the later form when running against postgres.
-            return "((%d,%d) >%s (%s,%s))" % (
-                token.topological,
-                token.stream,
-                inclusive,
-                "topological_ordering",
-                "stream_ordering",
+        )
+
+    if to_token:
+        where_clause.append(
+            _make_generic_sql_bound(
+                bound="<" if direction == "b" else ">=",
+                column_names=column_names,
+                values=to_token,
+                engine=engine,
             )
-        return "(%d > %s OR (%d = %s AND %d >%s %s))" % (
-            token.topological,
-            "topological_ordering",
-            token.topological,
-            "topological_ordering",
-            token.stream,
-            inclusive,
-            "stream_ordering",
         )
 
+    return " AND ".join(where_clause)
+
+
+def _make_generic_sql_bound(bound, column_names, values, engine):
+    """Create an SQL expression that bounds the given column names by the
+    values, e.g. create the equivalent of `(1, 2) < (col1, col2)`.
+
+    Only works with two columns.
+
+    Older versions of SQLite don't support that syntax, so we have to expand it
+    out manually.
+
+    Args:
+        bound (str): The comparison operator to use. One of ">", "<", ">=",
+            "<=", where the values are on the left and columns on the right.
+        column_names (tuple[str, str]): The column names. Must *not* be user defined
+            as these get inserted directly into the SQL statement without
+            escapes.
+        values (tuple[int|None, int]): The values to bound the columns by. If
+            the first value is None then only creates a bound on the second
+            column.
+        engine: The database engine to generate the SQL for
+
+    Returns:
+        str
+    """
+
+    assert bound in (">", "<", ">=", "<=")
+
+    name1, name2 = column_names
+    val1, val2 = values
+
+    if val1 is None:
+        val2 = int(val2)
+        return "(%d %s %s)" % (val2, bound, name2)
+
+    val1 = int(val1)
+    val2 = int(val2)
+
+    if isinstance(engine, PostgresEngine):
+        # Postgres doesn't optimise ``(x < a) OR (x=a AND y<b)`` as well
+        # as it optimises ``(x,y) < (a,b)`` on multicolumn indexes. So we
+        # use the latter form when running against postgres.
+        return "((%d,%d) %s (%s,%s))" % (
+            val1, val2,
+            bound,
+            name1, name2,
+        )
+
+    # We want to generate queries of e.g. the form:
+    #
+    #   (val1 < name1 OR (val1 = name1 AND val2 <= name2))
+    #
+    # which is equivalent to (val1, val2) <= (name1, name2)
+
+    return """(
+        {val1:d} {strict_bound} {name1}
+        OR ({val1:d} = {name1} AND {val2:d} {bound} {name2})
+    )""".format(
+        name1=name1,
+        val1=val1,
+        name2=name2,
+        val2=val2,
+        strict_bound=bound[0],  # The first comparison must always be strict
+        bound=bound,
+    )
+
 
 def filter_to_clause(event_filter):
     # NB: This may create SQL clauses that don't optimise well (and we don't
@@ -319,7 +395,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         rows = yield self.runInteraction("get_room_events_stream_for_room", f)
 
-        ret = yield self._get_events([r.event_id for r in rows], get_prev_content=True)
+        ret = yield self.get_events_as_list(
+            [r.event_id for r in rows], get_prev_content=True,
+        )
 
         self._set_before_and_after(ret, rows, topo_order=from_id is None)
 
@@ -367,7 +445,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         rows = yield self.runInteraction("get_membership_changes_for_user", f)
 
-        ret = yield self._get_events([r.event_id for r in rows], get_prev_content=True)
+        ret = yield self.get_events_as_list(
+            [r.event_id for r in rows], get_prev_content=True,
+        )
 
         self._set_before_and_after(ret, rows, topo_order=False)
 
@@ -394,7 +474,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         )
 
         logger.debug("stream before")
-        events = yield self._get_events(
+        events = yield self.get_events_as_list(
             [r.event_id for r in rows], get_prev_content=True
         )
         logger.debug("stream after")
@@ -512,8 +592,18 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         )
 
     def get_max_topological_token(self, room_id, stream_key):
+        """Get the max topological token in a room before the given stream
+        ordering.
+
+        Args:
+            room_id (str)
+            stream_key (int)
+
+        Returns:
+            Deferred[int]
+        """
         sql = (
-            "SELECT max(topological_ordering) FROM events"
+            "SELECT coalesce(max(topological_ordering), 0) FROM events"
             " WHERE room_id = ? AND stream_ordering < ?"
         )
         return self._execute(
@@ -580,11 +670,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             event_filter,
         )
 
-        events_before = yield self._get_events(
+        events_before = yield self.get_events_as_list(
             [e for e in results["before"]["event_ids"]], get_prev_content=True
         )
 
-        events_after = yield self._get_events(
+        events_after = yield self.get_events_as_list(
             [e for e in results["after"]["event_ids"]], get_prev_content=True
         )
 
@@ -697,7 +787,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             "get_all_new_events_stream", get_all_new_events_stream_txn
         )
 
-        events = yield self._get_events(event_ids)
+        events = yield self.get_events_as_list(event_ids)
 
         defer.returnValue((upper_bound, events))
 
@@ -758,20 +848,16 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         args = [False, room_id]
         if direction == 'b':
             order = "DESC"
-            bounds = upper_bound(from_token, self.database_engine)
-            if to_token:
-                bounds = "%s AND %s" % (
-                    bounds,
-                    lower_bound(to_token, self.database_engine),
-                )
         else:
             order = "ASC"
-            bounds = lower_bound(from_token, self.database_engine)
-            if to_token:
-                bounds = "%s AND %s" % (
-                    bounds,
-                    upper_bound(to_token, self.database_engine),
-                )
+
+        bounds = generate_pagination_where_clause(
+            direction=direction,
+            column_names=("topological_ordering", "stream_ordering"),
+            from_token=from_token,
+            to_token=to_token,
+            engine=self.database_engine,
+        )
 
         filter_clause, filter_args = filter_to_clause(event_filter)
 
@@ -849,7 +935,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             event_filter,
         )
 
-        events = yield self._get_events(
+        events = yield self.get_events_as_list(
             [r.event_id for r in rows], get_prev_content=True
         )
 
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index f0e4a0e10c..7253ba120f 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd.
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -156,6 +156,25 @@ def concurrently_execute(func, args, limit):
     ], consumeErrors=True)).addErrback(unwrapFirstError)
 
 
+def yieldable_gather_results(func, iter, *args, **kwargs):
+    """Executes the function with each argument concurrently.
+
+    Args:
+        func (func): Function to execute that returns a Deferred
+        iter (iter): An iterable that yields items that get passed as the first
+            argument to the function
+        *args, **kwargs: Additional arguments to be passed to each call to func
+
+    Returns:
+        Deferred[list]: Resolves once all of the function calls have completed,
+        or fails if any of the calls fails.
+    """
+    return logcontext.make_deferred_yieldable(defer.gatherResults([
+        run_in_background(func, item, *args, **kwargs)
+        for item in iter
+    ], consumeErrors=True)).addErrback(unwrapFirstError)
+
+
 class Linearizer(object):
     """Limits concurrent access to resources based on a key. Useful to ensure
     only a few things happen at a time on a given resource.
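
A minimal usage sketch for yieldable_gather_results, where fetch_event is a
hypothetical function returning a Deferred:

    @defer.inlineCallbacks
    def fetch_all(event_ids):
        # Each fetch_event(event_id) runs concurrently in the background;
        # the first failure propagates via unwrapFirstError.
        events = yield yieldable_gather_results(fetch_event, event_ids)
        defer.returnValue(events)
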
diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py
index 194da87639..e14c8bdfda 100644
--- a/synapse/util/distributor.py
+++ b/synapse/util/distributor.py
@@ -27,6 +27,7 @@ def user_left_room(distributor, user, room_id):
     distributor.fire("user_left_room", user=user, room_id=room_id)
 
 
+# XXX: this is no longer used. We should probably kill it.
 def user_joined_room(distributor, user, room_id):
     distributor.fire("user_joined_room", user=user, room_id=room_id)
 
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index 311b49e18a..fe412355d8 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -226,6 +226,8 @@ class LoggingContext(object):
             self.request = request
 
     def __str__(self):
+        if self.request:
+            return str(self.request)
         return "%s@%x" % (self.name, id(self))
 
     @classmethod
@@ -274,12 +276,10 @@ class LoggingContext(object):
         current = self.set_current_context(self.previous_context)
         if current is not self:
             if current is self.sentinel:
-                logger.warn("Expected logging context %s has been lost", self)
+                logger.warning("Expected logging context %s was lost", self)
             else:
-                logger.warn(
-                    "Current logging context %s is not expected context %s",
-                    current,
-                    self
+                logger.warning(
+                    "Expected logging context %s but found %s", self, current
                 )
         self.previous_context = None
         self.alive = False
@@ -433,10 +433,14 @@ class PreserveLoggingContext(object):
         context = LoggingContext.set_current_context(self.current_context)
 
         if context != self.new_context:
-            logger.warn(
-                "Unexpected logging context: %s is not %s",
-                context, self.new_context,
-            )
+            if context is LoggingContext.sentinel:
+                logger.warning("Expected logging context %s was lost", self.new_context)
+            else:
+                logger.warning(
+                    "Expected logging context %s but found %s",
+                    self.new_context,
+                    context,
+                )
 
         if self.current_context is not LoggingContext.sentinel:
             if not self.current_context.alive:
diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py
index 9cb7e9c9ab..628a2962d9 100644
--- a/synapse/util/manhole.py
+++ b/synapse/util/manhole.py
@@ -1,4 +1,5 @@
 # Copyright 2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,10 +12,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import sys
+import traceback
 
 from twisted.conch import manhole_ssh
 from twisted.conch.insults import insults
-from twisted.conch.manhole import ColoredManhole
+from twisted.conch.manhole import ColoredManhole, ManholeInterpreter
 from twisted.conch.ssh.keys import Key
 from twisted.cred import checkers, portal
 
@@ -79,7 +82,7 @@ def manhole(username, password, globals):
 
     rlm = manhole_ssh.TerminalRealm()
     rlm.chainedProtocolFactory = lambda: insults.ServerProtocol(
-        ColoredManhole,
+        SynapseManhole,
         dict(globals, __name__="__console__")
     )
 
@@ -88,3 +91,55 @@ def manhole(username, password, globals):
     factory.privateKeys[b'ssh-rsa'] = Key.fromString(PRIVATE_KEY)
 
     return factory
+
+
+class SynapseManhole(ColoredManhole):
+    """Overrides connectionMade to create our own ManholeInterpreter"""
+    def connectionMade(self):
+        super(SynapseManhole, self).connectionMade()
+
+        # replace the manhole interpreter with our own impl
+        self.interpreter = SynapseManholeInterpreter(self, self.namespace)
+
+        # this would also be a good place to add more keyHandlers.
+
+
+class SynapseManholeInterpreter(ManholeInterpreter):
+    def showsyntaxerror(self, filename=None):
+        """Display the syntax error that just occurred.
+
+        Overrides the base implementation, ignoring sys.excepthook. We always want
+        any syntax errors to be sent to the terminal, rather than sentry.
+        """
+        type, value, tb = sys.exc_info()
+        sys.last_type = type
+        sys.last_value = value
+        sys.last_traceback = tb
+        if filename and type is SyntaxError:
+            # Work hard to stuff the correct filename in the exception
+            try:
+                msg, (dummy_filename, lineno, offset, line) = value.args
+            except ValueError:
+                # Not the format we expect; leave it alone
+                pass
+            else:
+                # Stuff in the right filename
+                value = SyntaxError(msg, (filename, lineno, offset, line))
+                sys.last_value = value
+        lines = traceback.format_exception_only(type, value)
+        self.write(''.join(lines))
+
+    def showtraceback(self):
+        """Display the exception that just occurred.
+
+        Overrides the base implementation, ignoring sys.excepthook. We always want
+        any exceptions to be sent to the terminal, rather than sentry.
+        """
+        sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
+        sys.last_traceback = last_tb
+        try:
+            # We remove the first stack item because it is our own code.
+            lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
+            self.write(''.join(lines))
+        finally:
+            last_tb = ei = None
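
For context, the factory returned by manhole() is meant to be bound to a
listening port; a rough sketch of that wiring (the port and credentials are
illustrative, not taken from this patch):

    from twisted.internet import reactor
    from synapse.util.manhole import manhole

    factory = manhole("user", "secret", globals())
    reactor.listenTCP(9000, factory)
    # then connect with: ssh -p 9000 user@127.0.0.1
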
diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py
index 7deb38f2a7..b146d137f4 100644
--- a/synapse/util/ratelimitutils.py
+++ b/synapse/util/ratelimitutils.py
@@ -30,31 +30,14 @@ logger = logging.getLogger(__name__)
 
 
 class FederationRateLimiter(object):
-    def __init__(self, clock, window_size, sleep_limit, sleep_msec,
-                 reject_limit, concurrent_requests):
+    def __init__(self, clock, config):
         """
         Args:
             clock (Clock)
-            window_size (int): The window size in milliseconds.
-            sleep_limit (int): The number of requests received in the last
-                `window_size` milliseconds before we artificially start
-                delaying processing of requests.
-            sleep_msec (int): The number of milliseconds to delay processing
-                of incoming requests by.
-            reject_limit (int): The maximum number of requests that are can be
-                queued for processing before we start rejecting requests with
-                a 429 Too Many Requests response.
-            concurrent_requests (int): The number of concurrent requests to
-                process.
+            config (FederationRateLimitConfig)
         """
         self.clock = clock
-
-        self.window_size = window_size
-        self.sleep_limit = sleep_limit
-        self.sleep_msec = sleep_msec
-        self.reject_limit = reject_limit
-        self.concurrent_requests = concurrent_requests
-
+        self._config = config
         self.ratelimiters = {}
 
     def ratelimit(self, host):
@@ -76,25 +59,25 @@ class FederationRateLimiter(object):
             host,
             _PerHostRatelimiter(
                 clock=self.clock,
-                window_size=self.window_size,
-                sleep_limit=self.sleep_limit,
-                sleep_msec=self.sleep_msec,
-                reject_limit=self.reject_limit,
-                concurrent_requests=self.concurrent_requests,
+                config=self._config,
             )
         ).ratelimit()
 
 
 class _PerHostRatelimiter(object):
-    def __init__(self, clock, window_size, sleep_limit, sleep_msec,
-                 reject_limit, concurrent_requests):
+    def __init__(self, clock, config):
+        """
+        Args:
+            clock (Clock)
+            config (FederationRateLimitConfig)
+        """
         self.clock = clock
 
-        self.window_size = window_size
-        self.sleep_limit = sleep_limit
-        self.sleep_sec = sleep_msec / 1000.0
-        self.reject_limit = reject_limit
-        self.concurrent_requests = concurrent_requests
+        self.window_size = config.window_size
+        self.sleep_limit = config.sleep_limit
+        self.sleep_sec = config.sleep_delay / 1000.0
+        self.reject_limit = config.reject_limit
+        self.concurrent_requests = config.concurrent
 
         # request_id objects for requests which have been slept
         self.sleeping_requests = set()
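
ratelimit() is used as a context manager that hands back a Deferred to wait on
before any work is done for that host; a usage sketch (origin here is a
hypothetical remote server name):

    with self.ratelimiter.ratelimit(origin) as wait_deferred:
        yield wait_deferred
        # handle the request; any rate-limiting sleep has already elapsed
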
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 26cce7d197..1a77456498 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -46,8 +46,7 @@ class NotRetryingDestination(Exception):
 
 
 @defer.inlineCallbacks
-def get_retry_limiter(destination, clock, store, ignore_backoff=False,
-                      **kwargs):
+def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs):
     """For a given destination check if we have previously failed to
     send a request there and are waiting before retrying the destination.
     If we are not ready to retry the destination, this will raise a
@@ -60,8 +59,7 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False,
         clock (synapse.util.clock): timing source
         store (synapse.storage.transactions.TransactionStore): datastore
         ignore_backoff (bool): true to ignore the historical backoff data and
-            try the request anyway. We will still update the next
-            retry_interval on success/failure.
+            try the request anyway. We will still reset the retry_interval on success.
 
     Example usage:
 
@@ -75,13 +73,12 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False,
     """
     retry_last_ts, retry_interval = (0, 0)
 
-    retry_timings = yield store.get_destination_retry_timings(
-        destination
-    )
+    retry_timings = yield store.get_destination_retry_timings(destination)
 
     if retry_timings:
         retry_last_ts, retry_interval = (
-            retry_timings["retry_last_ts"], retry_timings["retry_interval"]
+            retry_timings["retry_last_ts"],
+            retry_timings["retry_interval"],
         )
 
         now = int(clock.time_msec())
@@ -93,22 +90,36 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False,
                 destination=destination,
             )
 
+    # if we are ignoring the backoff data, we should also not increment the backoff
+    # when we get another failure - otherwise a server can very quickly reach the
+    # maximum backoff even though it might only have been down briefly
+    backoff_on_failure = not ignore_backoff
+
     defer.returnValue(
         RetryDestinationLimiter(
             destination,
             clock,
             store,
             retry_interval,
+            backoff_on_failure=backoff_on_failure,
             **kwargs
         )
     )
 
 
 class RetryDestinationLimiter(object):
-    def __init__(self, destination, clock, store, retry_interval,
-                 min_retry_interval=10 * 60 * 1000,
-                 max_retry_interval=24 * 60 * 60 * 1000,
-                 multiplier_retry_interval=5, backoff_on_404=False):
+    def __init__(
+        self,
+        destination,
+        clock,
+        store,
+        retry_interval,
+        min_retry_interval=10 * 60 * 1000,
+        max_retry_interval=24 * 60 * 60 * 1000,
+        multiplier_retry_interval=5,
+        backoff_on_404=False,
+        backoff_on_failure=True,
+    ):
         """Marks the destination as "down" if an exception is thrown in the
         context, except for CodeMessageException with code < 500.
 
@@ -128,6 +139,9 @@ class RetryDestinationLimiter(object):
             multiplier_retry_interval (int): The multiplier to use to increase
                 the retry interval after a failed request.
             backoff_on_404 (bool): Back off if we get a 404
+
+            backoff_on_failure (bool): set to False if we should not increase the
+                retry interval on a failure.
         """
         self.clock = clock
         self.store = store
@@ -138,6 +152,7 @@ class RetryDestinationLimiter(object):
         self.max_retry_interval = max_retry_interval
         self.multiplier_retry_interval = multiplier_retry_interval
         self.backoff_on_404 = backoff_on_404
+        self.backoff_on_failure = backoff_on_failure
 
     def __enter__(self):
         pass
@@ -173,10 +188,13 @@ class RetryDestinationLimiter(object):
             if not self.retry_interval:
                 return
 
-            logger.debug("Connection to %s was successful; clearing backoff",
-                         self.destination)
+            logger.debug(
+                "Connection to %s was successful; clearing backoff", self.destination
+            )
             retry_last_ts = 0
             self.retry_interval = 0
+        elif not self.backoff_on_failure:
+            return
         else:
             # We couldn't connect.
             if self.retry_interval:
@@ -190,7 +208,10 @@ class RetryDestinationLimiter(object):
 
             logger.info(
                 "Connection to %s was unsuccessful (%s(%s)); backoff now %i",
-                self.destination, exc_type, exc_val, self.retry_interval
+                self.destination,
+                exc_type,
+                exc_val,
+                self.retry_interval,
             )
             retry_last_ts = int(self.clock.time_msec())
 
@@ -201,9 +222,7 @@ class RetryDestinationLimiter(object):
                     self.destination, retry_last_ts, self.retry_interval
                 )
             except Exception:
-                logger.exception(
-                    "Failed to store destination_retry_timings",
-                )
+                logger.exception("Failed to store destination_retry_timings")
 
         # we deliberately do this in the background.
         synapse.util.logcontext.run_in_background(store_retry_timings)
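
With backoff_on_failure left at its default, each further failure is expected
to multiply retry_interval by multiplier_retry_interval up to
max_retry_interval (the multiplication itself is outside this hunk). A sketch
of the schedule that would produce under the defaults above:

    MIN_MS = 10 * 60 * 1000       # min_retry_interval
    MAX_MS = 24 * 60 * 60 * 1000  # max_retry_interval

    interval = 0
    schedule = []
    for _ in range(5):
        # assumed behaviour: start at the minimum, then multiply by 5, capped
        interval = min(interval * 5, MAX_MS) if interval else MIN_MS
        schedule.append(interval)
    # schedule -> [600000, 3000000, 15000000, 75000000, 86400000]
    #             (10m,    50m,     ~4.2h,    ~20.8h,   24h cap)
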
diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
index fdcb375f95..69dffd8244 100644
--- a/synapse/util/stringutils.py
+++ b/synapse/util/stringutils.py
@@ -24,14 +24,19 @@ _string_with_symbols = (
     string.digits + string.ascii_letters + ".,;:^&*-_+=#~@"
 )
 
+# random_string and random_string_with_symbols are used for a range of things,
+# some cryptographically important, some less so. We use SystemRandom to make sure
+# we get cryptographically-secure randoms.
+rand = random.SystemRandom()
+
 
 def random_string(length):
-    return ''.join(random.choice(string.ascii_letters) for _ in range(length))
+    return ''.join(rand.choice(string.ascii_letters) for _ in range(length))
 
 
 def random_string_with_symbols(length):
     return ''.join(
-        random.choice(_string_with_symbols) for _ in range(length)
+        rand.choice(_string_with_symbols) for _ in range(length)
     )
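
Since rand is a SystemRandom instance, both helpers draw from the OS entropy
pool rather than the seedable Mersenne Twister. For instance:

    token = random_string(24)
    # 24 characters from [A-Za-z]: about 24 * log2(52), i.e. ~137 bits of entropy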