Diffstat (limited to 'synapse/storage')
-rw-r--r--  synapse/storage/__init__.py | 350
-rw-r--r--  synapse/storage/_base.py | 75
-rw-r--r--  synapse/storage/appservice.py | 138
-rw-r--r--  synapse/storage/event_federation.py | 51
-rw-r--r--  synapse/storage/push_rule.py | 48
-rw-r--r--  synapse/storage/roommember.py | 70
-rw-r--r--  synapse/storage/schema/application_services.sql | 34
-rw-r--r--  synapse/storage/schema/delta/11/v11.sql (renamed from synapse/storage/schema/delta/v11.sql) | 0
-rw-r--r--  synapse/storage/schema/delta/12/v12.sql (renamed from synapse/storage/schema/delta/v12.sql) | 0
-rw-r--r--  synapse/storage/schema/delta/13/v13.sql (renamed from synapse/storage/schema/delta/v13.sql) | 0
-rw-r--r--  synapse/storage/schema/delta/14/upgrade_appservice_db.py | 23
-rw-r--r--  synapse/storage/schema/delta/14/v14.sql | 9
-rw-r--r--  synapse/storage/schema/delta/v2.sql | 168
-rw-r--r--  synapse/storage/schema/delta/v3.sql | 27
-rw-r--r--  synapse/storage/schema/delta/v4.sql | 26
-rw-r--r--  synapse/storage/schema/delta/v5.sql | 30
-rw-r--r--  synapse/storage/schema/delta/v6.sql | 31
-rw-r--r--  synapse/storage/schema/delta/v8.sql | 34
-rw-r--r--  synapse/storage/schema/delta/v9.sql | 79
-rw-r--r--  synapse/storage/schema/filtering.sql | 24
-rw-r--r--  synapse/storage/schema/full_schemas/11/event_edges.sql (renamed from synapse/storage/schema/event_edges.sql) | 0
-rw-r--r--  synapse/storage/schema/full_schemas/11/event_signatures.sql (renamed from synapse/storage/schema/event_signatures.sql) | 0
-rw-r--r--  synapse/storage/schema/full_schemas/11/im.sql (renamed from synapse/storage/schema/im.sql) | 0
-rw-r--r--  synapse/storage/schema/full_schemas/11/keys.sql (renamed from synapse/storage/schema/keys.sql) | 0
-rw-r--r--  synapse/storage/schema/full_schemas/11/media_repository.sql (renamed from synapse/storage/schema/media_repository.sql) | 0
-rw-r--r--  synapse/storage/schema/full_schemas/11/presence.sql (renamed from synapse/storage/schema/presence.sql) | 0
-rw-r--r--  synapse/storage/schema/full_schemas/11/profiles.sql (renamed from synapse/storage/schema/profiles.sql) | 0
-rw-r--r--  synapse/storage/schema/full_schemas/11/redactions.sql (renamed from synapse/storage/schema/redactions.sql) | 0
-rw-r--r--  synapse/storage/schema/full_schemas/11/room_aliases.sql (renamed from synapse/storage/schema/room_aliases.sql) | 0
-rw-r--r--  synapse/storage/schema/full_schemas/11/state.sql (renamed from synapse/storage/schema/state.sql) | 0
-rw-r--r--  synapse/storage/schema/full_schemas/11/transactions.sql (renamed from synapse/storage/schema/transactions.sql) | 0
-rw-r--r--  synapse/storage/schema/full_schemas/11/users.sql (renamed from synapse/storage/schema/users.sql) | 0
-rw-r--r--  synapse/storage/schema/pusher.sql | 46
-rw-r--r--  synapse/storage/schema/rejections.sql | 21
-rw-r--r--  synapse/storage/schema/schema_version.sql | 30
-rw-r--r--  synapse/storage/stream.py | 83
-rw-r--r--  synapse/storage/transactions.py | 16
37 files changed, 723 insertions, 690 deletions
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index d16e7b8fac..a3ff995695 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -45,36 +45,19 @@ from syutil.jsonutil import encode_canonical_json
 from synapse.crypto.event_signing import compute_event_reference_hash
 
 
+import fnmatch
+import imp
 import logging
 import os
+import re
 
 
 logger = logging.getLogger(__name__)
 
 
-SCHEMAS = [
-    "transactions",
-    "users",
-    "profiles",
-    "presence",
-    "im",
-    "room_aliases",
-    "keys",
-    "redactions",
-    "state",
-    "event_edges",
-    "event_signatures",
-    "pusher",
-    "media_repository",
-    "application_services",
-    "filtering",
-    "rejections",
-]
-
-
-# Remember to update this number every time an incompatible change is made to
-# database schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 13
+# Remember to update this number every time a change is made to database
+# schema files, so the users will be informed on server restarts.
+SCHEMA_VERSION = 14
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
@@ -576,28 +559,15 @@ class DataStore(RoomMemberStore, RoomStore,
         )
 
 
-def schema_path(schema):
-    """ Get a filesystem path for the named database schema
-
-    Args:
-        schema: Name of the database schema.
-    Returns:
-        A filesystem path pointing at a ".sql" file.
-
-    """
-    schemaPath = os.path.join(dir_path, "schema", schema + ".sql")
-    return schemaPath
-
-
-def read_schema(schema):
+def read_schema(path):
     """ Read the named database schema.
 
     Args:
-        schema: Name of the datbase schema.
+        path: Path of the database schema.
     Returns:
         A string containing the database schema.
     """
-    with open(schema_path(schema)) as schema_file:
+    with open(path) as schema_file:
         return schema_file.read()
 
 
@@ -610,49 +580,275 @@ class UpgradeDatabaseException(PrepareDatabaseException):
 
 
 def prepare_database(db_conn):
-    """ Set up all the dbs. Since all the *.sql have IF NOT EXISTS, so we
-    don't have to worry about overwriting existing content.
+    """Prepares a database for usage. Will either create all necessary tables
+    or upgrade from an older schema version.
+    """
+    try:
+        cur = db_conn.cursor()
+        version_info = _get_or_create_schema_state(cur)
+
+        if version_info:
+            user_version, delta_files, upgraded = version_info
+            _upgrade_existing_database(cur, user_version, delta_files, upgraded)
+        else:
+            _setup_new_database(cur)
+
+        cur.execute("PRAGMA user_version = %d" % (SCHEMA_VERSION,))
+
+        cur.close()
+        db_conn.commit()
+    except:
+        db_conn.rollback()
+        raise
+
+
+def _setup_new_database(cur):
+    """Sets up the database by finding a base set of "full schemas" and then
+    applying any necessary deltas.
+
+    The "full_schemas" directory has subdirectories named after versions. This
+    function searches for the highest version less than or equal to
+    `SCHEMA_VERSION` and executes all .sql files in that directory.
+
+    The function will then apply all deltas for all versions after the base
+    version.
+
+    Example directory structure:
+
+        schema/
+            delta/
+                ...
+            full_schemas/
+                3/
+                    test.sql
+                    ...
+                11/
+                    foo.sql
+                    bar.sql
+                ...
+
+    In the example foo.sql and bar.sql would be run, and then any delta files
+    for versions strictly greater than 11.
     """
-    c = db_conn.cursor()
-    c.execute("PRAGMA user_version")
-    row = c.fetchone()
+    current_dir = os.path.join(dir_path, "schema", "full_schemas")
+    directory_entries = os.listdir(current_dir)
+
+    valid_dirs = []
+    pattern = re.compile(r"^\d+(\.sql)?$")
+    for filename in directory_entries:
+        match = pattern.match(filename)
+        abs_path = os.path.join(current_dir, filename)
+        if match and os.path.isdir(abs_path):
+            ver = int(match.group(0))
+            if ver <= SCHEMA_VERSION:
+                valid_dirs.append((ver, abs_path))
+        else:
+            logger.warn("Unexpected entry in 'full_schemas': %s", filename)
 
-    if row and row[0]:
-        user_version = row[0]
+    if not valid_dirs:
+        raise PrepareDatabaseException(
+            "Could not find a suitable base set of full schemas"
+        )
 
-        if user_version > SCHEMA_VERSION:
-            raise ValueError(
-                "Cannot use this database as it is too " +
-                "new for the server to understand"
-            )
-        elif user_version < SCHEMA_VERSION:
-            logger.info(
-                "Upgrading database from version %d",
-                user_version
+    max_current_ver, sql_dir = max(valid_dirs, key=lambda x: x[0])
+
+    logger.debug("Initialising schema v%d", max_current_ver)
+
+    directory_entries = os.listdir(sql_dir)
+
+    sql_script = "BEGIN TRANSACTION;\n"
+    for filename in fnmatch.filter(directory_entries, "*.sql"):
+        sql_loc = os.path.join(sql_dir, filename)
+        logger.debug("Applying schema %s", sql_loc)
+        sql_script += read_schema(sql_loc)
+        sql_script += "\n"
+    sql_script += "COMMIT TRANSACTION;"
+    cur.executescript(sql_script)
+
+    cur.execute(
+        "INSERT OR REPLACE INTO schema_version (version, upgraded)"
+        " VALUES (?,?)",
+        (max_current_ver, False)
+    )
+
+    _upgrade_existing_database(
+        cur,
+        current_version=max_current_ver,
+        applied_delta_files=[],
+        upgraded=False
+    )
+
+
+def _upgrade_existing_database(cur, current_version, applied_delta_files,
+                               upgraded):
+    """Upgrades an existing database.
+
+    Delta files can either be SQL stored in *.sql files, or python modules
+    in *.py.
+
+    There can be multiple delta files per version. Synapse will keep track of
+    which delta files have been applied, and will apply any that haven't been
+    even if there has been no version bump. This is useful for development
+    where orthogonal schema changes may happen on separate branches.
+
+    Different delta files for the same version *must* be orthogonal and give
+    the same result when applied in any order. No guarantees are made on the
+    order of execution of these scripts.
+
+    This is a no-op if current_version == SCHEMA_VERSION.
+
+    Example directory structure:
+
+        schema/
+            delta/
+                11/
+                    foo.sql
+                    ...
+                12/
+                    foo.sql
+                    bar.py
+                ...
+            full_schemas/
+                ...
+
+    In the example, if current_version is 11, then 11/foo.sql will be run if
+    and only if `upgraded` is True. Then 12/foo.sql and 12/bar.py would be run
+    in some arbitrary order.
+
+    Args:
+        cur (Cursor)
+        current_version (int): The current version of the schema.
+        applied_delta_files (list): A list of deltas that have already been
+            applied.
+        upgraded (bool): Whether the current version was generated by having
+            applied deltas or from a full schema file. If `False` the function
+            will never apply delta files for the given `current_version`, since
+            the current_version wasn't generated by applying those delta files.
+    """
+
+    if current_version > SCHEMA_VERSION:
+        raise ValueError(
+            "Cannot use this database as it is too " +
+            "new for the server to understand"
+        )
+
+    start_ver = current_version
+    if not upgraded:
+        start_ver += 1
+
+    for v in range(start_ver, SCHEMA_VERSION + 1):
+        logger.debug("Upgrading schema to v%d", v)
+
+        delta_dir = os.path.join(dir_path, "schema", "delta", str(v))
+
+        try:
+            directory_entries = os.listdir(delta_dir)
+        except OSError:
+            logger.exception("Could not open delta dir for version %d", v)
+            raise UpgradeDatabaseException(
+                "Could not open delta dir for version %d" % (v,)
             )
 
-            # Run every version since after the current version.
-            for v in range(user_version + 1, SCHEMA_VERSION + 1):
-                if v == 10:
-                    raise UpgradeDatabaseException(
-                        "No delta for version 10"
+        directory_entries.sort()
+        for file_name in directory_entries:
+            relative_path = os.path.join(str(v), file_name)
+            if relative_path in applied_delta_files:
+                continue
+
+            absolute_path = os.path.join(
+                dir_path, "schema", "delta", relative_path,
+            )
+            root_name, ext = os.path.splitext(file_name)
+            if ext == ".py":
+                # This is a python upgrade module. We need to import into some
+                # package and then execute its `run_upgrade` function.
+                module_name = "synapse.storage.v%d_%s" % (
+                    v, root_name
+                )
+                with open(absolute_path) as python_file:
+                    module = imp.load_source(
+                        module_name, absolute_path, python_file
                     )
-                sql_script = read_schema("delta/v%d" % (v))
-                c.executescript(sql_script)
+                logger.debug("Running script %s", relative_path)
+                module.run_upgrade(cur)
+            elif ext == ".sql":
+                # A plain old .sql file, just read and execute it
+                delta_schema = read_schema(absolute_path)
+                logger.debug("Applying schema %s", relative_path)
+                cur.executescript(delta_schema)
+            else:
+                # Not a valid delta file.
+                logger.warn(
+                    "Found directory entry that did not end in .py or"
+                    " .sql: %s",
+                    relative_path,
+                )
+                continue
+
+            # Mark as done.
+            cur.execute(
+                "INSERT INTO applied_schema_deltas (version, file)"
+                " VALUES (?,?)",
+                (v, relative_path)
+            )
+
+            cur.execute(
+                "INSERT OR REPLACE INTO schema_version (version, upgraded)"
+                " VALUES (?,?)",
+                (v, True)
+            )
 
-            db_conn.commit()
-        else:
-            logger.info("Database is at version %r", user_version)
-
-    else:
-        sql_script = "BEGIN TRANSACTION;\n"
-        for sql_loc in SCHEMAS:
-            logger.debug("Applying schema %r", sql_loc)
-            sql_script += read_schema(sql_loc)
-            sql_script += "\n"
-        sql_script += "COMMIT TRANSACTION;"
-        c.executescript(sql_script)
-        db_conn.commit()
-        c.execute("PRAGMA user_version = %d" % SCHEMA_VERSION)
 
-    c.close()
+def _get_or_create_schema_state(txn):
+    schema_path = os.path.join(
+        dir_path, "schema", "schema_version.sql",
+    )
+    create_schema = read_schema(schema_path)
+    txn.executescript(create_schema)
+
+    txn.execute("SELECT version, upgraded FROM schema_version")
+    row = txn.fetchone()
+    current_version = int(row[0]) if row else None
+    upgraded = bool(row[1]) if row else None
+
+    if current_version:
+        txn.execute(
+            "SELECT file FROM applied_schema_deltas WHERE version >= ?",
+            (current_version,)
+        )
+        return current_version, txn.fetchall(), upgraded
+
+    return None
+
+
+def prepare_sqlite3_database(db_conn):
+    """This function should be called before `prepare_database` on sqlite3
+    databases.
+
+    Since we changed the way we store the current schema version and handle
+    updates to schemas, we need a way to upgrade from the old method to the
+    new. This only affects sqlite databases since they were the only ones
+    supported at the time.
+    """
+    with db_conn:
+        schema_path = os.path.join(
+            dir_path, "schema", "schema_version.sql",
+        )
+        create_schema = read_schema(schema_path)
+        db_conn.executescript(create_schema)
+
+        c = db_conn.execute("SELECT * FROM schema_version")
+        rows = c.fetchall()
+        c.close()
+
+        if not rows:
+            c = db_conn.execute("PRAGMA user_version")
+            row = c.fetchone()
+            c.close()
+
+            if row and row[0]:
+                db_conn.execute(
+                    "INSERT OR REPLACE INTO schema_version (version, upgraded)"
+                    " VALUES (?,?)",
+                    (row[0], False)
+                )
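
For context, the two entry points added above compose as follows:
prepare_sqlite3_database() migrates the legacy "PRAGMA user_version"
bookkeeping into the new schema_version table, after which prepare_database()
installs the base full schema (or upgrades an existing one) and applies any
outstanding deltas. A minimal usage sketch, with an illustrative database path:

    import sqlite3

    from synapse.storage import prepare_database, prepare_sqlite3_database

    db_conn = sqlite3.connect("homeserver.db")

    # Convert any pre-existing PRAGMA user_version state into a
    # schema_version row so prepare_database() can pick it up.
    prepare_sqlite3_database(db_conn)

    # Create or upgrade the schema, recording each applied delta file in
    # applied_schema_deltas as it goes.
    prepare_database(db_conn)
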
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index be9934c66f..3725c9795d 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -23,7 +23,7 @@ from synapse.util.lrucache import LruCache
 
 from twisted.internet import defer
 
-import collections
+from collections import namedtuple, OrderedDict
 import simplejson as json
 import sys
 import time
@@ -35,6 +35,52 @@ sql_logger = logging.getLogger("synapse.storage.SQL")
 transaction_logger = logging.getLogger("synapse.storage.txn")
 
 
+# TODO(paul):
+#  * more generic key management
+#  * export monitoring stats
+#  * consider other eviction strategies - LRU?
+def cached(max_entries=1000):
+    """ A method decorator that applies a memoizing cache around the function.
+
+    The function is presumed to take one additional argument, which is used as
+    the key for the cache. Cache hits are served directly from the cache;
+    misses use the function body to generate the value.
+
+    The wrapped function has an additional member, a callable called
+    "invalidate". This can be used to remove individual entries from the cache.
+
+    The wrapped function has another additional callable, called "prefill",
+    which can be used to insert values into the cache specifically, without
+    calling the calculation function.
+    """
+    def wrap(orig):
+        cache = OrderedDict()
+
+        def prefill(key, value):
+            while len(cache) > max_entries:
+                cache.popitem(last=False)
+
+            cache[key] = value
+
+        @defer.inlineCallbacks
+        def wrapped(self, key):
+            if key in cache:
+                defer.returnValue(cache[key])
+
+            ret = yield orig(self, key)
+            prefill(key, ret)
+            defer.returnValue(ret)
+
+        def invalidate(key):
+            cache.pop(key, None)
+
+        wrapped.invalidate = invalidate
+        wrapped.prefill = prefill
+        return wrapped
+
+    return wrap
+
+
 class LoggingTransaction(object):
     """An object that almost-transparently proxies for the 'txn' object
     passed to the constructor. Adds logging to the .execute() method."""
@@ -404,7 +450,8 @@ class SQLBaseStore(object):
 
         Args:
             table : string giving the table name
-            keyvalues : dict of column names and values to select the rows with
+            keyvalues : dict of column names and values to select the rows with,
+                or None to not apply a WHERE clause.
             retcols : list of strings giving the names of the columns to return
         """
         return self.runInteraction(
@@ -423,13 +470,20 @@ class SQLBaseStore(object):
             keyvalues : dict of column names and values to select the rows with
             retcols : list of strings giving the names of the columns to return
         """
-        sql = "SELECT %s FROM %s WHERE %s ORDER BY rowid asc" % (
-            ", ".join(retcols),
-            table,
-            " AND ".join("%s = ?" % (k, ) for k in keyvalues)
-        )
+        if keyvalues:
+            sql = "SELECT %s FROM %s WHERE %s ORDER BY rowid asc" % (
+                ", ".join(retcols),
+                table,
+                " AND ".join("%s = ?" % (k, ) for k in keyvalues)
+            )
+            txn.execute(sql, keyvalues.values())
+        else:
+            sql = "SELECT %s FROM %s ORDER BY rowid asc" % (
+                ", ".join(retcols),
+                table
+            )
+            txn.execute(sql)
 
-        txn.execute(sql, keyvalues.values())
         return self.cursor_to_dict(txn)
 
     def _simple_update_one(self, table, keyvalues, updatevalues,
@@ -586,8 +640,9 @@ class SQLBaseStore(object):
         start_time = time.time() * 1000
         update_counter = self._get_event_counters.update
 
+        cache = self._get_event_cache.setdefault(event_id, {})
+
         try:
-            cache = self._get_event_cache.setdefault(event_id, {})
             # Separate cache entries for each way to invoke _get_event_txn
             return cache[(check_redacted, get_prev_content, allow_rejected)]
         except KeyError:
@@ -786,7 +841,7 @@ class JoinHelper(object):
         for table in self.tables:
             res += [f for f in table.fields if f not in res]
 
-        self.EntryType = collections.namedtuple("JoinHelperEntry", res)
+        self.EntryType = namedtuple("JoinHelperEntry", res)
 
     def get_fields(self, **prefixes):
         """Get a string representing a list of fields for use in SELECT
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index d941b1f387..e30265750a 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -13,34 +13,32 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+import simplejson
+from simplejson import JSONDecodeError
 from twisted.internet import defer
 
+from synapse.api.constants import Membership
 from synapse.api.errors import StoreError
 from synapse.appservice import ApplicationService
+from synapse.storage.roommember import RoomsForUser
 from ._base import SQLBaseStore
 
 
 logger = logging.getLogger(__name__)
 
 
-class ApplicationServiceCache(object):
-    """Caches ApplicationServices and provides utility functions on top.
-
-    This class is designed to be invoked on incoming events in order to avoid
-    hammering the database every time to extract a list of application service
-    regexes.
-    """
-
-    def __init__(self):
-        self.services = []
+def log_failure(failure):
+    logger.error("Failed to detect application services: %s", failure.value)
+    logger.error(failure.getTraceback())
 
 
 class ApplicationServiceStore(SQLBaseStore):
 
     def __init__(self, hs):
         super(ApplicationServiceStore, self).__init__(hs)
-        self.cache = ApplicationServiceCache()
+        self.services_cache = []
         self.cache_defer = self._populate_cache()
+        self.cache_defer.addErrback(log_failure)
 
     @defer.inlineCallbacks
     def unregister_app_service(self, token):
@@ -56,7 +54,7 @@ class ApplicationServiceStore(SQLBaseStore):
             token,
         )
         # update cache TODO: Should this be in the txn?
-        for service in self.cache.services:
+        for service in self.services_cache:
             if service.token == token:
                 service.url = None
                 service.namespaces = None
@@ -110,13 +108,13 @@ class ApplicationServiceStore(SQLBaseStore):
         )
 
         # update cache TODO: Should this be in the txn?
-        for (index, cache_service) in enumerate(self.cache.services):
+        for (index, cache_service) in enumerate(self.services_cache):
             if service.token == cache_service.token:
-                self.cache.services[index] = service
+                self.services_cache[index] = service
                 logger.info("Updated: %s", service)
                 return
         # new entry
-        self.cache.services.append(service)
+        self.services_cache.append(service)
         logger.info("Updated(new): %s", service)
 
     def _update_app_service_txn(self, txn, service):
@@ -140,11 +138,11 @@ class ApplicationServiceStore(SQLBaseStore):
         )
         for (ns_int, ns_str) in enumerate(ApplicationService.NS_LIST):
             if ns_str in service.namespaces:
-                for regex in service.namespaces[ns_str]:
+                for regex_obj in service.namespaces[ns_str]:
                     txn.execute(
                         "INSERT INTO application_services_regex("
                         "as_id, namespace, regex) values(?,?,?)",
-                        (as_id, ns_int, regex)
+                        (as_id, ns_int, simplejson.dumps(regex_obj))
                     )
         return True
 
@@ -160,11 +158,34 @@ class ApplicationServiceStore(SQLBaseStore):
     @defer.inlineCallbacks
     def get_app_services(self):
         yield self.cache_defer  # make sure the cache is ready
-        defer.returnValue(self.cache.services)
+        defer.returnValue(self.services_cache)
+
+    @defer.inlineCallbacks
+    def get_app_service_by_user_id(self, user_id):
+        """Retrieve an application service from their user ID.
+
+        Every application service has a particular user ID associated with it.
+        There is no distinguishing feature on the user ID which indicates it
+        represents an application service. This function allows you to map from
+        a user ID to an application service.
+
+        Args:
+            user_id(str): The user ID to see if it is an application service.
+        Returns:
+            synapse.appservice.ApplicationService or None.
+        """
+
+        yield self.cache_defer  # make sure the cache is ready
+
+        for service in self.services_cache:
+            if service.sender == user_id:
+                defer.returnValue(service)
+                return
+        defer.returnValue(None)
 
     @defer.inlineCallbacks
     def get_app_service_by_token(self, token, from_cache=True):
-        """Get the application service with the given token.
+        """Get the application service with the given appservice token.
 
         Args:
             token (str): The application service token.
@@ -176,7 +197,7 @@ class ApplicationServiceStore(SQLBaseStore):
         yield self.cache_defer  # make sure the cache is ready
 
         if from_cache:
-            for service in self.cache.services:
+            for service in self.services_cache:
                 if service.token == token:
                     defer.returnValue(service)
                     return
@@ -185,6 +206,77 @@ class ApplicationServiceStore(SQLBaseStore):
         # TODO: The from_cache=False impl
         # TODO: This should be JOINed with the application_services_regex table.
 
+    def get_app_service_rooms(self, service):
+        """Get a list of RoomsForUser for this application service.
+
+        Application services may be "interested" in lots of rooms depending on
+        the room ID, the room aliases, or the members in the room. This function
+        takes all of these into account and returns a list of RoomsForUser which
+        represent the entire list of room IDs that this application service
+        wants to know about.
+
+        Args:
+            service: The application service to get a room list for.
+        Returns:
+            A list of RoomsForUser.
+        """
+        return self.runInteraction(
+            "get_app_service_rooms",
+            self._get_app_service_rooms_txn,
+            service,
+        )
+
+    def _get_app_service_rooms_txn(self, txn, service):
+        # get all rooms matching the room ID regex.
+        room_entries = self._simple_select_list_txn(
+            txn=txn, table="rooms", keyvalues=None, retcols=["room_id"]
+        )
+        matching_room_list = set([
+            r["room_id"] for r in room_entries if
+            service.is_interested_in_room(r["room_id"])
+        ])
+
+        # resolve room IDs for matching room alias regex.
+        room_alias_mappings = self._simple_select_list_txn(
+            txn=txn, table="room_aliases", keyvalues=None,
+            retcols=["room_id", "room_alias"]
+        )
+        matching_room_list |= set([
+            r["room_id"] for r in room_alias_mappings if
+            service.is_interested_in_alias(r["room_alias"])
+        ])
+
+        # get all rooms for every user for this AS. This is scoped to users on
+        # this HS only.
+        user_list = self._simple_select_list_txn(
+            txn=txn, table="users", keyvalues=None, retcols=["name"]
+        )
+        user_list = [
+            u["name"] for u in user_list if
+            service.is_interested_in_user(u["name"])
+        ]
+        rooms_for_user_matching_user_id = set()  # RoomsForUser list
+        for user_id in user_list:
+            # FIXME: This assumes this store is linked with RoomMemberStore :(
+            rooms_for_user = self._get_rooms_for_user_where_membership_is_txn(
+                txn=txn,
+                user_id=user_id,
+                membership_list=[Membership.JOIN]
+            )
+            rooms_for_user_matching_user_id |= set(rooms_for_user)
+
+        # make RoomsForUser tuples for room ids and aliases which are not in the
+        # main rooms_for_user_matching_user_id set - i.e. rooms which do not
+        # have any AS-registered users in them.
+        known_room_ids = [r.room_id for r in rooms_for_user_matching_user_id]
+        missing_rooms_for_user = [
+            RoomsForUser(r, service.sender, "join") for r in
+            matching_room_list if r not in known_room_ids
+        ]
+        rooms_for_user_matching_user_id |= set(missing_rooms_for_user)
+
+        return rooms_for_user_matching_user_id
+
     @defer.inlineCallbacks
     def _populate_cache(self):
         """Populates the ApplicationServiceCache from the database."""
@@ -227,15 +319,17 @@ class ApplicationServiceStore(SQLBaseStore):
             try:
                 services[as_token]["namespaces"][
                     ApplicationService.NS_LIST[ns_int]].append(
-                    res["regex"]
+                    simplejson.loads(res["regex"])
                 )
             except IndexError:
                 logger.error("Bad namespace enum '%s'. %s", ns_int, res)
+            except JSONDecodeError:
+                logger.error("Bad regex object '%s'", res["regex"])
 
         # TODO get last successful txn id f.e. service
         for service in services.values():
             logger.info("Found application service: %s", service)
-            self.cache.services.append(ApplicationService(
+            self.services_cache.append(ApplicationService(
                 token=service["token"],
                 url=service["url"],
                 namespaces=service["namespaces"],
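
One behavioural change worth noting above: application_services_regex.regex
now stores a JSON object rather than a bare regex string (the delta in
14/upgrade_appservice_db.py below rewrites legacy rows into the new shape).
A sketch of the two column values:

    import simplejson

    old_column_value = "@irc_.*"  # pre-v14: bare regex string

    # v14 onwards: a JSON object, as written by the upgrade script for
    # legacy rows and read back by _populate_cache() via simplejson.loads().
    new_column_value = simplejson.dumps({"regex": "@irc_.*", "exclusive": True})
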
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 3fbc090224..2deda8ac50 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -64,6 +64,9 @@ class EventFederationStore(SQLBaseStore):
             for f in front:
                 txn.execute(base_sql, (f,))
                 new_front.update([r[0] for r in txn.fetchall()])
+
+            new_front -= results
+
             front = new_front
             results.update(front)
 
@@ -378,3 +381,51 @@ class EventFederationStore(SQLBaseStore):
             event_results += new_front
 
         return self._get_events_txn(txn, event_results)
+
+    def get_missing_events(self, room_id, earliest_events, latest_events,
+                           limit, min_depth):
+        return self.runInteraction(
+            "get_missing_events",
+            self._get_missing_events,
+            room_id, earliest_events, latest_events, limit, min_depth
+        )
+
+    def _get_missing_events(self, txn, room_id, earliest_events, latest_events,
+                            limit, min_depth):
+
+        earliest_events = set(earliest_events)
+        front = set(latest_events) - earliest_events
+
+        event_results = set()
+
+        query = (
+            "SELECT prev_event_id FROM event_edges "
+            "WHERE room_id = ? AND event_id = ? AND is_state = 0 "
+            "LIMIT ?"
+        )
+
+        while front and len(event_results) < limit:
+            new_front = set()
+            for event_id in front:
+                txn.execute(
+                    query,
+                    (room_id, event_id, limit - len(event_results))
+                )
+
+                for e_id, in txn.fetchall():
+                    new_front.add(e_id)
+
+            new_front -= earliest_events
+            new_front -= event_results
+
+            front = new_front
+            event_results |= new_front
+
+        events = self._get_events_txn(txn, event_results)
+
+        events = sorted(
+            [ev for ev in events if ev.depth >= min_depth],
+            key=lambda e: e.depth,
+        )
+
+        return events[:limit]
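
The get_missing_events() addition above is a bounded breadth-first walk
backwards through event_edges, from latest_events towards earliest_events.
A standalone sketch of the traversal, with a plain dict standing in for the
table:

    # graph maps event_id -> iterable of prev_event_ids (i.e. the
    # event_edges rows with is_state = 0). Mirrors _get_missing_events().
    def walk_back(graph, earliest_events, latest_events, limit):
        earliest = set(earliest_events)
        front = set(latest_events) - earliest
        results = set()
        while front and len(results) < limit:
            new_front = set()
            for event_id in front:
                new_front.update(graph.get(event_id, ()))
            new_front -= earliest  # stop at events the caller already has
            new_front -= results   # don't revisit collected events
            front = new_front
            results |= new_front
        return results

    # e.g. a linear history E4 -> E3 -> E2 -> E1:
    graph = {"E4": ["E3"], "E3": ["E2"], "E2": ["E1"]}
    walk_back(graph, earliest_events=["E1"], latest_events=["E4"], limit=10)
    # => {"E2", "E3"}
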
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index ae46b39cc1..bbf322cc84 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -27,7 +27,7 @@ logger = logging.getLogger(__name__)
 
 class PushRuleStore(SQLBaseStore):
     @defer.inlineCallbacks
-    def get_push_rules_for_user_name(self, user_name):
+    def get_push_rules_for_user(self, user_name):
         sql = (
             "SELECT "+",".join(PushRuleTable.fields)+" "
             "FROM "+PushRuleTable.table_name+" "
@@ -46,6 +46,28 @@ class PushRuleStore(SQLBaseStore):
         defer.returnValue(dicts)
 
     @defer.inlineCallbacks
+    def get_push_rules_enabled_for_user(self, user_name):
+        results = yield self._simple_select_list(
+            PushRuleEnableTable.table_name,
+            {'user_name': user_name},
+            PushRuleEnableTable.fields
+        )
+        defer.returnValue(
+            {r['rule_id']: False if r['enabled'] == 0 else True for r in results}
+        )
+
+    @defer.inlineCallbacks
+    def get_push_rule_enabled_by_user_rule_id(self, user_name, rule_id):
+        results = yield self._simple_select_list(
+            PushRuleEnableTable.table_name,
+            {'user_name': user_name, 'rule_id': rule_id},
+            ['enabled']
+        )
+        if not results:
+            defer.returnValue(True)
+        defer.returnValue(bool(results[0]['enabled']))
+
+    @defer.inlineCallbacks
     def add_push_rule(self, before, after, **kwargs):
         vals = copy.copy(kwargs)
         if 'conditions' in vals:
@@ -193,6 +215,20 @@ class PushRuleStore(SQLBaseStore):
             {'user_name': user_name, 'rule_id': rule_id}
         )
 
+    @defer.inlineCallbacks
+    def set_push_rule_enabled(self, user_name, rule_id, enabled):
+        if enabled:
+            yield self._simple_delete_one(
+                PushRuleEnableTable.table_name,
+                {'user_name': user_name, 'rule_id': rule_id}
+            )
+        else:
+            yield self._simple_upsert(
+                PushRuleEnableTable.table_name,
+                {'user_name': user_name, 'rule_id': rule_id},
+                {'enabled': False}
+            )
+
 
 class RuleNotFoundException(Exception):
     pass
@@ -216,3 +252,13 @@ class PushRuleTable(Table):
     ]
 
     EntryType = collections.namedtuple("PushRuleEntry", fields)
+
+
+class PushRuleEnableTable(Table):
+    table_name = "push_rules_enable"
+
+    fields = [
+        "user_name",
+        "rule_id",
+        "enabled"
+    ]
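
The push_rules_enable table (created by delta 14/v14.sql below) encodes
enablement by exception: a rule is enabled unless a row explicitly disables
it, which is why set_push_rule_enabled() deletes the row to enable and upserts
enabled=False to disable. An illustrative helper showing the defaulting, not
part of the patch:

    from twisted.internet import defer


    @defer.inlineCallbacks
    def is_rule_enabled(store, user_name, rule_id):
        # Rules with no push_rules_enable row default to enabled.
        enabled_map = yield store.get_push_rules_enabled_for_user(user_name)
        defer.returnValue(enabled_map.get(rule_id, True))
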
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 9bf608bc90..65ffb4627f 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -17,7 +17,7 @@ from twisted.internet import defer
 
 from collections import namedtuple
 
-from ._base import SQLBaseStore
+from ._base import SQLBaseStore, cached
 
 from synapse.api.constants import Membership
 from synapse.types import UserID
@@ -35,11 +35,6 @@ RoomsForUser = namedtuple(
 
 class RoomMemberStore(SQLBaseStore):
 
-    def __init__(self, *args, **kw):
-        super(RoomMemberStore, self).__init__(*args, **kw)
-
-        self._user_rooms_cache = {}
-
     def _store_room_member_txn(self, txn, event):
         """Store a room member in the database.
         """
@@ -103,7 +98,7 @@ class RoomMemberStore(SQLBaseStore):
 
                 txn.execute(sql, (event.room_id, domain))
 
-        self.invalidate_rooms_for_user(target_user_id)
+        self.get_rooms_for_user.invalidate(target_user_id)
 
     @defer.inlineCallbacks
     def get_room_member(self, user_id, room_id):
@@ -185,6 +180,14 @@ class RoomMemberStore(SQLBaseStore):
         if not membership_list:
             return defer.succeed(None)
 
+        return self.runInteraction(
+            "get_rooms_for_user_where_membership_is",
+            self._get_rooms_for_user_where_membership_is_txn,
+            user_id, membership_list
+        )
+
+    def _get_rooms_for_user_where_membership_is_txn(self, txn, user_id,
+                                                    membership_list):
         where_clause = "user_id = ? AND (%s)" % (
             " OR ".join(["membership = ?" for _ in membership_list]),
         )
@@ -192,24 +195,18 @@ class RoomMemberStore(SQLBaseStore):
         args = [user_id]
         args.extend(membership_list)
 
-        def f(txn):
-            sql = (
-                "SELECT m.room_id, m.sender, m.membership"
-                " FROM room_memberships as m"
-                " INNER JOIN current_state_events as c"
-                " ON m.event_id = c.event_id"
-                " WHERE %s"
-            ) % (where_clause,)
-
-            txn.execute(sql, args)
-            return [
-                RoomsForUser(**r) for r in self.cursor_to_dict(txn)
-            ]
+        sql = (
+            "SELECT m.room_id, m.sender, m.membership"
+            " FROM room_memberships as m"
+            " INNER JOIN current_state_events as c"
+            " ON m.event_id = c.event_id"
+            " WHERE %s"
+        ) % (where_clause,)
 
-        return self.runInteraction(
-            "get_rooms_for_user_where_membership_is",
-            f
-        )
+        txn.execute(sql, args)
+        return [
+            RoomsForUser(**r) for r in self.cursor_to_dict(txn)
+        ]
 
     def get_joined_hosts_for_room(self, room_id):
         return self._simple_select_onecol(
@@ -247,33 +244,12 @@ class RoomMemberStore(SQLBaseStore):
         results = self._parse_events_txn(txn, rows)
         return results
 
-    # TODO(paul): Create a nice @cached decorator to do this
-    #    @cached
-    #    def get_foo(...)
-    #        ...
-    #    invalidate_foo = get_foo.invalidator
-
-    @defer.inlineCallbacks
+    @cached()
     def get_rooms_for_user(self, user_id):
-        # TODO(paul): put some performance counters in here so we can easily
-        #   track what impact this cache is having
-        if user_id in self._user_rooms_cache:
-            defer.returnValue(self._user_rooms_cache[user_id])
-
-        rooms = yield self.get_rooms_for_user_where_membership_is(
+        return self.get_rooms_for_user_where_membership_is(
             user_id, membership_list=[Membership.JOIN],
         )
 
-        # TODO(paul): Consider applying a maximum size; just evict things at
-        #   random, or consider LRU?
-
-        self._user_rooms_cache[user_id] = rooms
-        defer.returnValue(rooms)
-
-    def invalidate_rooms_for_user(self, user_id):
-        if user_id in self._user_rooms_cache:
-            del self._user_rooms_cache[user_id]
-
     @defer.inlineCallbacks
     def user_rooms_intersect(self, user_id_list):
         """ Checks whether all the users whose IDs are given in a list share a
diff --git a/synapse/storage/schema/application_services.sql b/synapse/storage/schema/application_services.sql
deleted file mode 100644
index e491ad5aec..0000000000
--- a/synapse/storage/schema/application_services.sql
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright 2015 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-CREATE TABLE IF NOT EXISTS application_services(
-    id INTEGER PRIMARY KEY AUTOINCREMENT,
-    url TEXT,
-    token TEXT,
-    hs_token TEXT,
-    sender TEXT,
-    UNIQUE(token) ON CONFLICT ROLLBACK
-);
-
-CREATE TABLE IF NOT EXISTS application_services_regex(
-    id INTEGER PRIMARY KEY AUTOINCREMENT,
-    as_id INTEGER NOT NULL,
-    namespace INTEGER,  /* enum[room_id|room_alias|user_id] */
-    regex TEXT,
-    FOREIGN KEY(as_id) REFERENCES application_services(id)
-);
-
-
-
diff --git a/synapse/storage/schema/delta/v11.sql b/synapse/storage/schema/delta/11/v11.sql
index 313592221b..313592221b 100644
--- a/synapse/storage/schema/delta/v11.sql
+++ b/synapse/storage/schema/delta/11/v11.sql
diff --git a/synapse/storage/schema/delta/v12.sql b/synapse/storage/schema/delta/12/v12.sql
index b87ef1fe79..b87ef1fe79 100644
--- a/synapse/storage/schema/delta/v12.sql
+++ b/synapse/storage/schema/delta/12/v12.sql
diff --git a/synapse/storage/schema/delta/v13.sql b/synapse/storage/schema/delta/13/v13.sql
index e491ad5aec..e491ad5aec 100644
--- a/synapse/storage/schema/delta/v13.sql
+++ b/synapse/storage/schema/delta/13/v13.sql
diff --git a/synapse/storage/schema/delta/14/upgrade_appservice_db.py b/synapse/storage/schema/delta/14/upgrade_appservice_db.py
new file mode 100644
index 0000000000..847b1c5b89
--- /dev/null
+++ b/synapse/storage/schema/delta/14/upgrade_appservice_db.py
@@ -0,0 +1,23 @@
+import json
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def run_upgrade(cur):
+    cur.execute("SELECT id, regex FROM application_services_regex")
+    for row in cur.fetchall():
+        try:
+            logger.debug("Checking %s...", row[0])
+            json.loads(row[1])
+        except ValueError:
+            # row isn't in json, make it so.
+            string_regex = row[1]
+            new_regex = json.dumps({
+                "regex": string_regex,
+                "exclusive": True
+            })
+            cur.execute(
+                "UPDATE application_services_regex SET regex=? WHERE id=?",
+                (new_regex, row[0])
+            )
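
When _upgrade_existing_database() (in __init__.py above) reaches version 14 it
loads this module by path and calls run_upgrade() with the open cursor. A
sketch of that flow against a throwaway database, assuming it is run from the
repository root:

    import imp
    import sqlite3

    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.execute(
        "CREATE TABLE application_services_regex("
        "id INTEGER PRIMARY KEY, regex TEXT)"
    )
    cur.execute("INSERT INTO application_services_regex VALUES (1, '@irc_.*')")

    # Module name mirrors the "synapse.storage.v%d_%s" convention used by
    # _upgrade_existing_database().
    module = imp.load_source(
        "synapse.storage.v14_upgrade_appservice_db",
        "synapse/storage/schema/delta/14/upgrade_appservice_db.py",
    )
    module.run_upgrade(cur)

    cur.execute("SELECT regex FROM application_services_regex")
    print(cur.fetchone()[0])  # e.g. {"regex": "@irc_.*", "exclusive": true}
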
diff --git a/synapse/storage/schema/delta/14/v14.sql b/synapse/storage/schema/delta/14/v14.sql
new file mode 100644
index 0000000000..0212726448
--- /dev/null
+++ b/synapse/storage/schema/delta/14/v14.sql
@@ -0,0 +1,9 @@
+CREATE TABLE IF NOT EXISTS push_rules_enable (
+  id INTEGER PRIMARY KEY AUTOINCREMENT,
+  user_name TEXT NOT NULL,
+  rule_id TEXT NOT NULL,
+  enabled TINYINT,
+  UNIQUE(user_name, rule_id)
+);
+
+CREATE INDEX IF NOT EXISTS push_rules_enable_user_name on push_rules_enable (user_name);
diff --git a/synapse/storage/schema/delta/v2.sql b/synapse/storage/schema/delta/v2.sql
deleted file mode 100644
index f740f6dd5d..0000000000
--- a/synapse/storage/schema/delta/v2.sql
+++ /dev/null
@@ -1,168 +0,0 @@
-/* Copyright 2014, 2015 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-CREATE TABLE IF NOT EXISTS events(
-    stream_ordering INTEGER PRIMARY KEY AUTOINCREMENT,
-    topological_ordering INTEGER NOT NULL,
-    event_id TEXT NOT NULL,
-    type TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    content TEXT NOT NULL,
-    unrecognized_keys TEXT,
-    processed BOOL NOT NULL,
-    outlier BOOL NOT NULL,
-    CONSTRAINT ev_uniq UNIQUE (event_id)
-);
-
-CREATE INDEX IF NOT EXISTS events_event_id ON events (event_id);
-CREATE INDEX IF NOT EXISTS events_stream_ordering ON events (stream_ordering);
-CREATE INDEX IF NOT EXISTS events_topological_ordering ON events (topological_ordering);
-CREATE INDEX IF NOT EXISTS events_room_id ON events (room_id);
-
-CREATE TABLE IF NOT EXISTS state_events(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    type TEXT NOT NULL,
-    state_key TEXT NOT NULL,
-    prev_state TEXT
-);
-
-CREATE UNIQUE INDEX IF NOT EXISTS state_events_event_id ON state_events (event_id);
-CREATE INDEX IF NOT EXISTS state_events_room_id ON state_events (room_id);
-CREATE INDEX IF NOT EXISTS state_events_type ON state_events (type);
-CREATE INDEX IF NOT EXISTS state_events_state_key ON state_events (state_key);
-
-
-CREATE TABLE IF NOT EXISTS current_state_events(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    type TEXT NOT NULL,
-    state_key TEXT NOT NULL,
-    CONSTRAINT curr_uniq UNIQUE (room_id, type, state_key) ON CONFLICT REPLACE
-);
-
-CREATE INDEX IF NOT EXISTS curr_events_event_id ON current_state_events (event_id);
-CREATE INDEX IF NOT EXISTS current_state_events_room_id ON current_state_events (room_id);
-CREATE INDEX IF NOT EXISTS current_state_events_type ON current_state_events (type);
-CREATE INDEX IF NOT EXISTS current_state_events_state_key ON current_state_events (state_key);
-
-CREATE TABLE IF NOT EXISTS room_memberships(
-    event_id TEXT NOT NULL,
-    user_id TEXT NOT NULL,
-    sender TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    membership TEXT NOT NULL
-);
-
-CREATE INDEX IF NOT EXISTS room_memberships_event_id ON room_memberships (event_id);
-CREATE INDEX IF NOT EXISTS room_memberships_room_id ON room_memberships (room_id);
-CREATE INDEX IF NOT EXISTS room_memberships_user_id ON room_memberships (user_id);
-
-CREATE TABLE IF NOT EXISTS feedback(
-    event_id TEXT NOT NULL,
-    feedback_type TEXT,
-    target_event_id TEXT,
-    sender TEXT,
-    room_id TEXT
-);
-
-CREATE TABLE IF NOT EXISTS topics(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    topic TEXT NOT NULL
-);
-
-CREATE TABLE IF NOT EXISTS room_names(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    name TEXT NOT NULL
-);
-
-CREATE TABLE IF NOT EXISTS rooms(
-    room_id TEXT PRIMARY KEY NOT NULL,
-    is_public INTEGER,
-    creator TEXT
-);
-
-CREATE TABLE IF NOT EXISTS room_join_rules(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    join_rule TEXT NOT NULL
-);
-CREATE INDEX IF NOT EXISTS room_join_rules_event_id ON room_join_rules(event_id);
-CREATE INDEX IF NOT EXISTS room_join_rules_room_id ON room_join_rules(room_id);
-
-
-CREATE TABLE IF NOT EXISTS room_power_levels(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    user_id TEXT NOT NULL,
-    level INTEGER NOT NULL
-);
-CREATE INDEX IF NOT EXISTS room_power_levels_event_id ON room_power_levels(event_id);
-CREATE INDEX IF NOT EXISTS room_power_levels_room_id ON room_power_levels(room_id);
-CREATE INDEX IF NOT EXISTS room_power_levels_room_user ON room_power_levels(room_id, user_id);
-
-
-CREATE TABLE IF NOT EXISTS room_default_levels(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    level INTEGER NOT NULL
-);
-
-CREATE INDEX IF NOT EXISTS room_default_levels_event_id ON room_default_levels(event_id);
-CREATE INDEX IF NOT EXISTS room_default_levels_room_id ON room_default_levels(room_id);
-
-
-CREATE TABLE IF NOT EXISTS room_add_state_levels(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    level INTEGER NOT NULL
-);
-
-CREATE INDEX IF NOT EXISTS room_add_state_levels_event_id ON room_add_state_levels(event_id);
-CREATE INDEX IF NOT EXISTS room_add_state_levels_room_id ON room_add_state_levels(room_id);
-
-
-CREATE TABLE IF NOT EXISTS room_send_event_levels(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    level INTEGER NOT NULL
-);
-
-CREATE INDEX IF NOT EXISTS room_send_event_levels_event_id ON room_send_event_levels(event_id);
-CREATE INDEX IF NOT EXISTS room_send_event_levels_room_id ON room_send_event_levels(room_id);
-
-
-CREATE TABLE IF NOT EXISTS room_ops_levels(
-    event_id TEXT NOT NULL,
-    room_id TEXT NOT NULL,
-    ban_level INTEGER,
-    kick_level INTEGER
-);
-
-CREATE INDEX IF NOT EXISTS room_ops_levels_event_id ON room_ops_levels(event_id);
-CREATE INDEX IF NOT EXISTS room_ops_levels_room_id ON room_ops_levels(room_id);
-
-
-CREATE TABLE IF NOT EXISTS room_hosts(
-    room_id TEXT NOT NULL,
-    host TEXT NOT NULL,
-    CONSTRAINT room_hosts_uniq UNIQUE (room_id, host) ON CONFLICT IGNORE
-);
-
-CREATE INDEX IF NOT EXISTS room_hosts_room_id ON room_hosts (room_id);
-
-PRAGMA user_version = 2;
diff --git a/synapse/storage/schema/delta/v3.sql b/synapse/storage/schema/delta/v3.sql
deleted file mode 100644
index c67e38ff52..0000000000
--- a/synapse/storage/schema/delta/v3.sql
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright 2014, 2015 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-CREATE INDEX IF NOT EXISTS room_aliases_alias ON room_aliases(room_alias);
-CREATE INDEX IF NOT EXISTS room_aliases_id ON room_aliases(room_id);
-
-
-CREATE INDEX IF NOT EXISTS room_alias_servers_alias ON room_alias_servers(room_alias);
-
-DELETE FROM room_aliases WHERE rowid NOT IN (SELECT max(rowid) FROM room_aliases GROUP BY room_alias, room_id);
-
-CREATE UNIQUE INDEX IF NOT EXISTS room_aliases_uniq ON room_aliases(room_alias, room_id);
-
-PRAGMA user_version = 3;
diff --git a/synapse/storage/schema/delta/v4.sql b/synapse/storage/schema/delta/v4.sql
deleted file mode 100644
index d3807b7686..0000000000
--- a/synapse/storage/schema/delta/v4.sql
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright 2014, 2015 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-CREATE TABLE IF NOT EXISTS redactions (
-    event_id TEXT NOT NULL,
-    redacts TEXT NOT NULL,
-    CONSTRAINT ev_uniq UNIQUE (event_id)
-);
-
-CREATE INDEX IF NOT EXISTS redactions_event_id ON redactions (event_id);
-CREATE INDEX IF NOT EXISTS redactions_redacts ON redactions (redacts);
-
-ALTER TABLE room_ops_levels ADD COLUMN redact_level INTEGER;
-
-PRAGMA user_version = 4;
diff --git a/synapse/storage/schema/delta/v5.sql b/synapse/storage/schema/delta/v5.sql
deleted file mode 100644
index 0874a15431..0000000000
--- a/synapse/storage/schema/delta/v5.sql
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright 2014, 2015 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-CREATE TABLE IF NOT EXISTS user_ips (
-    user TEXT NOT NULL,
-    access_token TEXT NOT NULL,
-    device_id TEXT,
-    ip TEXT NOT NULL,
-    user_agent TEXT NOT NULL,
-    last_seen INTEGER NOT NULL,
-    CONSTRAINT user_ip UNIQUE (user, access_token, ip, user_agent) ON CONFLICT REPLACE
-);
-
-CREATE INDEX IF NOT EXISTS user_ips_user ON user_ips(user);
-
-ALTER TABLE users ADD COLUMN admin BOOL DEFAULT 0 NOT NULL;
-
-PRAGMA user_version = 5;
diff --git a/synapse/storage/schema/delta/v6.sql b/synapse/storage/schema/delta/v6.sql
deleted file mode 100644
index a9e0a4fe0d..0000000000
--- a/synapse/storage/schema/delta/v6.sql
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright 2014, 2015 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-CREATE TABLE IF NOT EXISTS server_tls_certificates(
-  server_name TEXT, -- Server name.
-  fingerprint TEXT, -- Certificate fingerprint.
-  from_server TEXT, -- Which key server the certificate was fetched from.
-  ts_added_ms INTEGER, -- When the certifcate was added.
-  tls_certificate BLOB, -- DER encoded x509 certificate.
-  CONSTRAINT uniqueness UNIQUE (server_name, fingerprint)
-);
-
-CREATE TABLE IF NOT EXISTS server_signature_keys(
-  server_name TEXT, -- Server name.
-  key_id TEXT, -- Key version.
-  from_server TEXT, -- Which key server the key was fetched form.
-  ts_added_ms INTEGER, -- When the key was added.
-  verify_key BLOB, -- NACL verification key.
-  CONSTRAINT uniqueness UNIQUE (server_name, key_id)
-);
diff --git a/synapse/storage/schema/delta/v8.sql b/synapse/storage/schema/delta/v8.sql
deleted file mode 100644
index 1e9f8b18cb..0000000000
--- a/synapse/storage/schema/delta/v8.sql
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright 2014, 2015 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- CREATE TABLE IF NOT EXISTS event_signatures_2 (
-    event_id TEXT,
-    signature_name TEXT,
-    key_id TEXT,
-    signature BLOB,
-    CONSTRAINT uniqueness UNIQUE (event_id, signature_name, key_id)
-);
-
-INSERT INTO event_signatures_2 (event_id, signature_name, key_id, signature)
-SELECT event_id, signature_name, key_id, signature FROM event_signatures;
-
-DROP TABLE event_signatures;
-ALTER TABLE event_signatures_2 RENAME TO event_signatures;
-
-CREATE INDEX IF NOT EXISTS event_signatures_id ON event_signatures (
-    event_id
-);
-
-PRAGMA user_version = 8;
\ No newline at end of file
diff --git a/synapse/storage/schema/delta/v9.sql b/synapse/storage/schema/delta/v9.sql
deleted file mode 100644
index 455d51a70c..0000000000
--- a/synapse/storage/schema/delta/v9.sql
+++ /dev/null
@@ -1,79 +0,0 @@
-/* Copyright 2014, 2015 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
--- To track destination health
-CREATE TABLE IF NOT EXISTS destinations(
-    destination TEXT PRIMARY KEY,
-    retry_last_ts INTEGER,
-    retry_interval INTEGER
-);
-
-
-CREATE TABLE IF NOT EXISTS local_media_repository (
-    media_id TEXT, -- The id used to refer to the media.
-    media_type TEXT, -- The MIME-type of the media.
-    media_length INTEGER, -- Length of the media in bytes.
-    created_ts INTEGER, -- When the content was uploaded in ms.
-    upload_name TEXT, -- The name the media was uploaded with.
-    user_id TEXT, -- The user who uploaded the file.
-    CONSTRAINT uniqueness UNIQUE (media_id)
-);
-
-CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails (
-    media_id TEXT, -- The id used to refer to the media.
-    thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
-    thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
-    thumbnail_type TEXT, -- The MIME-type of the thumbnail.
-    thumbnail_method TEXT, -- The method used to make the thumbnail.
-    thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
-    CONSTRAINT uniqueness UNIQUE (
-        media_id, thumbnail_width, thumbnail_height, thumbnail_type
-    )
-);
-
-CREATE INDEX IF NOT EXISTS local_media_repository_thumbnails_media_id
-    ON local_media_repository_thumbnails (media_id);
-
-CREATE TABLE IF NOT EXISTS remote_media_cache (
-    media_origin TEXT, -- The remote HS the media came from.
-    media_id TEXT, -- The id used to refer to the media on that server.
-    media_type TEXT, -- The MIME-type of the media.
-    created_ts INTEGER, -- When the content was uploaded in ms.
-    upload_name TEXT, -- The name the media was uploaded with.
-    media_length INTEGER, -- Length of the media in bytes.
-    filesystem_id TEXT, -- The name used to store the media on disk.
-    CONSTRAINT uniqueness UNIQUE (media_origin, media_id)
-);
-
-CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails (
-    media_origin TEXT, -- The remote HS the media came from.
-    media_id TEXT, -- The id used to refer to the media.
-    thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
-    thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
-    thumbnail_method TEXT, -- The method used to make the thumbnail.
-    thumbnail_type TEXT, -- The MIME-type of the thumbnail.
-    thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
-    filesystem_id TEXT, -- The name used to store the media on disk.
-    CONSTRAINT uniqueness UNIQUE (
-        media_origin, media_id, thumbnail_width, thumbnail_height,
-        thumbnail_type
-    )
-);
-
-CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id
-    ON remote_media_cache_thumbnails (media_id);
-
-
-PRAGMA user_version = 9;
diff --git a/synapse/storage/schema/filtering.sql b/synapse/storage/schema/filtering.sql
deleted file mode 100644
index beb39ca201..0000000000
--- a/synapse/storage/schema/filtering.sql
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright 2015 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-CREATE TABLE IF NOT EXISTS user_filters(
-  user_id TEXT,
-  filter_id INTEGER,
-  filter_json TEXT,
-  FOREIGN KEY(user_id) REFERENCES users(id)
-);
-
-CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters(
-  user_id, filter_id
-);
diff --git a/synapse/storage/schema/event_edges.sql b/synapse/storage/schema/full_schemas/11/event_edges.sql
index 1e766d6db2..1e766d6db2 100644
--- a/synapse/storage/schema/event_edges.sql
+++ b/synapse/storage/schema/full_schemas/11/event_edges.sql
diff --git a/synapse/storage/schema/event_signatures.sql b/synapse/storage/schema/full_schemas/11/event_signatures.sql
index c28c39c48a..c28c39c48a 100644
--- a/synapse/storage/schema/event_signatures.sql
+++ b/synapse/storage/schema/full_schemas/11/event_signatures.sql
diff --git a/synapse/storage/schema/im.sql b/synapse/storage/schema/full_schemas/11/im.sql
index dd00c1cd2f..dd00c1cd2f 100644
--- a/synapse/storage/schema/im.sql
+++ b/synapse/storage/schema/full_schemas/11/im.sql
diff --git a/synapse/storage/schema/keys.sql b/synapse/storage/schema/full_schemas/11/keys.sql
index a9e0a4fe0d..a9e0a4fe0d 100644
--- a/synapse/storage/schema/keys.sql
+++ b/synapse/storage/schema/full_schemas/11/keys.sql
diff --git a/synapse/storage/schema/media_repository.sql b/synapse/storage/schema/full_schemas/11/media_repository.sql
index afdf48cbfb..afdf48cbfb 100644
--- a/synapse/storage/schema/media_repository.sql
+++ b/synapse/storage/schema/full_schemas/11/media_repository.sql
diff --git a/synapse/storage/schema/presence.sql b/synapse/storage/schema/full_schemas/11/presence.sql
index f9f8db9697..f9f8db9697 100644
--- a/synapse/storage/schema/presence.sql
+++ b/synapse/storage/schema/full_schemas/11/presence.sql
diff --git a/synapse/storage/schema/profiles.sql b/synapse/storage/schema/full_schemas/11/profiles.sql
index f06a528b4d..f06a528b4d 100644
--- a/synapse/storage/schema/profiles.sql
+++ b/synapse/storage/schema/full_schemas/11/profiles.sql
diff --git a/synapse/storage/schema/redactions.sql b/synapse/storage/schema/full_schemas/11/redactions.sql
index 5011d95db8..5011d95db8 100644
--- a/synapse/storage/schema/redactions.sql
+++ b/synapse/storage/schema/full_schemas/11/redactions.sql
diff --git a/synapse/storage/schema/room_aliases.sql b/synapse/storage/schema/full_schemas/11/room_aliases.sql
index 0d2df01603..0d2df01603 100644
--- a/synapse/storage/schema/room_aliases.sql
+++ b/synapse/storage/schema/full_schemas/11/room_aliases.sql
diff --git a/synapse/storage/schema/state.sql b/synapse/storage/schema/full_schemas/11/state.sql
index 1fe8f1e430..1fe8f1e430 100644
--- a/synapse/storage/schema/state.sql
+++ b/synapse/storage/schema/full_schemas/11/state.sql
diff --git a/synapse/storage/schema/transactions.sql b/synapse/storage/schema/full_schemas/11/transactions.sql
index 2d30f99b06..2d30f99b06 100644
--- a/synapse/storage/schema/transactions.sql
+++ b/synapse/storage/schema/full_schemas/11/transactions.sql
diff --git a/synapse/storage/schema/users.sql b/synapse/storage/schema/full_schemas/11/users.sql
index 08ccfdac0a..08ccfdac0a 100644
--- a/synapse/storage/schema/users.sql
+++ b/synapse/storage/schema/full_schemas/11/users.sql
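The renames above move every base table definition under full_schemas/11/, so a complete schema version lives in one directory while delta/<n>/ holds the incremental upgrades on top of it. A rough sketch of how such a layout could be enumerated (list_full_schema_files is an illustrative helper, not the actual loader in __init__.py):

import fnmatch
import os

def list_full_schema_files(schema_dir, version):
    # e.g. <schema_dir>/full_schemas/11/*.sql, matching the layout above.
    directory = os.path.join(schema_dir, "full_schemas", str(version))
    return sorted(
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if fnmatch.fnmatch(name, "*.sql")
    )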
diff --git a/synapse/storage/schema/pusher.sql b/synapse/storage/schema/pusher.sql
deleted file mode 100644
index 3735b11547..0000000000
--- a/synapse/storage/schema/pusher.sql
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright 2014 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--- Push notification endpoints that users have configured
-CREATE TABLE IF NOT EXISTS pushers (
-  id INTEGER PRIMARY KEY AUTOINCREMENT,
-  user_name TEXT NOT NULL,
-  profile_tag varchar(32) NOT NULL,
-  kind varchar(8) NOT NULL,
-  app_id varchar(64) NOT NULL,
-  app_display_name varchar(64) NOT NULL,
-  device_display_name varchar(128) NOT NULL,
-  pushkey blob NOT NULL,
-  ts BIGINT NOT NULL,
-  lang varchar(8),
-  data blob,
-  last_token TEXT,
-  last_success BIGINT,
-  failing_since BIGINT,
-  FOREIGN KEY(user_name) REFERENCES users(name),
-  UNIQUE (app_id, pushkey)
-);
-
-CREATE TABLE IF NOT EXISTS push_rules (
-  id INTEGER PRIMARY KEY AUTOINCREMENT,
-  user_name TEXT NOT NULL,
-  rule_id TEXT NOT NULL,
-  priority_class TINYINT NOT NULL,
-  priority INTEGER NOT NULL DEFAULT 0,
-  conditions TEXT NOT NULL,
-  actions TEXT NOT NULL,
-  UNIQUE(user_name, rule_id)
-);
-
-CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name);
diff --git a/synapse/storage/schema/rejections.sql b/synapse/storage/schema/rejections.sql
deleted file mode 100644
index bd2a8b1bb5..0000000000
--- a/synapse/storage/schema/rejections.sql
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright 2015 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-CREATE TABLE IF NOT EXISTS rejections(
-    event_id TEXT NOT NULL,
-    reason TEXT NOT NULL,
-    last_check TEXT NOT NULL,
-    CONSTRAINT ev_id UNIQUE (event_id) ON CONFLICT REPLACE
-);
diff --git a/synapse/storage/schema/schema_version.sql b/synapse/storage/schema/schema_version.sql
new file mode 100644
index 0000000000..0431e2d051
--- /dev/null
+++ b/synapse/storage/schema/schema_version.sql
@@ -0,0 +1,30 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS schema_version(
+    Lock char(1) NOT NULL DEFAULT 'X',  -- Makes sure this table only has one row.
+    version INTEGER NOT NULL,
+    upgraded BOOL NOT NULL,  -- Whether we reached this version from an upgrade or an initial schema.
+    CONSTRAINT schema_version_lock_x CHECK (Lock='X'),
+    CONSTRAINT schema_version_lock_uniq UNIQUE (Lock)
+);
+
+CREATE TABLE IF NOT EXISTS applied_schema_deltas(
+    version INTEGER NOT NULL,
+    file TEXT NOT NULL,
+    CONSTRAINT schema_deltas_ver_file UNIQUE (version, file) ON CONFLICT IGNORE
+);
+
+CREATE INDEX IF NOT EXISTS schema_deltas_ver ON applied_schema_deltas(version);
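schema_version is forced to a single row (Lock may only hold 'X' and must be unique), and applied_schema_deltas records each delta file that has run, with ON CONFLICT IGNORE making re-application a no-op. A hedged sketch of how an upgrade step might use the two tables (plain sqlite3, with the helper name invented for illustration):

import sqlite3

def mark_delta_applied(conn, version, delta_file):
    # Duplicate (version, file) rows are silently ignored per the schema.
    conn.execute(
        "INSERT INTO applied_schema_deltas (version, file) VALUES (?, ?)",
        (version, delta_file),
    )
    # The single-row lock makes INSERT OR REPLACE keyed on Lock='X' safe.
    conn.execute(
        "INSERT OR REPLACE INTO schema_version (Lock, version, upgraded)"
        " VALUES ('X', ?, 1)",
        (version,),
    )
    conn.commit()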
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 3ccb6f8a61..09bc522210 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -36,6 +36,7 @@ what sort order was used:
 from twisted.internet import defer
 
 from ._base import SQLBaseStore
+from synapse.api.constants import EventTypes
 from synapse.api.errors import SynapseError
 from synapse.util.logutils import log_function
 
@@ -127,6 +128,85 @@ class _StreamToken(namedtuple("_StreamToken", "topological stream")):
 
 
 class StreamStore(SQLBaseStore):
+
+    @defer.inlineCallbacks
+    def get_appservice_room_stream(self, service, from_key, to_key, limit=0):
+        # NB this lives here instead of appservice.py so we can reuse the
+        # 'private' StreamToken class in this file.
+        if limit:
+            limit = min(limit, MAX_STREAM_SIZE)
+        else:
+            limit = MAX_STREAM_SIZE
+
+        # The from and to keys should be stream-ordering integers.
+        from_id = _StreamToken.parse_stream_token(from_key)
+        to_id = _StreamToken.parse_stream_token(to_key)
+
+        if from_key == to_key:
+            defer.returnValue(([], to_key))
+            return
+
+        # select all the events between from/to with a sensible limit
+        sql = (
+            "SELECT e.event_id, e.room_id, e.type, s.state_key, "
+            "e.stream_ordering FROM events AS e LEFT JOIN state_events as s ON "
+            "e.event_id = s.event_id "
+            "WHERE e.stream_ordering > ? AND e.stream_ordering <= ? "
+            "ORDER BY stream_ordering ASC LIMIT %(limit)d "
+        ) % {
+            "limit": limit
+        }
+
+        def f(txn):
+            # pull out all the events between the tokens
+            txn.execute(sql, (from_id.stream, to_id.stream,))
+            rows = self.cursor_to_dict(txn)
+
+            # Logic:
+            #  - We want ALL events which match the AS room_id regex
+            #  - We want ALL events which match the rooms represented by the AS
+            #    room_alias regex
+            #  - We want ALL events for rooms that AS users have joined.
+            # This is currently supported via get_app_service_rooms (which is
+            # used for the Notifier listener rooms). We can't reasonably make a
+            # SQL query for these room IDs, so we'll pull all the events between
+            # from/to and filter in python.
+            rooms_for_as = self._get_app_service_rooms_txn(txn, service)
+            room_ids_for_as = [r.room_id for r in rooms_for_as]
+
+            def app_service_interested(row):
+                if row["room_id"] in room_ids_for_as:
+                    return True
+
+                if row["type"] == EventTypes.Member:
+                    if service.is_interested_in_user(row.get("state_key")):
+                        return True
+                return False
+
+            ret = self._get_events_txn(
+                txn,
+                # apply the filter on the room id list
+                [
+                    r["event_id"] for r in rows
+                    if app_service_interested(r)
+                ],
+                get_prev_content=True
+            )
+
+            self._set_before_and_after(ret, rows)
+
+            if rows:
+                key = "s%d" % max(r["stream_ordering"] for r in rows)
+            else:
+                # Assume we didn't get anything because there was nothing to
+                # get.
+                key = to_key
+
+            return ret, key
+
+        results = yield self.runInteraction("get_appservice_room_stream", f)
+        defer.returnValue(results)
+
     @log_function
     def get_room_events_stream(self, user_id, from_key, to_key, room_id,
                                limit=0, with_feedback=False):
@@ -184,8 +264,7 @@ class StreamStore(SQLBaseStore):
             self._set_before_and_after(ret, rows)
 
             if rows:
-                key = "s%d" % max([r["stream_ordering"] for r in rows])
-
+                key = "s%d" % max(r["stream_ordering"] for r in rows)
             else:
                 # Assume we didn't get anything because there was nothing to
                 # get.
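get_appservice_room_stream returns the matching events together with a new "s<max ordering>" token (or the unchanged to-token when nothing matched), so callers can page by feeding each returned key back in. A rough usage sketch under Twisted (the store, service, and token values are assumptions for illustration, not code from this commit):

from twisted.internet import defer

@defer.inlineCallbacks
def collect_appservice_events(store, service, from_token, current_token):
    # Page through the stream; the returned key always advances, so the
    # loop terminates once it reaches current_token.
    collected = []
    while from_token != current_token:
        events, from_token = yield store.get_appservice_room_stream(
            service, from_token, current_token, limit=100
        )
        collected.extend(events)
    defer.returnValue((collected, from_token))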
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index e06ef35690..0b8a3b7a07 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -13,12 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore, Table
+from ._base import SQLBaseStore, Table, cached
 
 from collections import namedtuple
 
-from twisted.internet import defer
-
 import logging
 
 logger = logging.getLogger(__name__)
@@ -28,10 +26,6 @@ class TransactionStore(SQLBaseStore):
     """A collection of queries for handling PDUs.
     """
 
-    # a write-through cache of DestinationsTable.EntryType indexed by
-    # destination string
-    destination_retry_cache = {}
-
     def get_received_txn_response(self, transaction_id, origin):
         """For an incoming transaction from a given origin, check if we have
         already responded to it. If so, return the response code and response
@@ -211,6 +205,7 @@ class TransactionStore(SQLBaseStore):
 
         return ReceivedTransactionsTable.decode_results(txn.fetchall())
 
+    @cached()
     def get_destination_retry_timings(self, destination):
         """Gets the current retry timings (if any) for a given destination.
 
@@ -221,9 +216,6 @@ class TransactionStore(SQLBaseStore):
             None if not retrying
             Otherwise a DestinationsTable.EntryType for the retry scheme
         """
-        if destination in self.destination_retry_cache:
-            return defer.succeed(self.destination_retry_cache[destination])
-
         return self.runInteraction(
             "get_destination_retry_timings",
             self._get_destination_retry_timings, destination)
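These hunks swap the hand-rolled destination_retry_cache dict for the @cached() decorator from _base, and the hunk below prefills that cache whenever new retry timings are stored, saving the next read a database round-trip. A simplified sketch of the decorator's contract (the real one in synapse/storage/_base.py also deals with deferreds, size limits, and invalidation):

def cached():
    # Memoise a one-argument store method; .prefill(key, value) seeds
    # the cache without invoking the underlying method.
    def decorator(method):
        cache = {}

        def wrapped(self, key):
            if key not in cache:
                cache[key] = method(self, key)
            return cache[key]

        wrapped.prefill = cache.__setitem__
        return wrapped
    return decorator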
@@ -250,7 +242,9 @@ class TransactionStore(SQLBaseStore):
             retry_interval (int) - how long until next retry in ms
         """
 
-        self.destination_retry_cache[destination] = (
+        # As this is the new value, we might as well prefill the cache
+        self.get_destination_retry_timings.prefill(
+            destination,
             DestinationsTable.EntryType(
                 destination,
                 retry_last_ts,