-rw-r--r--  CHANGES.rst                                     |   6
-rw-r--r--  scripts-dev/convert_server_keys.py              |   5
-rwxr-xr-x  scripts/register_new_matrix_user                |   5
-rw-r--r--  synapse/__init__.py                             |   2
-rwxr-xr-x  synapse/app/homeserver.py                       |   8
-rw-r--r--  synapse/appservice/__init__.py                  |   6
-rw-r--r--  synapse/crypto/keyring.py                       |  56
-rw-r--r--  synapse/federation/federation_client.py         |   6
-rw-r--r--  synapse/federation/transaction_queue.py         |  31
-rw-r--r--  synapse/federation/transport/server.py          |   8
-rw-r--r--  synapse/handlers/appservice.py                  |   7
-rw-r--r--  synapse/handlers/room.py                        |   4
-rw-r--r--  synapse/notifier.py                             |   1
-rw-r--r--  synapse/push/__init__.py                        |  27
-rw-r--r--  synapse/storage/__init__.py                     |   2
-rw-r--r--  synapse/storage/_base.py                        | 325
-rw-r--r--  synapse/storage/engines/postgres.py             |   2
-rw-r--r--  synapse/storage/engines/sqlite3.py              |   2
-rw-r--r--  synapse/storage/event_federation.py             |  39
-rw-r--r--  synapse/storage/events.py                       | 455
-rw-r--r--  synapse/storage/push_rule.py                    |  21
-rw-r--r--  synapse/storage/room.py                         |   3
-rw-r--r--  synapse/storage/roommember.py                   |  41
-rw-r--r--  synapse/storage/schema/delta/19/event_index.sql |  19
-rw-r--r--  synapse/storage/state.py                        |  53
-rw-r--r--  synapse/storage/stream.py                       |  43
-rw-r--r--  synapse/util/__init__.py                        |  36
-rw-r--r--  synapse/util/lrucache.py                        |   8
-rw-r--r--  tests/appservice/test_appservice.py             |  15
-rw-r--r--  tests/rest/client/v1/test_presence.py           |   7
-rw-r--r--  tests/storage/test_base.py                      |   3
31 files changed, 851 insertions, 395 deletions
diff --git a/CHANGES.rst b/CHANGES.rst
index 65970a89c7..e1420d7a35 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,9 @@
+Changes in synapse v0.9.0-r5 (2015-05-21)
+=========================================
+
+* Add more database caches to reduce the amount of work done for each pusher. This
+  radically reduces CPU usage when multiple pushers are set up in the same room.
+
 Changes in synapse v0.9.0 (2015-05-07)
 ======================================
 
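
The caches the changelog refers to are built on the @cached decorator in synapse/storage/_base.py (extended in this commit with invalidate_all, further down). As a rough illustration of the mechanism only, here is a minimal synchronous sketch; the real decorator also handles Deferreds, bounded size, multi-argument keys and thread checks:

    import functools

    def cached(func):
        # Minimal memoizing decorator: single positional key, unbounded.
        cache = {}

        @functools.wraps(func)
        def wrapped(self, key):
            if key in cache:
                return cache[key]
            value = func(self, key)
            cache[key] = value
            return value

        # txn.call_after hooks use these to drop stale entries after commit.
        wrapped.invalidate = lambda key: cache.pop(key, None)
        wrapped.invalidate_all = cache.clear
        return wrapped
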
diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py
index 024ddcdbd0..a1ee39059c 100644
--- a/scripts-dev/convert_server_keys.py
+++ b/scripts-dev/convert_server_keys.py
@@ -47,7 +47,10 @@ def convert_v1_to_v2(server_name, valid_until, keys, certificate):
     return {
         "old_verify_keys": {},
         "server_name": server_name,
-        "verify_keys": keys,
+        "verify_keys": {
+            key_id: {"key": key}
+            for key_id, key in keys.items()
+        },
         "valid_until_ts": valid_until,
         "tls_fingerprints": [fingerprint(certificate)],
     }
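
The v2 key schema wraps each bare base64 key in an object so per-key metadata can be attached later. Illustratively (the key id and value here are made up):

    # v1 shape: {"ed25519:auto": "<base64 key>"}
    keys = {"ed25519:auto": "abc123"}

    # v2 shape: {"ed25519:auto": {"key": "<base64 key>"}}
    verify_keys = {
        key_id: {"key": key}
        for key_id, key in keys.items()
    }
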
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
index 0ca83795a3..4a520bdb5d 100755
--- a/scripts/register_new_matrix_user
+++ b/scripts/register_new_matrix_user
@@ -33,9 +33,10 @@ def request_registration(user, password, server_location, shared_secret):
     ).hexdigest()
 
     data = {
-        "username": user,
+        "user": user,
         "password": password,
         "mac": mac,
+        "type": "org.matrix.login.shared_secret",
     }
 
     server_location = server_location.rstrip("/")
@@ -43,7 +44,7 @@ def request_registration(user, password, server_location, shared_secret):
     print "Sending registration request..."
 
     req = urllib2.Request(
-        "%s/_matrix/client/v2_alpha/register" % (server_location,),
+        "%s/_matrix/client/api/v1/register" % (server_location,),
         data=json.dumps(data),
         headers={'Content-Type': 'application/json'}
     )
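
The script now targets the v1 endpoint, names the field "user" rather than "username", and sends an explicit login type. A sketch of building the request body follows; the MAC shown (HMAC-SHA1 of the localpart under the shared secret) is an assumption, since the hexdigest is computed above this hunk and not visible in the diff:

    import hmac
    import json
    from hashlib import sha1

    def registration_body(user, password, shared_secret):
        # Assumed MAC construction -- the real one lives earlier in the
        # script, outside the lines shown here.
        mac = hmac.new(key=shared_secret, msg=user, digestmod=sha1).hexdigest()
        return json.dumps({
            "user": user,
            "password": password,
            "mac": mac,
            "type": "org.matrix.login.shared_secret",
        })
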
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 041e2151b0..68f86138a4 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
-__version__ = "0.9.0-r4"
+__version__ = "0.9.0-r5"
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index dfb5314ff7..fa43211415 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -97,7 +97,13 @@ class SynapseHomeServer(HomeServer):
         import syweb
         syweb_path = os.path.dirname(syweb.__file__)
         webclient_path = os.path.join(syweb_path, "webclient")
-        return GzipFile(webclient_path)  # TODO configurable?
+        # GZip is disabled here due to
+        # https://twistedmatrix.com/trac/ticket/7678
+        # (It can stay enabled for the API resources: they call
+        # write() with the whole body and then finish() straight
+        # after, and so do not trigger the bug.)
+        # return GzipFile(webclient_path)  # TODO configurable?
+        return File(webclient_path)  # TODO configurable?
 
     def build_resource_for_static_content(self):
         # This is old and should go away: not going to bother adding gzip
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 63a18b802b..e3ca45de83 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -148,8 +148,8 @@ class ApplicationService(object):
                 and self.is_interested_in_user(event.state_key)):
             return True
         # check joined member events
-        for member in member_list:
-            if self.is_interested_in_user(member.state_key):
+        for user_id in member_list:
+            if self.is_interested_in_user(user_id):
                 return True
         return False
 
@@ -173,7 +173,7 @@ class ApplicationService(object):
             restrict_to(str): The namespace to restrict regex tests to.
             aliases_for_event(list): A list of all the known room aliases for
             this event.
-            member_list(list): A list of all joined room members in this room.
+            member_list(list): A list of all joined user_ids in this room.
         Returns:
             bool: True if this service would like to know about this event.
         """
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index a859872ce2..aff69c5f83 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -129,23 +129,24 @@ class Keyring(object):
     def _get_server_verify_key_impl(self, server_name, key_ids):
         keys = None
 
-        perspective_results = []
-        for perspective_name, perspective_keys in self.perspective_servers.items():
-            @defer.inlineCallbacks
-            def get_key():
-                try:
-                    result = yield self.get_server_verify_key_v2_indirect(
-                        server_name, key_ids, perspective_name, perspective_keys
-                    )
-                    defer.returnValue(result)
-                except:
-                    logging.info(
-                        "Unable to getting key %r for %r from %r",
-                        key_ids, server_name, perspective_name,
-                    )
-            perspective_results.append(get_key())
+        @defer.inlineCallbacks
+        def get_key(perspective_name, perspective_keys):
+            try:
+                result = yield self.get_server_verify_key_v2_indirect(
+                    server_name, key_ids, perspective_name, perspective_keys
+                )
+                defer.returnValue(result)
+            except Exception as e:
+                logging.info(
+                    "Unable to getting key %r for %r from %r: %s %s",
+                    key_ids, server_name, perspective_name,
+                    type(e).__name__, str(e.message),
+                )
 
-        perspective_results = yield defer.gatherResults(perspective_results)
+        perspective_results = yield defer.gatherResults([
+            get_key(p_name, p_keys)
+            for p_name, p_keys in self.perspective_servers.items()
+        ])
 
         for results in perspective_results:
             if results is not None:
@@ -158,17 +159,22 @@ class Keyring(object):
         )
 
         with limiter:
-            if keys is None:
+            if not keys:
                 try:
                     keys = yield self.get_server_verify_key_v2_direct(
                         server_name, key_ids
                     )
-                except:
-                    pass
+                except Exception as e:
+                    logging.info(
+                        "Unable to getting key %r for %r directly: %s %s",
+                        key_ids, server_name,
+                        type(e).__name__, str(e.message),
+                    )
 
-            keys = yield self.get_server_verify_key_v1_direct(
-                server_name, key_ids
-            )
+            if not keys:
+                keys = yield self.get_server_verify_key_v1_direct(
+                    server_name, key_ids
+                )
 
         for key_id in key_ids:
             if key_id in keys:
@@ -188,7 +194,7 @@ class Keyring(object):
             # TODO(mark): Set the minimum_valid_until_ts to that needed by
             # the events being validated or the current time if validating
             # an incoming request.
-            responses = yield self.client.post_json(
+            query_response = yield self.client.post_json(
                 destination=perspective_name,
                 path=b"/_matrix/key/v2/query",
                 data={
@@ -204,6 +210,8 @@ class Keyring(object):
 
         keys = {}
 
+        responses = query_response["server_keys"]
+
         for response in responses:
             if (u"signatures" not in response
                     or perspective_name not in response[u"signatures"]):
@@ -327,7 +335,7 @@ class Keyring(object):
                 verify_key.time_added = time_now_ms
                 old_verify_keys[key_id] = verify_key
 
-        for key_id in response_json["signatures"][server_name]:
+        for key_id in response_json["signatures"].get(server_name, {}):
             if key_id not in response_json["verify_keys"]:
                 raise ValueError(
                     "Key response must include verification keys for all"
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 904c7c0945..3a7bc0c9a7 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -222,7 +222,7 @@ class FederationClient(FederationBase):
                         for p in transaction_data["pdus"]
                     ]
 
-                    if pdu_list:
+                    if pdu_list and pdu_list[0]:
                         pdu = pdu_list[0]
 
                         # Check signatures are correct.
@@ -255,7 +255,7 @@ class FederationClient(FederationBase):
                 )
                 continue
 
-        if self._get_pdu_cache is not None:
+        if self._get_pdu_cache is not None and pdu:
             self._get_pdu_cache[event_id] = pdu
 
         defer.returnValue(pdu)
@@ -561,7 +561,7 @@ class FederationClient(FederationBase):
 
             res = yield defer.DeferredList(deferreds, consumeErrors=True)
             for (result, val), (e_id, _) in zip(res, ordered_missing):
-                if result:
+                if result and val:
                     signed_events.append(val)
                 else:
                     failed_to_fetch.add(e_id)
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
index ca04822fb3..32fa5e8c15 100644
--- a/synapse/federation/transaction_queue.py
+++ b/synapse/federation/transaction_queue.py
@@ -207,13 +207,13 @@ class TransactionQueue(object):
             # request at which point pending_pdus_by_dest just keeps growing.
             # we need application-layer timeouts of some flavour of these
             # requests
-            logger.info(
+            logger.debug(
                 "TX [%s] Transaction already in progress",
                 destination
             )
             return
 
-        logger.info("TX [%s] _attempt_new_transaction", destination)
+        logger.debug("TX [%s] _attempt_new_transaction", destination)
 
         # list of (pending_pdu, deferred, order)
         pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
@@ -221,11 +221,11 @@ class TransactionQueue(object):
         pending_failures = self.pending_failures_by_dest.pop(destination, [])
 
         if pending_pdus:
-            logger.info("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
-                        destination, len(pending_pdus))
+            logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
+                         destination, len(pending_pdus))
 
         if not pending_pdus and not pending_edus and not pending_failures:
-            logger.info("TX [%s] Nothing to send", destination)
+            logger.debug("TX [%s] Nothing to send", destination)
             return
 
         # Sort based on the order field
@@ -242,6 +242,8 @@ class TransactionQueue(object):
         try:
             self.pending_transactions[destination] = 1
 
+            txn_id = str(self._next_txn_id)
+
             limiter = yield get_retry_limiter(
                 destination,
                 self._clock,
@@ -249,9 +251,9 @@ class TransactionQueue(object):
             )
 
             logger.debug(
-                "TX [%s] Attempting new transaction"
+                "TX [%s] {%s} Attempting new transaction"
                 " (pdus: %d, edus: %d, failures: %d)",
-                destination,
+                destination, txn_id,
                 len(pending_pdus),
                 len(pending_edus),
                 len(pending_failures)
@@ -261,7 +263,7 @@ class TransactionQueue(object):
 
             transaction = Transaction.create_new(
                 origin_server_ts=int(self._clock.time_msec()),
-                transaction_id=str(self._next_txn_id),
+                transaction_id=txn_id,
                 origin=self.server_name,
                 destination=destination,
                 pdus=pdus,
@@ -275,9 +277,13 @@ class TransactionQueue(object):
 
             logger.debug("TX [%s] Persisted transaction", destination)
             logger.info(
-                "TX [%s] Sending transaction [%s]",
-                destination,
+                "TX [%s] {%s} Sending transaction [%s],"
+                " (PDUs: %d, EDUs: %d, failures: %d)",
+                destination, txn_id,
                 transaction.transaction_id,
+                len(pending_pdus),
+                len(pending_edus),
+                len(pending_failures),
             )
 
             with limiter:
@@ -313,7 +319,10 @@ class TransactionQueue(object):
                     code = e.code
                     response = e.response
 
-                logger.info("TX [%s] got %d response", destination, code)
+                logger.info(
+                    "TX [%s] {%s} got %d response",
+                    destination, txn_id, code
+                )
 
                 logger.debug("TX [%s] Sent transaction", destination)
                 logger.debug("TX [%s] Marking as delivered...", destination)
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 2bfe0f3c9b..af87805f34 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -196,6 +196,14 @@ class FederationSendServlet(BaseFederationServlet):
                 transaction_id, str(transaction_data)
             )
 
+            logger.info(
+                "Received txn %s from %s. (PDUs: %d, EDUs: %d, failures: %d)",
+                transaction_id, origin,
+                len(transaction_data.get("pdus", [])),
+                len(transaction_data.get("edus", [])),
+                len(transaction_data.get("failures", [])),
+            )
+
             # We should ideally be getting this from the security layer.
             # origin = body["origin"]
 
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 355ab317df..8269482e47 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -15,7 +15,7 @@
 
 from twisted.internet import defer
 
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes
 from synapse.appservice import ApplicationService
 from synapse.types import UserID
 
@@ -147,10 +147,7 @@ class ApplicationServicesHandler(object):
                 )
             # We need to know the members associated with this event.room_id,
             # if any.
-            member_list = yield self.store.get_room_members(
-                room_id=event.room_id,
-                membership=Membership.JOIN
-            )
+            member_list = yield self.store.get_users_in_room(event.room_id)
 
         services = yield self.store.get_app_services()
         interested_list = [
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 401cc677d1..4bd027d9bb 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -536,9 +536,7 @@ class RoomListHandler(BaseHandler):
         chunk = yield self.store.get_rooms(is_public=True)
         results = yield defer.gatherResults(
             [
-                self.store.get_users_in_room(
-                    room_id=room["room_id"],
-                )
+                self.store.get_users_in_room(room["room_id"])
                 for room in chunk
             ],
             consumeErrors=True,
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 1e73d52c4d..4f47f88df8 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -296,6 +296,7 @@ class Notifier(object):
             appservice = yield self.store.get_app_service_by_user_id(user)
             current_token = yield self.event_sources.get_current_token()
             rooms = yield self.store.get_rooms_for_user(user)
+            rooms = [room.room_id for room in rooms]
             user_stream = _NotifierUserStream(
                 user=user,
                 rooms=rooms,
diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py
index 5575c847f9..e3dd4ce76d 100644
--- a/synapse/push/__init__.py
+++ b/synapse/push/__init__.py
@@ -84,25 +84,20 @@ class Pusher(object):
 
         rules = baserules.list_with_base_rules(rawrules, user)
 
+        room_id = ev['room_id']
+
         # get *our* member event for display name matching
-        member_events_for_room = yield self.store.get_current_state(
-            room_id=ev['room_id'],
+        my_display_name = None
+        our_member_event = yield self.store.get_current_state(
+            room_id=room_id,
             event_type='m.room.member',
-            state_key=None
+            state_key=self.user_name,
         )
-        my_display_name = None
-        room_member_count = 0
-        for mev in member_events_for_room:
-            if mev.content['membership'] != 'join':
-                continue
-
-            # This loop does two things:
-            # 1) Find our current display name
-            if mev.state_key == self.user_name and 'displayname' in mev.content:
-                my_display_name = mev.content['displayname']
+        if our_member_event:
+            my_display_name = our_member_event[0].content.get("displayname")
 
-            # and 2) Get the number of people in that room
-            room_member_count += 1
+        room_members = yield self.store.get_users_in_room(room_id)
+        room_member_count = len(room_members)
 
         for r in rules:
             if r['rule_id'] in enabled_map:
@@ -287,9 +282,11 @@ class Pusher(object):
             if len(actions) == 0:
                 logger.warn("Empty actions! Using default action.")
                 actions = Pusher.DEFAULT_ACTIONS
+
             if 'notify' not in actions and 'dont_notify' not in actions:
                 logger.warn("Neither notify nor dont_notify in actions: adding default")
                 actions.extend(Pusher.DEFAULT_ACTIONS)
+
             if 'dont_notify' in actions:
                 logger.debug(
                     "%s for %s: dont_notify",
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 7cb91a0be9..75af44d787 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -51,7 +51,7 @@ logger = logging.getLogger(__name__)
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 18
+SCHEMA_VERSION = 19
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 81052409b7..39884c2afe 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -15,8 +15,6 @@
 import logging
 
 from synapse.api.errors import StoreError
-from synapse.events import FrozenEvent
-from synapse.events.utils import prune_event
 from synapse.util.logutils import log_function
 from synapse.util.logcontext import preserve_context_over_fn, LoggingContext
 from synapse.util.lrucache import LruCache
@@ -27,8 +25,8 @@ from util.id_generators import IdGenerator, StreamIdGenerator
 from twisted.internet import defer
 
 from collections import namedtuple, OrderedDict
+
 import functools
-import simplejson as json
 import sys
 import time
 import threading
@@ -48,7 +46,6 @@ sql_scheduling_timer = metrics.register_distribution("schedule_time")
 
 sql_query_timer = metrics.register_distribution("query_time", labels=["verb"])
 sql_txn_timer = metrics.register_distribution("transaction_time", labels=["desc"])
-sql_getevents_timer = metrics.register_distribution("getEvents_time", labels=["desc"])
 
 caches_by_name = {}
 cache_counter = metrics.register_cache(
@@ -124,6 +121,11 @@ class Cache(object):
         self.sequence += 1
         self.cache.pop(keyargs, None)
 
+    def invalidate_all(self):
+        self.check_thread()
+        self.sequence += 1
+        self.cache.clear()
+
 
 def cached(max_entries=1000, num_args=1, lru=False):
     """ A method decorator that applies a memoizing cache around the function.
@@ -175,6 +177,7 @@ def cached(max_entries=1000, num_args=1, lru=False):
                 defer.returnValue(ret)
 
         wrapped.invalidate = cache.invalidate
+        wrapped.invalidate_all = cache.invalidate_all
         wrapped.prefill = cache.prefill
         return wrapped
 
@@ -301,6 +304,12 @@ class SQLBaseStore(object):
         self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True,
                                       max_entries=hs.config.event_cache_size)
 
+        self._event_fetch_lock = threading.Condition()
+        self._event_fetch_list = []
+        self._event_fetch_ongoing = 0
+
+        self._pending_ds = []
+
         self.database_engine = hs.database_engine
 
         self._stream_id_gen = StreamIdGenerator()
@@ -340,6 +349,75 @@ class SQLBaseStore(object):
 
         self._clock.looping_call(loop, 10000)
 
+    def _new_transaction(self, conn, desc, after_callbacks, func, *args, **kwargs):
+        start = time.time() * 1000
+        txn_id = self._TXN_ID
+
+        # We don't really need these to be unique, so let's stop it from
+        # growing really large.
+        self._TXN_ID = (self._TXN_ID + 1) % (sys.maxint - 1)
+
+        name = "%s-%x" % (desc, txn_id, )
+
+        transaction_logger.debug("[TXN START] {%s}", name)
+
+        try:
+            i = 0
+            N = 5
+            while True:
+                try:
+                    txn = conn.cursor()
+                    txn = LoggingTransaction(
+                        txn, name, self.database_engine, after_callbacks
+                    )
+                    r = func(txn, *args, **kwargs)
+                    conn.commit()
+                    return r
+                except self.database_engine.module.OperationalError as e:
+                    # This can happen if the database disappears mid
+                    # transaction.
+                    logger.warn(
+                        "[TXN OPERROR] {%s} %s %d/%d",
+                        name, e, i, N
+                    )
+                    if i < N:
+                        i += 1
+                        try:
+                            conn.rollback()
+                        except self.database_engine.module.Error as e1:
+                            logger.warn(
+                                "[TXN EROLL] {%s} %s",
+                                name, e1,
+                            )
+                        continue
+                    raise
+                except self.database_engine.module.DatabaseError as e:
+                    if self.database_engine.is_deadlock(e):
+                        logger.warn("[TXN DEADLOCK] {%s} %d/%d", name, i, N)
+                        if i < N:
+                            i += 1
+                            try:
+                                conn.rollback()
+                            except self.database_engine.module.Error as e1:
+                                logger.warn(
+                                    "[TXN EROLL] {%s} %s",
+                                    name, e1,
+                                )
+                            continue
+                    raise
+        except Exception as e:
+            logger.debug("[TXN FAIL] {%s} %s", name, e)
+            raise
+        finally:
+            end = time.time() * 1000
+            duration = end - start
+
+            transaction_logger.debug("[TXN END] {%s} %f", name, duration)
+
+            self._current_txn_total_time += duration
+            self._txn_perf_counters.update(desc, start, end)
+            sql_txn_timer.inc_by(duration, desc)
+
     @defer.inlineCallbacks
     def runInteraction(self, desc, func, *args, **kwargs):
         """Wraps the .runInteraction() method on the underlying db_pool."""
@@ -351,83 +429,50 @@ class SQLBaseStore(object):
 
         def inner_func(conn, *args, **kwargs):
             with LoggingContext("runInteraction") as context:
+                sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)
+
                 if self.database_engine.is_connection_closed(conn):
                     logger.debug("Reconnecting closed database connection")
                     conn.reconnect()
 
                 current_context.copy_to(context)
-                start = time.time() * 1000
-                txn_id = self._TXN_ID
+                return self._new_transaction(
+                    conn, desc, after_callbacks, func, *args, **kwargs
+                )
 
-                # We don't really need these to be unique, so lets stop it from
-                # growing really large.
-                self._TXN_ID = (self._TXN_ID + 1) % (sys.maxint - 1)
+        result = yield preserve_context_over_fn(
+            self._db_pool.runWithConnection,
+            inner_func, *args, **kwargs
+        )
+
+        for after_callback, after_args in after_callbacks:
+            after_callback(*after_args)
+        defer.returnValue(result)
 
-                name = "%s-%x" % (desc, txn_id, )
+    @defer.inlineCallbacks
+    def runWithConnection(self, func, *args, **kwargs):
+        """Wraps the .runInteraction() method on the underlying db_pool."""
+        current_context = LoggingContext.current_context()
 
+        start_time = time.time() * 1000
+
+        def inner_func(conn, *args, **kwargs):
+            with LoggingContext("runWithConnection") as context:
                 sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)
-                transaction_logger.debug("[TXN START] {%s}", name)
-                try:
-                    i = 0
-                    N = 5
-                    while True:
-                        try:
-                            txn = conn.cursor()
-                            txn = LoggingTransaction(
-                                txn, name, self.database_engine, after_callbacks
-                            )
-                            return func(txn, *args, **kwargs)
-                        except self.database_engine.module.OperationalError as e:
-                            # This can happen if the database disappears mid
-                            # transaction.
-                            logger.warn(
-                                "[TXN OPERROR] {%s} %s %d/%d",
-                                name, e, i, N
-                            )
-                            if i < N:
-                                i += 1
-                                try:
-                                    conn.rollback()
-                                except self.database_engine.module.Error as e1:
-                                    logger.warn(
-                                        "[TXN EROLL] {%s} %s",
-                                        name, e1,
-                                    )
-                                continue
-                        except self.database_engine.module.DatabaseError as e:
-                            if self.database_engine.is_deadlock(e):
-                                logger.warn("[TXN DEADLOCK] {%s} %d/%d", name, i, N)
-                                if i < N:
-                                    i += 1
-                                    try:
-                                        conn.rollback()
-                                    except self.database_engine.module.Error as e1:
-                                        logger.warn(
-                                            "[TXN EROLL] {%s} %s",
-                                            name, e1,
-                                        )
-                                    continue
-                            raise
-                except Exception as e:
-                    logger.debug("[TXN FAIL] {%s} %s", name, e)
-                    raise
-                finally:
-                    end = time.time() * 1000
-                    duration = end - start
 
-                    transaction_logger.debug("[TXN END] {%s} %f", name, duration)
+                if self.database_engine.is_connection_closed(conn):
+                    logger.debug("Reconnecting closed database connection")
+                    conn.reconnect()
+
+                current_context.copy_to(context)
 
-                    self._current_txn_total_time += duration
-                    self._txn_perf_counters.update(desc, start, end)
-                    sql_txn_timer.inc_by(duration, desc)
+                return func(conn, *args, **kwargs)
 
         result = yield preserve_context_over_fn(
             self._db_pool.runWithConnection,
             inner_func, *args, **kwargs
         )
 
-        for after_callback, after_args in after_callbacks:
-            after_callback(*after_args)
         defer.returnValue(result)
 
     def cursor_to_dict(self, cursor):
@@ -867,158 +912,6 @@ class SQLBaseStore(object):
 
         return self.runInteraction("_simple_max_id", func)
 
-    def _get_events(self, event_ids, check_redacted=True,
-                    get_prev_content=False):
-        return self.runInteraction(
-            "_get_events", self._get_events_txn, event_ids,
-            check_redacted=check_redacted, get_prev_content=get_prev_content,
-        )
-
-    def _get_events_txn(self, txn, event_ids, check_redacted=True,
-                        get_prev_content=False):
-        if not event_ids:
-            return []
-
-        events = [
-            self._get_event_txn(
-                txn, event_id,
-                check_redacted=check_redacted,
-                get_prev_content=get_prev_content
-            )
-            for event_id in event_ids
-        ]
-
-        return [e for e in events if e]
-
-    def _invalidate_get_event_cache(self, event_id):
-        for check_redacted in (False, True):
-            for get_prev_content in (False, True):
-                self._get_event_cache.invalidate(event_id, check_redacted,
-                                                 get_prev_content)
-
-    def _get_event_txn(self, txn, event_id, check_redacted=True,
-                       get_prev_content=False, allow_rejected=False):
-
-        start_time = time.time() * 1000
-
-        def update_counter(desc, last_time):
-            curr_time = self._get_event_counters.update(desc, last_time)
-            sql_getevents_timer.inc_by(curr_time - last_time, desc)
-            return curr_time
-
-        try:
-            ret = self._get_event_cache.get(event_id, check_redacted, get_prev_content)
-
-            if allow_rejected or not ret.rejected_reason:
-                return ret
-            else:
-                return None
-        except KeyError:
-            pass
-        finally:
-            start_time = update_counter("event_cache", start_time)
-
-        sql = (
-            "SELECT e.internal_metadata, e.json, r.event_id, rej.reason "
-            "FROM event_json as e "
-            "LEFT JOIN redactions as r ON e.event_id = r.redacts "
-            "LEFT JOIN rejections as rej on rej.event_id = e.event_id  "
-            "WHERE e.event_id = ? "
-            "LIMIT 1 "
-        )
-
-        txn.execute(sql, (event_id,))
-
-        res = txn.fetchone()
-
-        if not res:
-            return None
-
-        internal_metadata, js, redacted, rejected_reason = res
-
-        start_time = update_counter("select_event", start_time)
-
-        result = self._get_event_from_row_txn(
-            txn, internal_metadata, js, redacted,
-            check_redacted=check_redacted,
-            get_prev_content=get_prev_content,
-            rejected_reason=rejected_reason,
-        )
-        self._get_event_cache.prefill(event_id, check_redacted, get_prev_content, result)
-
-        if allow_rejected or not rejected_reason:
-            return result
-        else:
-            return None
-
-    def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted,
-                                check_redacted=True, get_prev_content=False,
-                                rejected_reason=None):
-
-        start_time = time.time() * 1000
-
-        def update_counter(desc, last_time):
-            curr_time = self._get_event_counters.update(desc, last_time)
-            sql_getevents_timer.inc_by(curr_time - last_time, desc)
-            return curr_time
-
-        d = json.loads(js)
-        start_time = update_counter("decode_json", start_time)
-
-        internal_metadata = json.loads(internal_metadata)
-        start_time = update_counter("decode_internal", start_time)
-
-        ev = FrozenEvent(
-            d,
-            internal_metadata_dict=internal_metadata,
-            rejected_reason=rejected_reason,
-        )
-        start_time = update_counter("build_frozen_event", start_time)
-
-        if check_redacted and redacted:
-            ev = prune_event(ev)
-
-            ev.unsigned["redacted_by"] = redacted
-            # Get the redaction event.
-
-            because = self._get_event_txn(
-                txn,
-                redacted,
-                check_redacted=False
-            )
-
-            if because:
-                ev.unsigned["redacted_because"] = because
-            start_time = update_counter("redact_event", start_time)
-
-        if get_prev_content and "replaces_state" in ev.unsigned:
-            prev = self._get_event_txn(
-                txn,
-                ev.unsigned["replaces_state"],
-                get_prev_content=False,
-            )
-            if prev:
-                ev.unsigned["prev_content"] = prev.get_dict()["content"]
-            start_time = update_counter("get_prev_content", start_time)
-
-        return ev
-
-    def _parse_events(self, rows):
-        return self.runInteraction(
-            "_parse_events", self._parse_events_txn, rows
-        )
-
-    def _parse_events_txn(self, txn, rows):
-        event_ids = [r["event_id"] for r in rows]
-
-        return self._get_events_txn(txn, event_ids)
-
-    def _has_been_redacted_txn(self, txn, event):
-        sql = "SELECT event_id FROM redactions WHERE redacts = ?"
-        txn.execute(sql, (event.event_id,))
-        result = txn.fetchone()
-        return result[0] if result else None
-
     def get_next_stream_id(self):
         with self._next_stream_id_lock:
             i = self._next_stream_id
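
The transaction runner factored out into _new_transaction retries up to five times on an OperationalError (e.g. the connection died mid-transaction) or on a deadlock reported by the engine, rolling back before each retry. Stripped of logging and metrics, the control flow amounts to this sketch (engine is the database engine wrapper seen above, with its module and is_deadlock attributes):

    def run_with_retries(conn, engine, func, max_retries=5):
        attempt = 0
        while True:
            try:
                txn = conn.cursor()
                result = func(txn)
                conn.commit()
                return result
            except engine.module.OperationalError:
                # Connection-level failure: always worth retrying.
                if attempt >= max_retries:
                    raise
            except engine.module.DatabaseError as e:
                # Only deadlocks are retryable.
                if not engine.is_deadlock(e) or attempt >= max_retries:
                    raise
            attempt += 1
            try:
                conn.rollback()
            except engine.module.Error:
                pass  # a failed rollback is logged and ignored upstream
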
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index a323028546..4a855ffd56 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -19,6 +19,8 @@ from ._base import IncorrectDatabaseSetup
 
 
 class PostgresEngine(object):
+    single_threaded = False
+
     def __init__(self, database_module):
         self.module = database_module
         self.module.extensions.register_type(self.module.extensions.UNICODE)
diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite3.py
index ff13d8006a..d18e2808d1 100644
--- a/synapse/storage/engines/sqlite3.py
+++ b/synapse/storage/engines/sqlite3.py
@@ -17,6 +17,8 @@ from synapse.storage import prepare_database, prepare_sqlite3_database
 
 
 class Sqlite3Engine(object):
+    single_threaded = True
+
     def __init__(self, database_module):
         self.module = database_module
 
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index a1982dfbb5..23573e8b2b 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from twisted.internet import defer
+
 from ._base import SQLBaseStore, cached
 from syutil.base64util import encode_base64
 
@@ -33,16 +35,7 @@ class EventFederationStore(SQLBaseStore):
     """
 
     def get_auth_chain(self, event_ids):
-        return self.runInteraction(
-            "get_auth_chain",
-            self._get_auth_chain_txn,
-            event_ids
-        )
-
-    def _get_auth_chain_txn(self, txn, event_ids):
-        results = self._get_auth_chain_ids_txn(txn, event_ids)
-
-        return self._get_events_txn(txn, results)
+        return self.get_auth_chain_ids(event_ids).addCallback(self._get_events)
 
     def get_auth_chain_ids(self, event_ids):
         return self.runInteraction(
@@ -369,7 +362,7 @@ class EventFederationStore(SQLBaseStore):
         return self.runInteraction(
             "get_backfill_events",
             self._get_backfill_events, room_id, event_list, limit
-        )
+        ).addCallback(self._get_events)
 
     def _get_backfill_events(self, txn, room_id, event_list, limit):
         logger.debug(
@@ -415,16 +408,26 @@ class EventFederationStore(SQLBaseStore):
             front = new_front
             event_results += new_front
 
-        return self._get_events_txn(txn, event_results)
+        return event_results
 
+    @defer.inlineCallbacks
     def get_missing_events(self, room_id, earliest_events, latest_events,
                            limit, min_depth):
-        return self.runInteraction(
+        ids = yield self.runInteraction(
             "get_missing_events",
             self._get_missing_events,
             room_id, earliest_events, latest_events, limit, min_depth
         )
 
+        events = yield self._get_events(ids)
+
+        events = sorted(
+            [ev for ev in events if ev.depth >= min_depth],
+            key=lambda e: e.depth,
+        )
+
+        defer.returnValue(events[:limit])
+
     def _get_missing_events(self, txn, room_id, earliest_events, latest_events,
                             limit, min_depth):
 
@@ -456,14 +459,7 @@ class EventFederationStore(SQLBaseStore):
             front = new_front
             event_results |= new_front
 
-        events = self._get_events_txn(txn, event_results)
-
-        events = sorted(
-            [ev for ev in events if ev.depth >= min_depth],
-            key=lambda e: e.depth,
-        )
-
-        return events[:limit]
+        return event_results
 
     def clean_room_for_join(self, room_id):
         return self.runInteraction(
@@ -476,3 +472,4 @@ class EventFederationStore(SQLBaseStore):
         query = "DELETE FROM event_forward_extremities WHERE room_id = ?"
 
         txn.execute(query, (room_id,))
+        txn.call_after(self.get_latest_event_ids_in_room.invalidate, room_id)
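
The txn.call_after line above shows the invalidation idiom used throughout this commit: invalidations are queued on the transaction and only run once it has committed, so a rolled-back transaction never evicts cache entries for writes that did not happen. Schematically (a sketch of the behaviour, not the real LoggingTransaction code):

    class TransactionWrapper(object):
        """Queue side effects to run only after a successful commit."""

        def __init__(self):
            self._after_callbacks = []

        def call_after(self, callback, *args):
            self._after_callbacks.append((callback, args))

        def run(self, conn, func):
            result = func(self)
            conn.commit()
            # Only now is it safe to touch the caches.
            for callback, args in self._after_callbacks:
                callback(*args)
            return result
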
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 971f3211ac..d2a010bd88 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -15,8 +15,12 @@
 
 from _base import SQLBaseStore, _RollbackButIsFineException
 
-from twisted.internet import defer
+from twisted.internet import defer, reactor
 
+from synapse.events import FrozenEvent
+from synapse.events.utils import prune_event
+
+from synapse.util.logcontext import preserve_context_over_deferred
 from synapse.util.logutils import log_function
 from synapse.api.constants import EventTypes
 from synapse.crypto.event_signing import compute_event_reference_hash
@@ -26,10 +30,21 @@ from syutil.jsonutil import encode_canonical_json
 from contextlib import contextmanager
 
 import logging
+import simplejson as json
 
 logger = logging.getLogger(__name__)
 
 
+# These values are used in the `_enqueue_events` and `_do_fetch` methods to
+# control how we batch/bulk fetch events from the database.
+# The values are plucked out of thin air to make initial sync run faster
+# on jki.re
+# TODO: Make these configurable.
+EVENT_QUEUE_THREADS = 3  # Max number of threads that will fetch events
+EVENT_QUEUE_ITERATIONS = 3  # No. times we block waiting for requests for events
+EVENT_QUEUE_TIMEOUT_S = 0.1  # Timeout when waiting for requests for events
+
+
 class EventsStore(SQLBaseStore):
     @defer.inlineCallbacks
     @log_function
@@ -87,18 +102,17 @@ class EventsStore(SQLBaseStore):
         Returns:
             Deferred : A FrozenEvent.
         """
-        event = yield self.runInteraction(
-            "get_event", self._get_event_txn,
-            event_id,
+        events = yield self._get_events(
+            [event_id],
             check_redacted=check_redacted,
             get_prev_content=get_prev_content,
             allow_rejected=allow_rejected,
         )
 
-        if not event and not allow_none:
+        if not events and not allow_none:
             raise RuntimeError("Could not find event %s" % (event_id,))
 
-        defer.returnValue(event)
+        defer.returnValue(events[0] if events else None)
 
     @log_function
     def _persist_event_txn(self, txn, event, context, backfilled,
@@ -111,6 +125,12 @@ class EventsStore(SQLBaseStore):
         # We purposefully do this first since if we include a `current_state`
         # key, we *want* to update the `current_state_events` table
         if current_state:
+            txn.call_after(self.get_current_state_for_key.invalidate_all)
+            txn.call_after(self.get_rooms_for_user.invalidate_all)
+            txn.call_after(self.get_users_in_room.invalidate, event.room_id)
+            txn.call_after(self.get_joined_hosts_for_room.invalidate, event.room_id)
+            txn.call_after(self.get_room_name_and_aliases.invalidate, event.room_id)
+
             self._simple_delete_txn(
                 txn,
                 table="current_state_events",
@@ -118,13 +138,6 @@ class EventsStore(SQLBaseStore):
             )
 
             for s in current_state:
-                if s.type == EventTypes.Member:
-                    txn.call_after(
-                        self.get_rooms_for_user.invalidate, s.state_key
-                    )
-                    txn.call_after(
-                        self.get_joined_hosts_for_room.invalidate, s.room_id
-                    )
                 self._simple_insert_txn(
                     txn,
                     "current_state_events",
@@ -342,6 +355,18 @@ class EventsStore(SQLBaseStore):
             )
 
             if is_new_state and not context.rejected:
+                txn.call_after(
+                    self.get_current_state_for_key.invalidate,
+                    event.room_id, event.type, event.state_key
+                )
+
+                if (event.type == EventTypes.Name
+                        or event.type == EventTypes.Aliases):
+                    txn.call_after(
+                        self.get_room_name_and_aliases.invalidate,
+                        event.room_id
+                    )
+
                 self._simple_upsert_txn(
                     txn,
                     "current_state_events",
@@ -396,3 +421,407 @@ class EventsStore(SQLBaseStore):
         return self.runInteraction(
             "have_events", f,
         )
+
+    @defer.inlineCallbacks
+    def _get_events(self, event_ids, check_redacted=True,
+                    get_prev_content=False, allow_rejected=False):
+        if not event_ids:
+            defer.returnValue([])
+
+        event_map = self._get_events_from_cache(
+            event_ids,
+            check_redacted=check_redacted,
+            get_prev_content=get_prev_content,
+            allow_rejected=allow_rejected,
+        )
+
+        missing_events_ids = [e for e in event_ids if e not in event_map]
+
+        if not missing_events_ids:
+            defer.returnValue([
+                event_map[e_id] for e_id in event_ids
+                if e_id in event_map and event_map[e_id]
+            ])
+
+        missing_events = yield self._enqueue_events(
+            missing_events_ids,
+            check_redacted=check_redacted,
+            get_prev_content=get_prev_content,
+            allow_rejected=allow_rejected,
+        )
+
+        event_map.update(missing_events)
+
+        defer.returnValue([
+            event_map[e_id] for e_id in event_ids
+            if e_id in event_map and event_map[e_id]
+        ])
+
+    def _get_events_txn(self, txn, event_ids, check_redacted=True,
+                        get_prev_content=False, allow_rejected=False):
+        if not event_ids:
+            return []
+
+        event_map = self._get_events_from_cache(
+            event_ids,
+            check_redacted=check_redacted,
+            get_prev_content=get_prev_content,
+            allow_rejected=allow_rejected,
+        )
+
+        missing_events_ids = [e for e in event_ids if e not in event_map]
+
+        if not missing_events_ids:
+            return [
+                event_map[e_id] for e_id in event_ids
+                if e_id in event_map and event_map[e_id]
+            ]
+
+        missing_events = self._fetch_events_txn(
+            txn,
+            missing_events_ids,
+            check_redacted=check_redacted,
+            get_prev_content=get_prev_content,
+            allow_rejected=allow_rejected,
+        )
+
+        event_map.update(missing_events)
+
+        return [
+            event_map[e_id] for e_id in event_ids
+            if e_id in event_map and event_map[e_id]
+        ]
+
+    def _invalidate_get_event_cache(self, event_id):
+        for check_redacted in (False, True):
+            for get_prev_content in (False, True):
+                self._get_event_cache.invalidate(event_id, check_redacted,
+                                                 get_prev_content)
+
+    def _get_event_txn(self, txn, event_id, check_redacted=True,
+                       get_prev_content=False, allow_rejected=False):
+
+        events = self._get_events_txn(
+            txn, [event_id],
+            check_redacted=check_redacted,
+            get_prev_content=get_prev_content,
+            allow_rejected=allow_rejected,
+        )
+
+        return events[0] if events else None
+
+    def _get_events_from_cache(self, events, check_redacted, get_prev_content,
+                               allow_rejected):
+        event_map = {}
+
+        for event_id in events:
+            try:
+                ret = self._get_event_cache.get(
+                    event_id, check_redacted, get_prev_content
+                )
+
+                if allow_rejected or not ret.rejected_reason:
+                    event_map[event_id] = ret
+                else:
+                    event_map[event_id] = None
+            except KeyError:
+                pass
+
+        return event_map
+
+    def _do_fetch(self, conn):
+        """Takes a database connection and waits for requests for events from
+        the _event_fetch_list queue.
+        """
+        event_list = []
+        i = 0
+        while True:
+            try:
+                with self._event_fetch_lock:
+                    event_list = self._event_fetch_list
+                    self._event_fetch_list = []
+
+                    if not event_list:
+                        single_threaded = self.database_engine.single_threaded
+                        if single_threaded or i > EVENT_QUEUE_ITERATIONS:
+                            self._event_fetch_ongoing -= 1
+                            return
+                        else:
+                            self._event_fetch_lock.wait(EVENT_QUEUE_TIMEOUT_S)
+                            i += 1
+                            continue
+                    i = 0
+
+                event_id_lists = zip(*event_list)[0]
+                event_ids = [
+                    item for sublist in event_id_lists for item in sublist
+                ]
+
+                rows = self._new_transaction(
+                    conn, "do_fetch", [], self._fetch_event_rows, event_ids
+                )
+
+                row_dict = {
+                    r["event_id"]: r
+                    for r in rows
+                }
+
+                # We only want to resolve deferreds from the main thread
+                def fire(lst, res):
+                    for ids, d in lst:
+                        if not d.called:
+                            try:
+                                d.callback([
+                                    res[i]
+                                    for i in ids
+                                    if i in res
+                                ])
+                            except:
+                                logger.exception("Failed to callback")
+                reactor.callFromThread(fire, event_list, row_dict)
+            except Exception as e:
+                logger.exception("do_fetch")
+
+                # We only want to resolve deferreds from the main thread
+                def fire(evs):
+                    for _, d in evs:
+                        if not d.called:
+                            d.errback(e)
+
+                if event_list:
+                    reactor.callFromThread(fire, event_list)
+
+    @defer.inlineCallbacks
+    def _enqueue_events(self, events, check_redacted=True,
+                        get_prev_content=False, allow_rejected=False):
+        """Fetches events from the database using the _event_fetch_list. This
+        allows batch and bulk fetching of events - it allows us to fetch events
+        without having to create a new transaction for each request for events.
+        """
+        if not events:
+            defer.returnValue({})
+
+        events_d = defer.Deferred()
+        with self._event_fetch_lock:
+            self._event_fetch_list.append(
+                (events, events_d)
+            )
+
+            self._event_fetch_lock.notify()
+
+            if self._event_fetch_ongoing < EVENT_QUEUE_THREADS:
+                self._event_fetch_ongoing += 1
+                should_start = True
+            else:
+                should_start = False
+
+        if should_start:
+            self.runWithConnection(
+                self._do_fetch
+            )
+
+        rows = yield preserve_context_over_deferred(events_d)
+
+        if not allow_rejected:
+            rows[:] = [r for r in rows if not r["rejects"]]
+
+        res = yield defer.gatherResults(
+            [
+                self._get_event_from_row(
+                    row["internal_metadata"], row["json"], row["redacts"],
+                    check_redacted=check_redacted,
+                    get_prev_content=get_prev_content,
+                    rejected_reason=row["rejects"],
+                )
+                for row in rows
+            ],
+            consumeErrors=True
+        )
+
+        defer.returnValue({
+            e.event_id: e
+            for e in res if e
+        })
+
+    def _fetch_event_rows(self, txn, events):
+        rows = []
+        N = 200
+        for i in range(1 + len(events) / N):
+            evs = events[i*N:(i + 1)*N]
+            if not evs:
+                break
+
+            sql = (
+                "SELECT "
+                " e.event_id as event_id, "
+                " e.internal_metadata,"
+                " e.json,"
+                " r.redacts as redacts,"
+                " rej.event_id as rejects "
+                " FROM event_json as e"
+                " LEFT JOIN rejections as rej USING (event_id)"
+                " LEFT JOIN redactions as r ON e.event_id = r.redacts"
+                " WHERE e.event_id IN (%s)"
+            ) % (",".join(["?"]*len(evs)),)
+
+            txn.execute(sql, evs)
+            rows.extend(self.cursor_to_dict(txn))
+
+        return rows
+
+    def _fetch_events_txn(self, txn, events, check_redacted=True,
+                          get_prev_content=False, allow_rejected=False):
+        if not events:
+            return {}
+
+        rows = self._fetch_event_rows(
+            txn, events,
+        )
+
+        if not allow_rejected:
+            rows[:] = [r for r in rows if not r["rejects"]]
+
+        res = [
+            self._get_event_from_row_txn(
+                txn,
+                row["internal_metadata"], row["json"], row["redacts"],
+                check_redacted=check_redacted,
+                get_prev_content=get_prev_content,
+                rejected_reason=row["rejects"],
+            )
+            for row in rows
+        ]
+
+        return {
+            r.event_id: r
+            for r in res
+        }
+
+    @defer.inlineCallbacks
+    def _get_event_from_row(self, internal_metadata, js, redacted,
+                            check_redacted=True, get_prev_content=False,
+                            rejected_reason=None):
+        d = json.loads(js)
+        internal_metadata = json.loads(internal_metadata)
+
+        if rejected_reason:
+            rejected_reason = yield self._simple_select_one_onecol(
+                table="rejections",
+                keyvalues={"event_id": rejected_reason},
+                retcol="reason",
+                desc="_get_event_from_row",
+            )
+
+        ev = FrozenEvent(
+            d,
+            internal_metadata_dict=internal_metadata,
+            rejected_reason=rejected_reason,
+        )
+
+        if check_redacted and redacted:
+            ev = prune_event(ev)
+
+            redaction_id = yield self._simple_select_one_onecol(
+                table="redactions",
+                keyvalues={"redacts": ev.event_id},
+                retcol="event_id",
+                desc="_get_event_from_row",
+            )
+
+            ev.unsigned["redacted_by"] = redaction_id
+            # Get the redaction event.
+
+            because = yield self.get_event(
+                redaction_id,
+                check_redacted=False
+            )
+
+            if because:
+                ev.unsigned["redacted_because"] = because
+
+        if get_prev_content and "replaces_state" in ev.unsigned:
+            prev = yield self.get_event(
+                ev.unsigned["replaces_state"],
+                get_prev_content=False,
+            )
+            if prev:
+                ev.unsigned["prev_content"] = prev.get_dict()["content"]
+
+        self._get_event_cache.prefill(
+            ev.event_id, check_redacted, get_prev_content, ev
+        )
+
+        defer.returnValue(ev)
+
+    def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted,
+                                check_redacted=True, get_prev_content=False,
+                                rejected_reason=None):
+        d = json.loads(js)
+        internal_metadata = json.loads(internal_metadata)
+
+        if rejected_reason:
+            rejected_reason = self._simple_select_one_onecol_txn(
+                txn,
+                table="rejections",
+                keyvalues={"event_id": rejected_reason},
+                retcol="reason",
+            )
+
+        ev = FrozenEvent(
+            d,
+            internal_metadata_dict=internal_metadata,
+            rejected_reason=rejected_reason,
+        )
+
+        if check_redacted and redacted:
+            ev = prune_event(ev)
+
+            redaction_id = self._simple_select_one_onecol_txn(
+                txn,
+                table="redactions",
+                keyvalues={"redacts": ev.event_id},
+                retcol="event_id",
+            )
+
+            ev.unsigned["redacted_by"] = redaction_id
+            # Get the redaction event.
+
+            because = self._get_event_txn(
+                txn,
+                redaction_id,
+                check_redacted=False
+            )
+
+            if because:
+                ev.unsigned["redacted_because"] = because
+
+        if get_prev_content and "replaces_state" in ev.unsigned:
+            prev = self._get_event_txn(
+                txn,
+                ev.unsigned["replaces_state"],
+                get_prev_content=False,
+            )
+            if prev:
+                ev.unsigned["prev_content"] = prev.get_dict()["content"]
+
+        self._get_event_cache.prefill(
+            ev.event_id, check_redacted, get_prev_content, ev
+        )
+
+        return ev
+
+    def _parse_events(self, rows):
+        return self.runInteraction(
+            "_parse_events", self._parse_events_txn, rows
+        )
+
+    def _parse_events_txn(self, txn, rows):
+        event_ids = [r["event_id"] for r in rows]
+
+        return self._get_events_txn(txn, event_ids)
+
+    def _has_been_redacted_txn(self, txn, event):
+        """Returns the event_id of the redaction that redacted the given
+        event, or None if it has not been redacted.
+        """
+        sql = "SELECT event_id FROM redactions WHERE redacts = ?"
+        txn.execute(sql, (event.event_id,))
+        result = txn.fetchone()
+        return result[0] if result else None
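
Both `_get_event_from_row` and its `_txn` twin above finish by prefilling
`_get_event_cache` under a compound key, so a redacted or prev-content-bearing
copy of an event can never be served to a caller who asked for a different
variant. A minimal sketch of that keying scheme (the dict-backed class here is
illustrative only; the store's real cache is an LruCache wrapper)::

    class EventCache(object):
        """Toy stand-in for _get_event_cache: one slot per
        (event_id, check_redacted, get_prev_content) combination."""

        def __init__(self):
            self._entries = {}

        def prefill(self, event_id, check_redacted, get_prev_content, event):
            self._entries[(event_id, check_redacted, get_prev_content)] = event

        def get(self, event_id, check_redacted, get_prev_content, default=None):
            return self._entries.get(
                (event_id, check_redacted, get_prev_content), default
            )
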
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index e7988676ce..80d0ac4ea3 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -13,9 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import collections
-
-from ._base import SQLBaseStore, Table
+from ._base import SQLBaseStore, cached
 from twisted.internet import defer
 
 import logging
@@ -41,6 +39,7 @@ class PushRuleStore(SQLBaseStore):
 
         defer.returnValue(rows)
 
+    @cached()
     @defer.inlineCallbacks
     def get_push_rules_enabled_for_user(self, user_name):
         results = yield self._simple_select_list(
@@ -151,6 +150,10 @@ class PushRuleStore(SQLBaseStore):
 
             txn.execute(sql, (user_name, priority_class, new_rule_priority))
 
+        txn.call_after(
+            self.get_push_rules_enabled_for_user.invalidate, user_name
+        )
+
         self._simple_insert_txn(
             txn,
             table=PushRuleTable.table_name,
@@ -179,6 +182,10 @@ class PushRuleStore(SQLBaseStore):
         new_rule['priority_class'] = priority_class
         new_rule['priority'] = new_prio
 
+        txn.call_after(
+            self.get_push_rules_enabled_for_user.invalidate, user_name
+        )
+
         self._simple_insert_txn(
             txn,
             table=PushRuleTable.table_name,
@@ -201,6 +208,7 @@ class PushRuleStore(SQLBaseStore):
             {'user_name': user_name, 'rule_id': rule_id},
             desc="delete_push_rule",
         )
+        self.get_push_rules_enabled_for_user.invalidate(user_name)
 
     @defer.inlineCallbacks
     def set_push_rule_enabled(self, user_name, rule_id, enabled):
@@ -220,6 +228,7 @@ class PushRuleStore(SQLBaseStore):
             {'enabled': 1 if enabled else 0},
             {'id': new_id},
         )
+        self.get_push_rules_enabled_for_user.invalidate(user_name)
 
 
 class RuleNotFoundException(Exception):
@@ -230,7 +239,7 @@ class InconsistentRuleException(Exception):
     pass
 
 
-class PushRuleTable(Table):
+class PushRuleTable(object):
     table_name = "push_rules"
 
     fields = [
@@ -243,10 +252,8 @@ class PushRuleTable(Table):
         "actions",
     ]
 
-    EntryType = collections.namedtuple("PushRuleEntry", fields)
-
 
-class PushRuleEnableTable(Table):
+class PushRuleEnableTable(object):
     table_name = "push_rules_enable"
 
     fields = [
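
The push_rule.py change is the clearest example of the caching pattern this
release leans on: a `@cached()` getter paired with an `invalidate` call on
every write path, with `txn.call_after` used inside transactions so the
invalidation only runs once the transaction commits. A rough, self-contained
sketch of the idea (the decorator below is illustrative, not Synapse's real
`cached`, and the two `_fetch`/`_write` helpers are hypothetical)::

    def cached():
        def wrap(fn):
            cache = {}

            def wrapped(self, key):
                if key not in cache:
                    cache[key] = fn(self, key)
                return cache[key]

            wrapped.invalidate = lambda key: cache.pop(key, None)
            return wrapped
        return wrap

    class Store(object):
        @cached()
        def get_push_rules_enabled_for_user(self, user_name):
            return self._fetch_enabled_rules(user_name)  # hypothetical helper

        def _set_enabled_txn(self, txn, user_name, rule_id, enabled):
            self._write_enabled(txn, user_name, rule_id, enabled)  # hypothetical
            # txn.call_after is the post-commit hook used in the diff above.
            # Invalidating only after commit matters: invalidating mid-
            # transaction would let a concurrent reader re-cache the old
            # value just before the write lands.
            txn.call_after(
                self.get_push_rules_enabled_for_user.invalidate, user_name
            )
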
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index f956377632..4612a8aa83 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -17,7 +17,7 @@ from twisted.internet import defer
 
 from synapse.api.errors import StoreError
 
-from ._base import SQLBaseStore
+from ._base import SQLBaseStore, cached
 
 import collections
 import logging
@@ -186,6 +186,7 @@ class RoomStore(SQLBaseStore):
                 }
             )
 
+    @cached()
     @defer.inlineCallbacks
     def get_room_name_and_aliases(self, room_id):
         def f(txn):
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 839c74f63a..d36a6c18a8 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -66,6 +66,7 @@ class RoomMemberStore(SQLBaseStore):
 
         txn.call_after(self.get_rooms_for_user.invalidate, target_user_id)
         txn.call_after(self.get_joined_hosts_for_room.invalidate, event.room_id)
+        txn.call_after(self.get_users_in_room.invalidate, event.room_id)
 
     def get_room_member(self, user_id, room_id):
         """Retrieve the current state of a room member.
@@ -76,17 +77,18 @@ class RoomMemberStore(SQLBaseStore):
         Returns:
             Deferred: Results in a MembershipEvent or None.
         """
-        def f(txn):
-            events = self._get_members_events_txn(
-                txn,
-                room_id,
-                user_id=user_id,
-            )
-
-            return events[0] if events else None
-
-        return self.runInteraction("get_room_member", f)
+        return self.runInteraction(
+            "get_room_member",
+            self._get_members_events_txn,
+            room_id,
+            user_id=user_id,
+        ).addCallback(
+            self._get_events
+        ).addCallback(
+            lambda events: events[0] if events else None
+        )
 
+    @cached()
     def get_users_in_room(self, room_id):
         def f(txn):
 
@@ -110,15 +112,12 @@ class RoomMemberStore(SQLBaseStore):
         Returns:
             list of namedtuples representing the members in this room.
         """
-
-        def f(txn):
-            return self._get_members_events_txn(
-                txn,
-                room_id,
-                membership=membership,
-            )
-
-        return self.runInteraction("get_room_members", f)
+        return self.runInteraction(
+            "get_room_members",
+            self._get_members_events_txn,
+            room_id,
+            membership=membership,
+        ).addCallback(self._get_events)
 
     def get_rooms_for_user_where_membership_is(self, user_id, membership_list):
         """ Get all the rooms for this user where the membership for this user
@@ -190,14 +189,14 @@ class RoomMemberStore(SQLBaseStore):
         return self.runInteraction(
             "get_members_query", self._get_members_events_txn,
             where_clause, where_values
-        )
+        ).addCallback(self._get_events)
 
     def _get_members_events_txn(self, txn, room_id, membership=None, user_id=None):
         rows = self._get_members_rows_txn(
             txn,
             room_id, membership, user_id,
         )
-        return self._get_events_txn(txn, [r["event_id"] for r in rows])
+        return [r["event_id"] for r in rows]
 
     def _get_members_rows_txn(self, txn, room_id, membership=None, user_id=None):
         where_clause = "c.room_id = ?"
diff --git a/synapse/storage/schema/delta/19/event_index.sql b/synapse/storage/schema/delta/19/event_index.sql
new file mode 100644
index 0000000000..3881fc9897
--- /dev/null
+++ b/synapse/storage/schema/delta/19/event_index.sql
@@ -0,0 +1,19 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CREATE INDEX events_order_topo_stream_room ON events(
+    topological_ordering, stream_ordering, room_id
+);
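
The new index covers the `(topological_ordering, stream_ordering)` ordering
that the pagination queries in synapse/storage/stream.py sort by, sparing
those scans a separate sort step. As a hedged illustration of the kind of
statement it serves (the exact SQL lives in stream.py; `topo_bound` and
`limit` are placeholder parameters)::

    sql = (
        "SELECT event_id, topological_ordering, stream_ordering"
        " FROM events"
        " WHERE room_id = ? AND topological_ordering < ?"
        " ORDER BY topological_ordering DESC, stream_ordering DESC"
        " LIMIT ?"
    )
    txn.execute(sql, (room_id, topo_bound, limit))
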
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index dbc0e49c1f..b24de34f23 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ._base import SQLBaseStore
+from ._base import SQLBaseStore, cached
 
 from twisted.internet import defer
 
@@ -43,6 +43,7 @@ class StateStore(SQLBaseStore):
       * `state_groups_state`: Maps state group to state events.
     """
 
+    @defer.inlineCallbacks
     def get_state_groups(self, event_ids):
         """ Get the state groups for the given list of event_ids
 
@@ -71,17 +72,29 @@ class StateStore(SQLBaseStore):
                     retcol="event_id",
                 )
 
-                state = self._get_events_txn(txn, state_ids)
-
-                res[group] = state
+                res[group] = state_ids
 
             return res
 
-        return self.runInteraction(
+        states = yield self.runInteraction(
             "get_state_groups",
             f,
         )
 
+        @defer.inlineCallbacks
+        def fetch_events(vals):
+            vals[:] = yield self._get_events(vals, get_prev_content=False)
+
+        yield defer.gatherResults(
+            [
+                fetch_events(vals)
+                for vals in states.values()
+            ],
+            consumeErrors=True,
+        )
+
+        defer.returnValue(states)
+
     def _store_state_groups_txn(self, txn, event, context):
         if context.current_state is None:
             return
@@ -130,6 +143,12 @@ class StateStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def get_current_state(self, room_id, event_type=None, state_key=""):
+        if event_type and state_key is not None:
+            result = yield self.get_current_state_for_key(
+                room_id, event_type, state_key
+            )
+            defer.returnValue(result)
+
         def f(txn):
             sql = (
                 "SELECT event_id FROM current_state_events"
@@ -146,11 +165,29 @@ class StateStore(SQLBaseStore):
                 args = (room_id, )
 
             txn.execute(sql, args)
-            results = self.cursor_to_dict(txn)
+            results = txn.fetchall()
 
-            return self._parse_events_txn(txn, results)
+            return [r[0] for r in results]
 
-        events = yield self.runInteraction("get_current_state", f)
+        event_ids = yield self.runInteraction("get_current_state", f)
+        events = yield self._get_events(event_ids, get_prev_content=False)
+        defer.returnValue(events)
+
+    @cached(num_args=3)
+    @defer.inlineCallbacks
+    def get_current_state_for_key(self, room_id, event_type, state_key):
+        def f(txn):
+            sql = (
+                "SELECT event_id FROM current_state_events"
+                " WHERE room_id = ? AND type = ? AND state_key = ?"
+            )
+
+            args = (room_id, event_type, state_key)
+            txn.execute(sql, args)
+            results = txn.fetchall()
+            return [r[0] for r in results]
+        event_ids = yield self.runInteraction("get_current_state_for_key", f)
+        events = yield self._get_events(event_ids, get_prev_content=False)
         defer.returnValue(events)
 
 
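`get_state_groups` now fans the per-group event fetches out concurrently,
mutating each group's ID list into its events in place. A standalone sketch
of that pattern, assuming an inlineCallbacks context and the same in-place
trick used above::

    from twisted.internet import defer

    @defer.inlineCallbacks
    def hydrate_groups(store, states):
        # states maps state_group -> list of event_ids; each list is
        # replaced, in place, by the corresponding events.
        @defer.inlineCallbacks
        def hydrate(event_ids):
            event_ids[:] = yield store._get_events(
                event_ids, get_prev_content=False
            )

        # Fire all the lookups at once. consumeErrors=True keeps a single
        # failure from leaving "unhandled error" noise in its siblings.
        yield defer.gatherResults(
            [hydrate(vals) for vals in states.values()],
            consumeErrors=True,
        )
        defer.returnValue(states)
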
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 8045e17fd7..af45fc5619 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -224,7 +224,7 @@ class StreamStore(SQLBaseStore):
 
         return self.runInteraction("get_room_events_stream", f)
 
-    @log_function
+    @defer.inlineCallbacks
     def paginate_room_events(self, room_id, from_key, to_key=None,
                              direction='b', limit=-1,
                              with_feedback=False):
@@ -286,18 +286,20 @@ class StreamStore(SQLBaseStore):
                 # TODO (erikj): We should work out what to do here instead.
                 next_token = to_key if to_key else from_key
 
-            events = self._get_events_txn(
-                txn,
-                [r["event_id"] for r in rows],
-                get_prev_content=True
-            )
+            return rows, next_token,
+
+        rows, token = yield self.runInteraction("paginate_room_events", f)
 
-            self._set_before_and_after(events, rows)
+        events = yield self._get_events(
+            [r["event_id"] for r in rows],
+            get_prev_content=True
+        )
 
-            return events, next_token,
+        self._set_before_and_after(events, rows)
 
-        return self.runInteraction("paginate_room_events", f)
+        defer.returnValue((events, token))
 
+    @defer.inlineCallbacks
     def get_recent_events_for_room(self, room_id, limit, end_token,
                                    with_feedback=False, from_token=None):
         # TODO (erikj): Handle compressed feedback
@@ -349,20 +351,23 @@ class StreamStore(SQLBaseStore):
             else:
                 token = (str(end_token), str(end_token))
 
-            events = self._get_events_txn(
-                txn,
-                [r["event_id"] for r in rows],
-                get_prev_content=True
-            )
-
-            self._set_before_and_after(events, rows)
-
-            return events, token
+            return rows, token
 
-        return self.runInteraction(
+        rows, token = yield self.runInteraction(
             "get_recent_events_for_room", get_recent_events_for_room_txn
         )
 
+        logger.debug("stream before")
+        events = yield self._get_events(
+            [r["event_id"] for r in rows],
+            get_prev_content=True
+        )
+        logger.debug("stream after")
+
+        self._set_before_and_after(events, rows)
+
+        defer.returnValue((events, token))
+
     @defer.inlineCallbacks
     def get_room_events_max_id(self, direction='f'):
         token = yield self._stream_id_gen.get_max_token(self)
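
Both stream paths above converge on the same two-phase shape: the transaction
returns raw rows plus a pagination token, the events are fetched afterwards,
and `_set_before_and_after` re-attaches the per-row ordering tokens that the
cache-backed fetch no longer sees. Schematically (a sketch; `_rows_txn` is a
hypothetical stand-in for the per-query transaction function)::

    @defer.inlineCallbacks
    def paginate(self, room_id, from_key, limit):
        # Phase 1: rows and the next pagination token, inside the txn.
        rows, token = yield self.runInteraction(
            "paginate", self._rows_txn, room_id, from_key, limit
        )
        # Phase 2: hydrate events (cache-friendly), then restore the
        # per-event stream tokens from the original rows.
        events = yield self._get_events(
            [r["event_id"] for r in rows], get_prev_content=True
        )
        self._set_before_and_after(events, rows)
        defer.returnValue((events, token))
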
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index c1a16b639a..260714ccc2 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -29,6 +29,34 @@ def unwrapFirstError(failure):
     return failure.value.subFailure
 
 
+def unwrap_deferred(d):
+    """Given a deferred that we know has completed, return its value or raise
+    the failure as an exception.
+    """
+    if not d.called:
+        raise RuntimeError("deferred has not finished")
+
+    res = []
+
+    def record_result(r):
+        res.append(r)
+        return r
+
+    # As d has already fired, the callback runs synchronously.
+    d.addCallback(record_result)
+
+    if res:
+        return res[0]
+
+    # No result was recorded, so d must have failed; capture the Failure.
+    d.addErrback(record_result)
+
+    if res:
+        res[0].raiseException()
+    else:
+        raise RuntimeError("deferred did not call callbacks")
+
+
 class Clock(object):
     """A small utility that obtains current time-of-day so that time may be
     mocked during unit-tests.
@@ -52,16 +80,16 @@ class Clock(object):
     def stop_looping_call(self, loop):
         loop.stop()
 
-    def call_later(self, delay, callback):
+    def call_later(self, delay, callback, *args, **kwargs):
         current_context = LoggingContext.current_context()
 
-        def wrapped_callback():
+        def wrapped_callback(*args, **kwargs):
             with PreserveLoggingContext():
                 LoggingContext.thread_local.current_context = current_context
-                callback()
+                callback(*args, **kwargs)
 
         with PreserveLoggingContext():
-            return reactor.callLater(delay, wrapped_callback)
+            return reactor.callLater(delay, wrapped_callback, *args, **kwargs)
 
     def cancel_call_later(self, timer):
         timer.cancel()
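
Two utility tweaks land here: `unwrap_deferred`, which synchronously extracts
the result of a deferred known to have already fired (handy inside
transaction code that calls cache-backed, deferred-returning helpers), and
pass-through arguments for `Clock.call_later`. A quick usage sketch under
those assumptions::

    from twisted.internet import defer

    from synapse.util import unwrap_deferred

    # An already-fired deferred can be unwrapped synchronously:
    assert unwrap_deferred(defer.succeed(42)) == 42

    # A failed deferred re-raises its exception at the call site:
    try:
        unwrap_deferred(defer.fail(ValueError("boom")))
    except ValueError:
        pass

    # An unfired deferred is a programming error:
    try:
        unwrap_deferred(defer.Deferred())
    except RuntimeError:
        pass

    # call_later now forwards extra arguments to the callback, e.g.
    # (retry_send, destination and txn_id here are hypothetical):
    #     clock.call_later(30, retry_send, destination, txn_id)
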
diff --git a/synapse/util/lrucache.py b/synapse/util/lrucache.py
index 96163c90f1..cacd7e45fa 100644
--- a/synapse/util/lrucache.py
+++ b/synapse/util/lrucache.py
@@ -20,7 +20,6 @@ import threading
 
 class LruCache(object):
     """Least-recently-used cache."""
-    # TODO(mjark) Add mutex for linked list for thread safety.
     def __init__(self, max_size):
         cache = {}
         list_root = []
@@ -106,6 +105,12 @@ class LruCache(object):
                 return default
 
         @synchronized
+        def cache_clear():
+            list_root[NEXT] = list_root
+            list_root[PREV] = list_root
+            cache.clear()
+
+        @synchronized
         def cache_len():
             return len(cache)
 
@@ -120,6 +125,7 @@ class LruCache(object):
         self.pop = cache_pop
         self.len = cache_len
         self.contains = cache_contains
+        self.clear = cache_clear
 
     def __getitem__(self, key):
         result = self.get(key, self.sentinel)
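
`LruCache.clear` resets both the backing dict and the circular linked list in
one step; pointing the list root back at itself is the list's empty state. A
quick sanity-check sketch::

    from synapse.util.lrucache import LruCache

    cache = LruCache(max_size=2)
    cache["a"] = 1
    cache["b"] = 2
    assert cache.len() == 2

    cache.clear()
    assert cache.len() == 0
    assert cache.get("a", None) is None
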
diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py
index 62149d6902..8ce8dc0a87 100644
--- a/tests/appservice/test_appservice.py
+++ b/tests/appservice/test_appservice.py
@@ -217,18 +217,9 @@ class ApplicationServiceTestCase(unittest.TestCase):
             _regex("@irc_.*")
         )
         join_list = [
-            Mock(
-                type="m.room.member", room_id="!foo:bar", sender="@alice:here",
-                state_key="@alice:here"
-            ),
-            Mock(
-                type="m.room.member", room_id="!foo:bar", sender="@irc_fo:here",
-                state_key="@irc_fo:here"  # AS user
-            ),
-            Mock(
-                type="m.room.member", room_id="!foo:bar", sender="@bob:here",
-                state_key="@bob:here"
-            )
+            "@alice:here",
+            "@irc_fo:here",  # AS user
+            "@bob:here",
         ]
 
         self.event.sender = "@xmpp_foobar:matrix.org"
diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py
index 8f3df92418..523b30cf8a 100644
--- a/tests/rest/client/v1/test_presence.py
+++ b/tests/rest/client/v1/test_presence.py
@@ -29,6 +29,8 @@ from synapse.rest.client.v1 import events
 from synapse.types import UserID
 from synapse.util.async import run_on_reactor
 
+from collections import namedtuple
+
 
 OFFLINE = PresenceState.OFFLINE
 UNAVAILABLE = PresenceState.UNAVAILABLE
@@ -305,7 +307,10 @@ class PresenceEventStreamTestCase(unittest.TestCase):
             return_value=defer.succeed(None)
         )
         self.mock_datastore.get_rooms_for_user = (
-            lambda u: get_rooms_for_user(UserID.from_string(u))
+            lambda u: [
+                namedtuple("Room", "room_id")(r)
+                for r in get_rooms_for_user(UserID.from_string(u))
+            ]
         )
 
         def get_profile_displayname(user_id):
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index 8c348ecc95..8573f18b55 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -33,8 +33,9 @@ class SQLBaseStoreTestCase(unittest.TestCase):
     def setUp(self):
         self.db_pool = Mock(spec=["runInteraction"])
         self.mock_txn = Mock()
-        self.mock_conn = Mock(spec_set=["cursor"])
+        self.mock_conn = Mock(spec_set=["cursor", "rollback", "commit"])
         self.mock_conn.cursor.return_value = self.mock_txn
+        self.mock_conn.rollback.return_value = None
         # Our fake runInteraction just runs synchronously inline
 
         def runInteraction(func, *args, **kwargs):