author     Mark Haines <mark.haines@matrix.org>  2016-03-30 15:58:20 +0100
committer  Mark Haines <mark.haines@matrix.org>  2016-03-30 16:01:58 +0100
commit     31a9eceda5cf00b0482baf1c8bf1e138c823f621 (patch)
tree       b41ec3d2ee01f6cc896418b523a2a9510ecd30bd
parent     Merge pull request #674 from matrix-org/markjh/replicate_state (diff)
download   synapse-31a9eceda5cf00b0482baf1c8bf1e138c823f621.tar.xz
Add a replication stream for state groups
-rw-r--r--  synapse/replication/resource.py     36
-rw-r--r--  synapse/storage/events.py            6
-rw-r--r--  synapse/storage/state.py            30
-rw-r--r--  tests/replication/test_resource.py  30
4 files changed, 91 insertions, 11 deletions
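
A minimal sketch of how a consumer might poll the new stream over the HTTP replication resource (the endpoint path and urllib transport are assumptions; the "state" query parameter and the response layout follow the updated test in tests/replication/test_resource.py):

    # Illustrative sketch only: poll the replication resource for new state
    # groups. Each "state_groups" row is [position, room_id, event_id]; the
    # token is advanced to the last position seen so the next poll only
    # returns newer groups.
    import json
    import urllib.request

    def poll_state_stream(base_url, state_token=-1):
        # "/_synapse/replication" is assumed to be where the resource is mounted.
        url = "%s/_synapse/replication?state=%d&timeout=0" % (base_url, state_token)
        with urllib.request.urlopen(url) as resp:
            body = json.loads(resp.read())
        groups = body.get("state_groups", {"rows": []})["rows"]
        if groups:
            state_token = groups[-1][0]
        return state_token, body
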
diff --git a/synapse/replication/resource.py b/synapse/replication/resource.py
index 8c1ae0fbc7..096a79a7a4 100644
--- a/synapse/replication/resource.py
+++ b/synapse/replication/resource.py
@@ -38,6 +38,7 @@ STREAM_NAMES = (
     ("backfill",),
     ("push_rules",),
     ("pushers",),
+    ("state",),
 )
 
 
@@ -123,6 +124,7 @@ class ReplicationResource(Resource):
         backfill_token = yield self.store.get_current_backfill_token()
         push_rules_token, room_stream_token = self.store.get_push_rules_stream_token()
         pushers_token = self.store.get_pushers_stream_token()
+        state_token = self.store.get_state_stream_token()
 
         defer.returnValue(_ReplicationToken(
             room_stream_token,
@@ -133,6 +135,7 @@ class ReplicationResource(Resource):
             backfill_token,
             push_rules_token,
             pushers_token,
+            state_token,
         ))
 
     @request_handler
@@ -156,6 +159,7 @@ class ReplicationResource(Resource):
             yield self.receipts(writer, current_token, limit)
             yield self.push_rules(writer, current_token, limit)
             yield self.pushers(writer, current_token, limit)
+            yield self.state(writer, current_token, limit)
             self.streams(writer, current_token)
 
             logger.info("Replicated %d rows", writer.total)
@@ -205,12 +209,12 @@ class ReplicationResource(Resource):
                 current_token.backfill, current_token.events,
                 limit
             )
-            writer.write_header_and_rows(
-                "events", events_rows, ("position", "internal", "json")
-            )
-            writer.write_header_and_rows(
-                "backfill", backfill_rows, ("position", "internal", "json")
-            )
+            writer.write_header_and_rows("events", events_rows, (
+                "position", "internal", "json", "state_group"
+            ))
+            writer.write_header_and_rows("backfill", backfill_rows, (
+                "position", "internal", "json", "state_group"
+            ))
 
     @defer.inlineCallbacks
     def presence(self, writer, current_token):
@@ -320,6 +324,24 @@ class ReplicationResource(Resource):
                 "position", "user_id", "app_id", "pushkey"
             ))
 
+    @defer.inlineCallbacks
+    def state(self, writer, current_token, limit):
+        current_position = current_token.state
+
+        state = parse_integer(writer.request, "state")
+        if state is not None:
+            state_groups, state_group_state = (
+                yield self.store.get_all_new_state_groups(
+                    state, current_position, limit
+                )
+            )
+            writer.write_header_and_rows("state_groups", state_groups, (
+                "position", "room_id", "event_id"
+            ))
+            writer.write_header_and_rows("state_group_state", state_group_state, (
+                "position", "type", "state_key", "event_id"
+            ))
+
 
 class _Writer(object):
     """Writes the streams as a JSON object as the response to the request"""
@@ -350,7 +372,7 @@ class _Writer(object):
 
 class _ReplicationToken(collections.namedtuple("_ReplicationToken", (
     "events", "presence", "typing", "receipts", "account_data", "backfill",
-    "push_rules", "pushers"
+    "push_rules", "pushers", "state"
 ))):
     __slots__ = []
 
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 5f675ab09b..a4b8995496 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1097,10 +1097,12 @@ class EventsStore(SQLBaseStore):
         new events or as backfilled events"""
         def get_all_new_events_txn(txn):
             sql = (
-                "SELECT e.stream_ordering, ej.internal_metadata, ej.json"
+                "SELECT e.stream_ordering, ej.internal_metadata, ej.json, eg.state_group"
                 " FROM events as e"
                 " JOIN event_json as ej"
                 " ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+                " LEFT JOIN event_to_state_groups as eg"
+                " ON e.event_id = eg.event_id"
                 " WHERE ? < e.stream_ordering AND e.stream_ordering <= ?"
                 " ORDER BY e.stream_ordering ASC"
                 " LIMIT ?"
@@ -1116,6 +1118,8 @@ class EventsStore(SQLBaseStore):
                 " FROM events as e"
                 " JOIN event_json as ej"
                 " ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+                " LEFT JOIN event_to_state_groups as eg"
+                " ON e.event_id = eg.event_id"
                 " WHERE ? > e.stream_ordering AND e.stream_ordering >= ?"
                 " ORDER BY e.stream_ordering DESC"
                 " LIMIT ?"
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 30d1060ecd..7fc9a4f264 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -429,3 +429,33 @@ class StateStore(SQLBaseStore):
             }
 
         defer.returnValue(results)
+
+    def get_all_new_state_groups(self, last_id, current_id, limit):
+        def get_all_new_state_groups_txn(txn):
+            sql = (
+                "SELECT id, room_id, event_id FROM state_groups"
+                " WHERE ? < id AND id <= ? ORDER BY id LIMIT ?"
+            )
+            txn.execute(sql, (last_id, current_id, limit))
+            groups = txn.fetchall()
+
+            if not groups:
+                return ([], [])
+
+            lower_bound = groups[0][0]
+            upper_bound = groups[-1][0]
+            sql = (
+                "SELECT state_group, type, state_key, event_id"
+                " FROM state_groups_state"
+                " WHERE ? <= state_group AND state_group <= ?"
+            )
+
+            txn.execute(sql, (lower_bound, upper_bound))
+            state_group_state = txn.fetchall()
+            return (groups, state_group_state)
+        return self.runInteraction(
+            "get_all_new_state_groups", get_all_new_state_groups_txn
+        )
+
+    def get_state_stream_token(self):
+        return self._state_groups_id_gen.get_max_token()
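
An illustrative sketch of how a caller could page through the new storage method, fetching groups in the (last_id, current_id] range and advancing the token to the id of the last group returned (replicate_state_groups is a hypothetical helper, not part of this commit):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def replicate_state_groups(store, last_id, batch_size=100):
        # get_state_stream_token() returns the current max state group id.
        current_id = store.get_state_stream_token()
        while last_id < current_id:
            groups, group_state = yield store.get_all_new_state_groups(
                last_id, current_id, batch_size
            )
            if not groups:
                break
            # groups rows are (id, room_id, event_id); group_state rows are
            # (state_group, type, state_key, event_id) for those group ids.
            last_id = groups[-1][0]
        defer.returnValue(last_id)
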
diff --git a/tests/replication/test_resource.py b/tests/replication/test_resource.py
index f4b5fb3328..b1dd7b4a74 100644
--- a/tests/replication/test_resource.py
+++ b/tests/replication/test_resource.py
@@ -58,15 +58,21 @@ class ReplicationResourceCase(unittest.TestCase):
         self.assertEquals(body, {})
 
     @defer.inlineCallbacks
-    def test_events(self):
-        get = self.get(events="-1", timeout="0")
+    def test_events_and_state(self):
+        get = self.get(events="-1", state="-1", timeout="0")
         yield self.hs.get_handlers().room_creation_handler.create_room(
             Requester(self.user, "", False), {}
         )
         code, body = yield get
         self.assertEquals(code, 200)
         self.assertEquals(body["events"]["field_names"], [
-            "position", "internal", "json"
+            "position", "internal", "json", "state_group"
+        ])
+        self.assertEquals(body["state_groups"]["field_names"], [
+            "position", "room_id", "event_id"
+        ])
+        self.assertEquals(body["state_group_state"]["field_names"], [
+            "position", "type", "state_key", "event_id"
         ])
 
     @defer.inlineCallbacks
@@ -132,6 +138,7 @@ class ReplicationResourceCase(unittest.TestCase):
     test_timeout_backfill = _test_timeout("backfill")
     test_timeout_push_rules = _test_timeout("push_rules")
     test_timeout_pushers = _test_timeout("pushers")
+    test_timeout_state = _test_timeout("state")
 
     @defer.inlineCallbacks
     def send_text_message(self, room_id, message):
@@ -182,4 +189,21 @@ class ReplicationResourceCase(unittest.TestCase):
         )
         response_body = json.loads(response_json)
 
+        if response_code == 200:
+            self.check_response(response_body)
+
         defer.returnValue((response_code, response_body))
+
+    def check_response(self, response_body):
+        for name, stream in response_body.items():
+            self.assertIn("field_names", stream)
+            field_names = stream["field_names"]
+            self.assertIn("rows", stream)
+            self.assertTrue(stream["rows"])
+            for row in stream["rows"]:
+                self.assertEquals(
+                    len(row), len(field_names),
+                    "%s: len(row = %r) == len(field_names = %r)" % (
+                        name, row, field_names
+                    )
+                )