diff --git a/synapse/replication/resource.py b/synapse/replication/resource.py
index 8c1ae0fbc7..096a79a7a4 100644
--- a/synapse/replication/resource.py
+++ b/synapse/replication/resource.py
@@ -38,6 +38,7 @@ STREAM_NAMES = (
("backfill",),
("push_rules",),
("pushers",),
+ ("state",),
)
@@ -123,6 +124,7 @@ class ReplicationResource(Resource):
backfill_token = yield self.store.get_current_backfill_token()
push_rules_token, room_stream_token = self.store.get_push_rules_stream_token()
pushers_token = self.store.get_pushers_stream_token()
+ state_token = self.store.get_state_stream_token()
defer.returnValue(_ReplicationToken(
room_stream_token,
@@ -133,6 +135,7 @@ class ReplicationResource(Resource):
backfill_token,
push_rules_token,
pushers_token,
+ state_token,
))
@request_handler
@@ -156,6 +159,7 @@ class ReplicationResource(Resource):
yield self.receipts(writer, current_token, limit)
yield self.push_rules(writer, current_token, limit)
yield self.pushers(writer, current_token, limit)
+ yield self.state(writer, current_token, limit)
self.streams(writer, current_token)
logger.info("Replicated %d rows", writer.total)
@@ -205,12 +209,12 @@ class ReplicationResource(Resource):
current_token.backfill, current_token.events,
limit
)
- writer.write_header_and_rows(
- "events", events_rows, ("position", "internal", "json")
- )
- writer.write_header_and_rows(
- "backfill", backfill_rows, ("position", "internal", "json")
- )
+ writer.write_header_and_rows("events", events_rows, (
+ "position", "internal", "json", "state_group"
+ ))
+ writer.write_header_and_rows("backfill", backfill_rows, (
+ "position", "internal", "json", "state_group"
+ ))
@defer.inlineCallbacks
def presence(self, writer, current_token):
@@ -320,6 +324,24 @@ class ReplicationResource(Resource):
"position", "user_id", "app_id", "pushkey"
))
+ @defer.inlineCallbacks
+ def state(self, writer, current_token, limit):
+ current_position = current_token.state
+
+ state = parse_integer(writer.request, "state")
+ if state is not None:
+ state_groups, state_group_state = (
+ yield self.store.get_all_new_state_groups(
+ state, current_position, limit
+ )
+ )
+ writer.write_header_and_rows("state_groups", state_groups, (
+ "position", "room_id", "event_id"
+ ))
+ writer.write_header_and_rows("state_group_state", state_group_state, (
+ "position", "type", "state_key", "event_id"
+ ))
+
class _Writer(object):
"""Writes the streams as a JSON object as the response to the request"""
@@ -350,7 +372,7 @@ class _Writer(object):
class _ReplicationToken(collections.namedtuple("_ReplicationToken", (
"events", "presence", "typing", "receipts", "account_data", "backfill",
- "push_rules", "pushers"
+ "push_rules", "pushers", "state"
))):
__slots__ = []
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 5f675ab09b..a4b8995496 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1097,10 +1097,12 @@ class EventsStore(SQLBaseStore):
new events or as backfilled events"""
def get_all_new_events_txn(txn):
sql = (
- "SELECT e.stream_ordering, ej.internal_metadata, ej.json"
+ "SELECT e.stream_ordering, ej.internal_metadata, ej.json, eg.state_group"
" FROM events as e"
" JOIN event_json as ej"
" ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+ " LEFT JOIN event_to_state_groups as eg"
+ " ON e.event_id = eg.event_id"
" WHERE ? < e.stream_ordering AND e.stream_ordering <= ?"
" ORDER BY e.stream_ordering ASC"
" LIMIT ?"
@@ -1115,7 +1117,9 @@
-                "SELECT -e.stream_ordering, ej.internal_metadata, ej.json"
+                "SELECT -e.stream_ordering, ej.internal_metadata, ej.json,"
+                " eg.state_group"
                 " FROM events as e"
                 " JOIN event_json as ej"
                 " ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+                " LEFT JOIN event_to_state_groups as eg"
+                " ON e.event_id = eg.event_id"
                 " WHERE ? > e.stream_ordering AND e.stream_ordering >= ?"
                 " ORDER BY e.stream_ordering DESC"
                 " LIMIT ?"
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 30d1060ecd..7fc9a4f264 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -429,3 +429,33 @@ class StateStore(SQLBaseStore):
}
defer.returnValue(results)
+
+ def get_all_new_state_groups(self, last_id, current_id, limit):
+ def get_all_new_state_groups_txn(txn):
+ sql = (
+ "SELECT id, room_id, event_id FROM state_groups"
+ " WHERE ? < id AND id <= ? ORDER BY id LIMIT ?"
+ )
+ txn.execute(sql, (last_id, current_id, limit))
+ groups = txn.fetchall()
+
+ if not groups:
+ return ([], [])
+
+ lower_bound = groups[0][0]
+ upper_bound = groups[-1][0]
+ sql = (
+ "SELECT state_group, type, state_key, event_id"
+ " FROM state_groups_state"
+ " WHERE ? <= state_group AND state_group <= ?"
+ )
+
+ txn.execute(sql, (lower_bound, upper_bound))
+ state_group_state = txn.fetchall()
+ return (groups, state_group_state)
+ return self.runInteraction(
+ "get_all_new_state_groups", get_all_new_state_groups_txn
+ )
+
+ def get_state_stream_token(self):
+ return self._state_groups_id_gen.get_max_token()
|