author | Mark Haines <mjark@negativecurvature.net> | 2016-03-01 15:08:24 +0000 |
---|---|---|
committer | Mark Haines <mjark@negativecurvature.net> | 2016-03-01 15:08:24 +0000 |
commit | a612ce66597f2d3837c468803044e0400e385fe6 (patch) | |
tree | 0b5734ff804d1b7e24ea0039783695055f49f556 /synapse/storage/events.py | |
parent | Merge pull request #613 from matrix-org/markjh/yield (diff) | |
parent | Add a /replication API for extracting the updates that happened on synapse. (diff) |
download | synapse-a612ce66597f2d3837c468803044e0400e385fe6.tar.xz |
Merge pull request #489 from matrix-org/markjh/replication
Add a /replication API for extracting the updates that happened on synapse.
Diffstat (limited to 'synapse/storage/events.py')
-rw-r--r-- | synapse/storage/events.py | 45 |
1 file changed, 45 insertions, 0 deletions
```diff
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 73a152bc07..60936500d8 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1064,3 +1064,48 @@ class EventsStore(SQLBaseStore):
         yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME)
 
         defer.returnValue(result)
+
+    def get_current_backfill_token(self):
+        """The current minimum token that backfilled events have reached"""
+
+        # TODO: Fix race with the persist_event txn by using one of the
+        # stream id managers
+        return -self.min_stream_token
+
+    def get_all_new_events(self, last_backfill_id, last_forward_id,
+                           current_backfill_id, current_forward_id, limit):
+        """Get all the new events that have arrived at the server either as
+        new events or as backfilled events"""
+        def get_all_new_events_txn(txn):
+            sql = (
+                "SELECT e.stream_ordering, ej.internal_metadata, ej.json"
+                " FROM events as e"
+                " JOIN event_json as ej"
+                " ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+                " WHERE ? < e.stream_ordering AND e.stream_ordering <= ?"
+                " ORDER BY e.stream_ordering ASC"
+                " LIMIT ?"
+            )
+            if last_forward_id != current_forward_id:
+                txn.execute(sql, (last_forward_id, current_forward_id, limit))
+                new_forward_events = txn.fetchall()
+            else:
+                new_forward_events = []
+
+            sql = (
+                "SELECT -e.stream_ordering, ej.internal_metadata, ej.json"
+                " FROM events as e"
+                " JOIN event_json as ej"
+                " ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+                " WHERE ? > e.stream_ordering AND e.stream_ordering >= ?"
+                " ORDER BY e.stream_ordering DESC"
+                " LIMIT ?"
+            )
+            if last_backfill_id != current_backfill_id:
+                txn.execute(sql, (-last_backfill_id, -current_backfill_id, limit))
+                new_backfill_events = txn.fetchall()
+            else:
+                new_backfill_events = []
+
+            return (new_forward_events, new_backfill_events)
+        return self.runInteraction("get_all_new_events", get_all_new_events_txn)
```
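Note that backfilled events use negative stream orderings, which is why `get_current_backfill_token` negates `min_stream_token` and why the second query negates the token arguments and the returned ordering. Below is a minimal sketch, not part of this commit, of how a consumer of the new replication storage methods might page through both streams; the `store` object, the `fetch_updates` helper, and the way `current_forward_id` is obtained are assumptions for illustration only.

```python
from twisted.internet import defer


@defer.inlineCallbacks
def fetch_updates(store, last_backfill_id, last_forward_id,
                  current_forward_id, limit=100):
    # Hypothetical helper: `store` is assumed to be an EventsStore instance
    # and `current_forward_id` the caller's current forward stream token.

    # Backfill tokens are negated stream orderings, so the "current" backfill
    # token is simply the negated minimum stream token (not a deferred).
    current_backfill_id = store.get_current_backfill_token()

    # get_all_new_events runs in a DB transaction and returns a Deferred.
    forward_rows, backfill_rows = yield store.get_all_new_events(
        last_backfill_id, last_forward_id,
        current_backfill_id, current_forward_id,
        limit,
    )

    # Each row is (stream_ordering, internal_metadata, json); backfill rows
    # already have their ordering negated by the SQL above.
    defer.returnValue({
        "forward": forward_rows,
        "backfill": backfill_rows,
        "next_forward_id": current_forward_id,
        "next_backfill_id": current_backfill_id,
    })
```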