From 43f2b67e4d2ce95b3b13d88e755afc7e3907e82b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 18 Sep 2020 14:25:52 +0100
Subject: Intelligently select extremities used in backfill. (#8349)

Instead of just using the most recent extremities, let's pick the ones that
will give us results that the pagination request cares about, i.e. pick
extremities only if they have a smaller depth than the pagination token.

This is useful when we fail to backfill an extremity, as we no longer get
stuck requesting that same extremity repeatedly.
---
 synapse/storage/databases/main/stream.py | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

(limited to 'synapse/storage/databases')

diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index be6df8a6d1..db20a3db30 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -648,23 +648,20 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         )
         return "t%d-%d" % (row["topological_ordering"], row["stream_ordering"])
 
-    async def get_max_topological_token(self, room_id: str, stream_key: int) -> int:
-        """Get the max topological token in a room before the given stream
+    async def get_current_topological_token(self, room_id: str, stream_key: int) -> int:
+        """Gets the topological token in a room after or at the given stream
         ordering.
 
         Args:
             room_id
             stream_key
-
-        Returns:
-            The maximum topological token.
         """
         sql = (
-            "SELECT coalesce(max(topological_ordering), 0) FROM events"
-            " WHERE room_id = ? AND stream_ordering < ?"
+            "SELECT coalesce(MIN(topological_ordering), 0) FROM events"
+            " WHERE room_id = ? AND stream_ordering >= ?"
         )
         row = await self.db_pool.execute(
-            "get_max_topological_token", None, sql, room_id, stream_key
+            "get_current_topological_token", None, sql, room_id, stream_key
        )
        return row[0][0] if row else 0
 
--
cgit 1.4.1
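
Note: the hunk above only adds the storage helper; the extremity selection
itself happens in the handlers, which lie outside the path this diff is
limited to. As a rough, illustrative sketch only (the function and variable
names below are hypothetical, not Synapse's), the rule from the commit
message can be expressed as: keep an extremity only if its depth is smaller
than the topological token returned by the new get_current_topological_token.

    async def _pick_backfill_extremities(store, room_id, stream_key, extremity_depths):
        """Hypothetical helper: filter backfill extremities by depth.

        extremity_depths is assumed to map event_id -> depth for the room's
        current backward extremities.
        """
        # Topological ordering at or after the pagination request's stream
        # position (this is the helper added by the patch above).
        current_depth = await store.get_current_topological_token(room_id, stream_key)

        # Only extremities shallower than the pagination point can yield events
        # the request cares about; skipping the rest avoids getting stuck
        # repeatedly re-requesting an extremity we keep failing to backfill.
        return [
            event_id
            for event_id, depth in extremity_depths.items()
            if depth < current_depth
        ]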