Reduce DB usage when backpaginating
In certain cases going from a stream token to a topological token can be
expensive (as it requires counting all rows in the room above the stream
ordering), so we refactor things so that we do that calculation less often.
1 file changed, 21 insertions, 10 deletions
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index cd3a9088cd..7b15924331 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -468,16 +468,6 @@ class PaginationHandler:
room_id, requester, allow_departed_users=True
)
- if pagin_config.direction == Direction.BACKWARDS:
- # if we're going backwards, we might need to backfill. This
- # requires that we have a topo token.
- if room_token.topological:
- curr_topo = room_token.topological
- else:
- curr_topo = await self.store.get_current_topological_token(
- room_id, room_token.stream
- )
-
# If they have left the room then clamp the token to be before
# they left the room, to save the effort of loading from the
# database.
@@ -496,6 +486,14 @@ class PaginationHandler:
)
assert leave_token.topological is not None
+ # We need the topological part of the token to compare against.
+ if room_token.topological:
+ curr_topo = room_token.topological
+ else:
+ curr_topo = await self.store.get_current_topological_token(
+ room_id, room_token.stream
+ )
+
if leave_token.topological < curr_topo:
from_token = from_token.copy_and_replace(
StreamKeyType.ROOM, leave_token
@@ -561,6 +559,19 @@ class PaginationHandler:
break
previous_event_depth = event_depth
+ # if we're going backwards, we might need to backfill. This
+ # requires that we have a topo token.
+ if room_token.topological:
+ curr_topo = room_token.topological
+ elif events:
+ # If we've already fetched some events then we can just use
+ # those to get the right depth.
+ curr_topo = events[0].depth
+ else:
+ curr_topo = await self.store.get_current_topological_token(
+ room_id, room_token.stream
+ )
+
# Backfill in the foreground if we found a big gap, have too many holes,
# or we don't have enough events to fill the limit that the client asked
# for.
|