path: root/synapse/handlers/pagination.py
Diffstat
-rw-r--r--  synapse/handlers/pagination.py  63
1 file changed, 40 insertions, 23 deletions
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py

index 6fd7afa280..365c9cabcb 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -507,15 +507,17 @@ class PaginationHandler:
 
         # Initially fetch the events from the database. With any luck, we can return
         # these without blocking on backfill (handled below).
-        events, next_key = (
-            await self.store.paginate_room_events_by_topological_ordering(
-                room_id=room_id,
-                from_key=from_token.room_key,
-                to_key=to_room_key,
-                direction=pagin_config.direction,
-                limit=pagin_config.limit,
-                event_filter=event_filter,
-            )
+        (
+            events,
+            next_key,
+            limited,
+        ) = await self.store.paginate_room_events_by_topological_ordering(
+            room_id=room_id,
+            from_key=from_token.room_key,
+            to_key=to_room_key,
+            direction=pagin_config.direction,
+            limit=pagin_config.limit,
+            event_filter=event_filter,
         )
 
         if pagin_config.direction == Direction.BACKWARDS:
@@ -575,25 +577,31 @@ class PaginationHandler:
                 or missing_too_many_events
                 or not_enough_events_to_fill_response
             ):
-                did_backfill = await self.hs.get_federation_handler().maybe_backfill(
+                # Historical Note: There used to be a check here for if backfill was
+                # successful or not
+                await self.hs.get_federation_handler().maybe_backfill(
                     room_id,
                     curr_topo,
                     limit=pagin_config.limit,
                 )
 
-                # If we did backfill something, refetch the events from the database to
-                # catch anything new that might have been added since we last fetched.
-                if did_backfill:
-                    events, next_key = (
-                        await self.store.paginate_room_events_by_topological_ordering(
-                            room_id=room_id,
-                            from_key=from_token.room_key,
-                            to_key=to_room_key,
-                            direction=pagin_config.direction,
-                            limit=pagin_config.limit,
-                            event_filter=event_filter,
-                        )
-                    )
+                # Regardless if we backfilled or not, another worker or even a
+                # simultaneous request may have backfilled for us while we were held
+                # behind the linearizer. This should not have too much additional
+                # database load as it will only be triggered if a backfill *might* have
+                # been needed
+                (
+                    events,
+                    next_key,
+                    limited,
+                ) = await self.store.paginate_room_events_by_topological_ordering(
+                    room_id=room_id,
+                    from_key=from_token.room_key,
+                    to_key=to_room_key,
+                    direction=pagin_config.direction,
+                    limit=pagin_config.limit,
+                    event_filter=event_filter,
+                )
             else:
                 # Otherwise, we can backfill in the background for eventual
                 # consistency's sake but we don't need to block the client waiting
@@ -608,6 +616,15 @@ class PaginationHandler:
 
         next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key)
 
+        # We might have hit some internal filtering first, for example rejected
+        # events. Ensure we return a pagination token then.
+        if not events and limited:
+            return {
+                "chunk": [],
+                "start": await from_token.to_string(self.store),
+                "end": await next_token.to_string(self.store),
+            }
+
         # if no events are returned from pagination, that implies
         # we have reached the end of the available events.
         # In that case we do not return end, to tell the client
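The behavioural change in the last hunk can be summarised outside of Synapse: when the store reports that a page was "limited" (truncated, or internally filtered so that e.g. rejected events were dropped) but no events survived, the response should still carry an "end" token so the client keeps paginating instead of assuming it has reached the end of history. The following is a minimal, self-contained sketch of that token rule only; build_chunk and its parameters are hypothetical names for illustration, not Synapse APIs.

from typing import List


def build_chunk(
    events: List[dict], limited: bool, start_token: str, end_token: str
) -> dict:
    """Assemble a /messages-style response body (illustrative sketch only)."""
    response = {"chunk": events, "start": start_token}

    if events or limited:
        # Either there are events to return, or the page was truncated/filtered
        # before the end of history: include "end" so the client paginates on.
        response["end"] = end_token
    # Otherwise the available events are exhausted; omitting "end" signals to
    # the client that it has reached the end of history.

    return response


# An empty but "limited" page still advances the pagination token:
print(build_chunk([], limited=True, start_token="t100-0", end_token="t90-0"))
# -> {'chunk': [], 'start': 't100-0', 'end': 't90-0'}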