author | Richard van der Hoff <1389908+richvdh@users.noreply.github.com> | 2022-06-09 11:13:03 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2022-06-09 10:13:03 +0000 |
commit | 7c6b2204d143550d81e5bf9612c4e69fe0866b4c (patch) | |
tree | 91c657909b18c0da55dba304b5883f617ad535a1 /synapse/handlers/federation.py | |
parent | Type annotations for `test_v2` (#12985) (diff) | |
download | synapse-7c6b2204d143550d81e5bf9612c4e69fe0866b4c.tar.xz |
Faster joins: add issue links to the TODOs (#13004)
... to help us keep track of these things
Diffstat (limited to 'synapse/handlers/federation.py')
-rw-r--r-- | synapse/handlers/federation.py | 13 |
1 file changed, 12 insertions, 1 deletion
```diff
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 6a143440d3..5e16139626 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -545,6 +545,7 @@ class FederationHandler:
             if ret.partial_state:
                 # TODO(faster_joins): roll this back if we don't manage to start the
                 #   background resync (eg process_remote_join fails)
+                #   https://github.com/matrix-org/synapse/issues/12998
                 await self.store.store_partial_state_room(room_id, ret.servers_in_room)
 
             max_stream_id = await self._federation_event_handler.process_remote_join(
@@ -1506,14 +1507,17 @@ class FederationHandler:
         # TODO(faster_joins): do we need to lock to avoid races? What happens if other
         #   worker processes kick off a resync in parallel? Perhaps we should just elect
         #   a single worker to do the resync.
+        #   https://github.com/matrix-org/synapse/issues/12994
         #
         # TODO(faster_joins): what happens if we leave the room during a resync? if we
         #   really leave, that might mean we have difficulty getting the room state over
         #   federation.
+        #   https://github.com/matrix-org/synapse/issues/12802
         #
         # TODO(faster_joins): we need some way of prioritising which homeservers in
         #   `other_destinations` to try first, otherwise we'll spend ages trying dead
         #   homeservers for large rooms.
+        #   https://github.com/matrix-org/synapse/issues/12999
 
         if initial_destination is None and len(other_destinations) == 0:
             raise ValueError(
@@ -1543,9 +1547,11 @@ class FederationHandler:
                 # all the events are updated, so we can update current state and
                 # clear the lazy-loading flag.
                 logger.info("Updating current state for %s", room_id)
+                # TODO(faster_joins): support workers
+                #   https://github.com/matrix-org/synapse/issues/12994
                 assert (
                     self._storage_controllers.persistence is not None
-                ), "TODO(faster_joins): support for workers"
+                ), "worker-mode deployments not currently supported here"
                 await self._storage_controllers.persistence.update_current_state(
                     room_id
                 )
@@ -1559,6 +1565,8 @@ class FederationHandler:
                     )
 
                     # TODO(faster_joins) update room stats and user directory?
+                    #   https://github.com/matrix-org/synapse/issues/12814
+                    #   https://github.com/matrix-org/synapse/issues/12815
                     return
 
                 # we raced against more events arriving with partial state. Go round
@@ -1566,6 +1574,8 @@ class FederationHandler:
                 # TODO(faster_joins): there is still a race here, whereby incoming events which raced
                 #   with us will fail to be persisted after the call to `clear_partial_state_room` due to
                 #   having partial state.
+                #   https://github.com/matrix-org/synapse/issues/12988
+                #
                 continue
 
             events = await self.store.get_events_as_list(
@@ -1588,6 +1598,7 @@ class FederationHandler:
                             #   indefinitely is also not the right thing to do if we can
                             #   reach all homeservers and they all claim they don't have
                             #   the state we want.
+                            #   https://github.com/matrix-org/synapse/issues/13000
                             logger.error(
                                 "Failed to get state for %s at %s from %s because %s, "
                                 "giving up!",
```
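Aside from the added issue links, the only change to non-comment text is in the hunk at `@@ -1543`: the tracking note moves out of the assertion message into a `TODO(faster_joins)` comment above the check, and the message itself now simply describes the unsupported configuration. A minimal, self-contained sketch of that shape; the `persistence` variable below is a stand-in for `self._storage_controllers.persistence` (which is only set on a process able to persist events), not Synapse's real object:

```python
# Sketch of the pattern from the @@ -1543 hunk.  `persistence` is a
# placeholder for self._storage_controllers.persistence; on a real
# deployment it is None on processes that cannot persist events.
persistence = object()  # pretend we are on the event-persisting process

# TODO(faster_joins): support workers
#   https://github.com/matrix-org/synapse/issues/12994
assert (
    persistence is not None
), "worker-mode deployments not currently supported here"
```

Keeping the issue URL in a comment rather than in the assertion string keeps the runtime error message readable, while anyone grepping the `TODO(faster_joins)` markers still lands on the tracking issue.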