diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index a5b6a61195..5fb342c097 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -669,16 +669,22 @@ class FederationClient(FederationBase):
logger.debug("Got content: %s", content)
+ logger.info("send_join content: %d", len(content))
+
state = [
event_from_pdu_json(p, room_version, outlier=True)
for p in content.get("state", [])
]
+ logger.info("Parsed auth chain: %d", len(state))
+
auth_chain = [
event_from_pdu_json(p, room_version, outlier=True)
for p in content.get("auth_chain", [])
]
+ logger.info("Parsed auth chain: %d", len(auth_chain))
+
pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}
create_event = None
@@ -711,6 +717,8 @@ class FederationClient(FederationBase):
room_version=room_version,
)
+ logger.info("_check_sigs_and_hash_and_fetch done")
+
valid_pdus_map = {p.event_id: p for p in valid_pdus}
# NB: We *need* to copy to ensure that we don't have multiple
@@ -744,6 +752,8 @@ class FederationClient(FederationBase):
% (auth_chain_create_events,)
)
+ logger.info("Returning from send_join")
+
return {
"state": signed_state,
"auth_chain": signed_auth,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 9d867aaf4d..69055a14b3 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1452,7 +1452,7 @@ class FederationHandler(BaseHandler):
# room stuff after join currently doesn't work on workers.
assert self.config.worker.worker_app is None
- logger.debug("Joining %s to %s", joinee, room_id)
+ logger.info("Joining %s to %s", joinee, room_id)
origin, event, room_version_obj = await self._make_and_verify_event(
target_hosts,
@@ -1463,6 +1463,8 @@ class FederationHandler(BaseHandler):
params={"ver": KNOWN_ROOM_VERSIONS},
)
+ logger.info("make_join done from %s", origin)
+
# This shouldn't happen, because the RoomMemberHandler has a
# linearizer lock which only allows one operation per user per room
# at a time - so this is just paranoia.
@@ -1482,10 +1484,13 @@ class FederationHandler(BaseHandler):
except ValueError:
pass
+ logger.info("Sending join")
ret = await self.federation_client.send_join(
host_list, event, room_version_obj
)
+ logger.info("send join done")
+
origin = ret["origin"]
state = ret["state"]
auth_chain = ret["auth_chain"]
@@ -1510,10 +1515,14 @@ class FederationHandler(BaseHandler):
room_version=room_version_obj,
)
+ logger.info("Persisting auth true")
+
max_stream_id = await self._persist_auth_tree(
origin, room_id, auth_chain, state, event, room_version_obj
)
+ logger.info("Persisted auth true")
+
# We wait here until this instance has seen the events come down
# replication (if we're using replication) as the below uses caches.
await self._replication.wait_for_stream_position(
@@ -2166,6 +2175,8 @@ class FederationHandler(BaseHandler):
ctx = await self.state_handler.compute_event_context(e)
events_to_context[e.event_id] = ctx
+ logger.info("Computed contexts")
+
event_map = {
e.event_id: e for e in itertools.chain(auth_events, state, [event])
}
@@ -2207,6 +2218,8 @@ class FederationHandler(BaseHandler):
else:
logger.info("Failed to find auth event %r", e_id)
+ logger.info("Got missing events")
+
for e in itertools.chain(auth_events, state, [event]):
auth_for_e = {
(event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
@@ -2231,6 +2244,8 @@ class FederationHandler(BaseHandler):
raise
events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR
+ logger.info("Authed events")
+
await self.persist_events_and_notify(
room_id,
[
@@ -2239,10 +2254,14 @@ class FederationHandler(BaseHandler):
],
)
+ logger.info("Persisted events")
+
new_event_context = await self.state_handler.compute_event_context(
event, old_state=state
)
+ logger.info("Computed context")
+
return await self.persist_events_and_notify(
room_id, [(event, new_event_context)]
)