diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 1ea837d082..b8990da954 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -167,9 +167,14 @@ class FederationHandler:
oldest_events_with_depth = (
await self.store.get_oldest_event_ids_with_depth_in_room(room_id)
)
- insertion_events_to_be_backfilled = (
- await self.store.get_insertion_event_backwards_extremities_in_room(room_id)
- )
+
+ insertion_events_to_be_backfilled: Dict[str, int] = {}
+ if self.hs.config.experimental.msc2716_enabled:
+ insertion_events_to_be_backfilled = (
+ await self.store.get_insertion_event_backward_extremities_in_room(
+ room_id
+ )
+ )
logger.debug(
"_maybe_backfill_inner: extremities oldest_events_with_depth=%s insertion_events_to_be_backfilled=%s",
oldest_events_with_depth,
@@ -272,11 +277,12 @@ class FederationHandler:
]
logger.debug(
- "room_id: %s, backfill: current_depth: %s, limit: %s, max_depth: %s, extrems: %s filtered_sorted_extremeties_tuple: %s",
+ "room_id: %s, backfill: current_depth: %s, limit: %s, max_depth: %s, extrems (%d): %s filtered_sorted_extremeties_tuple: %s",
room_id,
current_depth,
limit,
max_depth,
+ len(sorted_extremeties_tuple),
sorted_extremeties_tuple,
filtered_sorted_extremeties_tuple,
)
@@ -1048,6 +1054,19 @@ class FederationHandler:
limit = min(limit, 100)
events = await self.store.get_backfill_events(room_id, pdu_list, limit)
+ logger.debug(
+ "on_backfill_request: backfill events=%s",
+ [
+ "event_id=%s,depth=%d,body=%s,prevs=%s\n"
+ % (
+ event.event_id,
+ event.depth,
+ event.content.get("body", event.type),
+ event.prev_event_ids(),
+ )
+ for event in events
+ ],
+ )
events = await filter_events_for_server(self.storage, origin, events)
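The federation.py hunk above gates the insertion-event backward-extremity lookup behind the experimental MSC2716 flag, falling back to an empty mapping otherwise. A minimal sketch of that gating pattern, assuming a made-up FakeStore and a plain boolean in place of Synapse's datastore and config objects:

import asyncio
from typing import Dict

class FakeStore:
    """Hypothetical stand-in for Synapse's datastore, for illustration only."""

    async def get_insertion_event_backward_extremities_in_room(
        self, room_id: str
    ) -> Dict[str, int]:
        # Pretend two insertion events in this room still need backfilling,
        # mapped to their depths.
        return {"$insertion_a": 5, "$insertion_b": 9}

async def get_insertion_extremities(
    store: FakeStore, msc2716_enabled: bool, room_id: str
) -> Dict[str, int]:
    # Default to an empty mapping and only hit the store when the
    # experimental flag is enabled, mirroring the gate in the hunk above.
    insertion_events_to_be_backfilled: Dict[str, int] = {}
    if msc2716_enabled:
        insertion_events_to_be_backfilled = (
            await store.get_insertion_event_backward_extremities_in_room(room_id)
        )
    return insertion_events_to_be_backfilled

print(asyncio.run(get_insertion_extremities(FakeStore(), True, "!r:example.org")))
# -> {'$insertion_a': 5, '$insertion_b': 9}
print(asyncio.run(get_insertion_extremities(FakeStore(), False, "!r:example.org")))
# -> {}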
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 9917613298..2a0fb9dbdf 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -514,7 +514,11 @@ class FederationEventHandler:
f"room {ev.room_id}, when we were backfilling in {room_id}"
)
- await self._process_pulled_events(dest, events, backfilled=True)
+ await self._process_pulled_events(
+ dest,
+ events,
+ backfilled=True,
+ )
async def _get_missing_events_for_pdu(
self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int
@@ -632,11 +636,24 @@ class FederationEventHandler:
backfilled: True if this is part of a historical batch of events (inhibits
notification to clients, and validation of device keys.)
"""
+ logger.debug(
+ "processing pulled backfilled=%s events=%s",
+ backfilled,
+ [
+ "event_id=%s,depth=%d,body=%s,prevs=%s\n"
+ % (
+ event.event_id,
+ event.depth,
+ event.content.get("body", event.type),
+ event.prev_event_ids(),
+ )
+ for event in events
+ ],
+ )
# We want to sort these by depth so we process them and
# tell clients about them in order.
sorted_events = sorted(events, key=lambda x: x.depth)
-
for ev in sorted_events:
with nested_logging_context(ev.event_id):
await self._process_pulled_event(origin, ev, backfilled=backfilled)
@@ -996,6 +1013,8 @@ class FederationEventHandler:
await self._run_push_actions_and_persist_event(event, context, backfilled)
+ await self._handle_marker_event(origin, event)
+
if backfilled or context.rejected:
return
@@ -1075,8 +1094,6 @@ class FederationEventHandler:
event.sender,
)
- await self._handle_marker_event(origin, event)
-
async def _resync_device(self, sender: str) -> None:
"""We have detected that the device list for the given user may be out
of sync, so we try and resync them.
@@ -1323,7 +1340,14 @@ class FederationEventHandler:
return event, context
events_to_persist = (x for x in (prep(event) for event in fetched_events) if x)
- await self.persist_events_and_notify(room_id, tuple(events_to_persist))
+ await self.persist_events_and_notify(
+ room_id,
+ tuple(events_to_persist),
+            # Mark these events as backfilled because they are historical events
+            # that will eventually be backfilled. For example, missing events we
+            # fetch during backfill should be marked as backfilled as well.
+ backfilled=True,
+ )
async def _check_event_auth(
self,
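The federation_event.py hunks above move the _handle_marker_event call to just after the event is persisted and before the `if backfilled or context.rejected: return` early return, so marker events received through backfill are no longer skipped. A toy sketch (hypothetical handler and function names, not Synapse's API) of why the ordering matters:

from typing import List

processed: List[str] = []

def handle_marker(event_id: str) -> None:
    # Records that a marker was acted on; stands in for _handle_marker_event.
    processed.append(event_id)

def process_event_old(event_id: str, is_marker: bool, backfilled: bool) -> None:
    # Old ordering: the backfill early-return fires before the marker handler.
    if backfilled:
        return
    if is_marker:
        handle_marker(event_id)

def process_event_new(event_id: str, is_marker: bool, backfilled: bool) -> None:
    # New ordering: the marker handler runs before the backfill early-return.
    if is_marker:
        handle_marker(event_id)
    if backfilled:
        return

process_event_old("$marker_old", is_marker=True, backfilled=True)
assert processed == []                 # backfilled marker was dropped
process_event_new("$marker_new", is_marker=True, backfilled=True)
assert processed == ["$marker_new"]    # backfilled marker is now handled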
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 87f671708c..d370b2c302 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -943,14 +943,24 @@ class EventCreationHandler:
else:
prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)
- # we now ought to have some prev_events (unless it's a create event).
- #
- # do a quick sanity check here, rather than waiting until we've created the
+ # Do a quick sanity check here, rather than waiting until we've created the
# event and then try to auth it (which fails with a somewhat confusing "No
# create event in auth events")
- assert (
- builder.type == EventTypes.Create or len(prev_event_ids) > 0
- ), "Attempting to create an event with no prev_events"
+ room_version_obj = await self.store.get_room_version(builder.room_id)
+ if room_version_obj.msc2716_empty_prev_events:
+            # We allow events with no `prev_events`, but they must have some `auth_events`
+ assert (
+ builder.type == EventTypes.Create
+ or len(prev_event_ids) > 0
+                # Allow an event to have an empty list of prev_event_ids
+                # only if it has auth_event_ids.
+ or (auth_event_ids and len(auth_event_ids) > 0)
+ ), "Attempting to create an event with no prev_events or auth_event_ids"
+ else:
+ # we now ought to have some prev_events (unless it's a create event).
+ assert (
+ builder.type == EventTypes.Create or len(prev_event_ids) > 0
+ ), "Attempting to create an event with no prev_events"
event = await builder.build(
prev_event_ids=prev_event_ids,
@@ -1540,6 +1550,7 @@ class EventCreationHandler:
next_batch_id = event.content.get(
EventContentFields.MSC2716_NEXT_BATCH_ID
)
+
conflicting_insertion_event_id = None
if next_batch_id:
conflicting_insertion_event_id = (
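The message.py hunk above makes the prev_events sanity check room-version aware: in room versions with msc2716_empty_prev_events, an event may have an empty prev_events list as long as it supplies auth_events. A standalone sketch of that check, using plain strings and lists instead of Synapse's builder and room-version objects (check_prev_events is a hypothetical helper, not Synapse code):

from typing import List, Optional

def check_prev_events(
    event_type: str,
    prev_event_ids: List[str],
    auth_event_ids: Optional[List[str]],
    msc2716_empty_prev_events: bool,
) -> None:
    # Create events legitimately have no prev_events in any room version.
    if event_type == "m.room.create":
        return
    if msc2716_empty_prev_events:
        # MSC2716 room versions: an empty prev_events list is fine, but only
        # when explicit auth_events are supplied instead.
        assert prev_event_ids or (
            auth_event_ids and len(auth_event_ids) > 0
        ), "Attempting to create an event with no prev_events or auth_event_ids"
    else:
        # All other room versions keep the original rule: prev_events required.
        assert len(prev_event_ids) > 0, "Attempting to create an event with no prev_events"

# Allowed only in an MSC2716 room version: no prev_events, but auth_events given.
check_prev_events("m.room.member", [], ["$create", "$power_levels"], True)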
diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py
index f880aa93d2..c4d22ad297 100644
--- a/synapse/handlers/room_batch.py
+++ b/synapse/handlers/room_batch.py
@@ -13,10 +13,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-def generate_fake_event_id() -> str:
- return "$fake_" + random_string(43)
-
-
class RoomBatchHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
@@ -184,7 +180,7 @@ class RoomBatchHandler:
# Make the state events float off on their own so we don't have a
# bunch of `@mxid joined the room` noise between each batch
- prev_event_id_for_state_chain = generate_fake_event_id()
+ prev_event_ids_for_state_chain: List[str] = []
for state_event in state_events_at_start:
assert_params_in_dict(
@@ -222,7 +218,7 @@ class RoomBatchHandler:
content=event_dict["content"],
outlier=True,
historical=True,
- prev_event_ids=[prev_event_id_for_state_chain],
+ prev_event_ids=prev_event_ids_for_state_chain,
# Make sure to use a copy of this list because we modify it
# later in the loop here. Otherwise it will be the same
                # reference and will also be updated in the event when we append later.
@@ -242,7 +238,7 @@ class RoomBatchHandler:
event_dict,
outlier=True,
historical=True,
- prev_event_ids=[prev_event_id_for_state_chain],
+ prev_event_ids=prev_event_ids_for_state_chain,
# Make sure to use a copy of this list because we modify it
# later in the loop here. Otherwise it will be the same
                # reference and will also be updated in the event when we append later.
@@ -253,7 +249,7 @@ class RoomBatchHandler:
state_event_ids_at_start.append(event_id)
auth_event_ids.append(event_id)
# Connect all the state in a floating chain
- prev_event_id_for_state_chain = event_id
+ prev_event_ids_for_state_chain = [event_id]
return state_event_ids_at_start
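The room_batch.py hunk above drops the fake "$fake_…" prev_event and starts the floating historical state chain with an empty prev_events list, relying on the relaxed check in message.py; each state event then becomes the sole prev_event of the next. A rough sketch of that threading, using plain dicts rather than real events (chain_state_events is a hypothetical helper):

from typing import List

def chain_state_events(state_event_ids: List[str]) -> List[dict]:
    # The first state event floats: no prev_events at all, authorised
    # purely via its auth_events (see the relaxed check in message.py).
    prev_event_ids_for_state_chain: List[str] = []
    chained = []
    for event_id in state_event_ids:
        chained.append(
            {
                "event_id": event_id,
                # Store a copy so this entry is independent of later updates.
                "prev_events": list(prev_event_ids_for_state_chain),
            }
        )
        # Connect all the state in a floating chain: the next event points
        # only at the one just created.
        prev_event_ids_for_state_chain = [event_id]
    return chained

print(chain_state_events(["$member_alice", "$member_bob"]))
# [{'event_id': '$member_alice', 'prev_events': []},
#  {'event_id': '$member_bob', 'prev_events': ['$member_alice']}]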
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index a6dbff637f..805c49ac01 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -658,7 +658,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if block_invite:
raise SynapseError(403, "Invites have been disabled on this server")
- if prev_event_ids:
+ if prev_event_ids is not None:
return await self._local_membership_update(
requester=requester,
target=target,
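The room_member.py hunk above replaces a truthiness test with an explicit None check: now that an empty prev_events list is a meaningful value (the floating chains above), `[]` must take the explicit-prev_events branch instead of being treated as if no prev_events were supplied. A small illustration of the difference (hypothetical helper names):

from typing import List, Optional

def takes_explicit_prev_events_old(prev_event_ids: Optional[List[str]]) -> bool:
    # Old check: `if prev_event_ids:` treats an empty list the same as None.
    return bool(prev_event_ids)

def takes_explicit_prev_events_new(prev_event_ids: Optional[List[str]]) -> bool:
    # New check: only a genuinely absent value (None) falls through.
    return prev_event_ids is not None

assert takes_explicit_prev_events_old([]) is False   # [] silently fell through
assert takes_explicit_prev_events_new([]) is True    # [] now handled explicitly
assert takes_explicit_prev_events_new(None) is False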