diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py
index d0bbc1c803..621f46fff8 100644
--- a/tests/storage/test_sliding_sync_tables.py
+++ b/tests/storage/test_sliding_sync_tables.py
@@ -38,6 +38,7 @@ from synapse.storage.databases.main.events_bg_updates import (
_resolve_stale_data_in_sliding_sync_joined_rooms_table,
_resolve_stale_data_in_sliding_sync_membership_snapshots_table,
)
+from synapse.types import create_requester
from synapse.util import Clock
from tests.test_utils.event_injection import create_event
@@ -925,6 +926,128 @@ class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase):
user2_snapshot,
)
+ @parameterized.expand(
+        # Test both an insert and an upsert into the
+        # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables to
+        # exercise more of the ways things can go wrong.
+ [
+ ("insert", True),
+ ("upsert", False),
+ ]
+ )
+ def test_joined_room_outlier_and_deoutlier(
+ self, description: str, should_insert: bool
+ ) -> None:
+ """
+ This is a regression test.
+
+        This simulates the case where an event is first persisted as an outlier (like
+        a remote invite) and then persisted again later to de-outlier it. The first
+        time, the outlier is persisted with one `stream_ordering`, but when it is
+        persisted again and de-outliered, it is assigned a different `stream_ordering`
+        that won't end up being used. Since we call
+        `_calculate_sliding_sync_table_changes()` before `_update_outliers_txn()`
+        (which fixes this discrepancy by always using the `stream_ordering` from the
+        first time the event was persisted), make sure we're not using an unreliable
+        `stream_ordering` value that will cause a `FOREIGN KEY constraint failed`
+        error in the
+        `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_version = RoomVersions.V10
+ room_id = self.helper.create_room_as(
+ user2_id, tok=user2_tok, room_version=room_version.identifier
+ )
+
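+        # Creating the room above already populates the
+        # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables, so
+        # by default the events we persist below will result in upserts.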
+ if should_insert:
+ # Clear these out so we always insert
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_joined_rooms",
+ keyvalues={"room_id": room_id},
+ desc="TODO",
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_membership_snapshots",
+ keyvalues={"room_id": room_id},
+ desc="TODO",
+ )
+ )
+
+ # Create a membership event (which triggers an insert into
+ # `sliding_sync_membership_snapshots`)
+ membership_event_dict = {
+ "type": EventTypes.Member,
+ "state_key": user1_id,
+ "sender": user1_id,
+ "room_id": room_id,
+ "content": {EventContentFields.MEMBERSHIP: Membership.JOIN},
+ }
+ # Create a relevant state event (which triggers an insert into
+ # `sliding_sync_joined_rooms`)
+ state_event_dict = {
+ "type": EventTypes.Name,
+ "state_key": "",
+ "sender": user2_id,
+ "room_id": room_id,
+ "content": {EventContentFields.ROOM_NAME: "my super room"},
+ }
+ event_dicts_to_persist = [
+ membership_event_dict,
+ state_event_dict,
+ ]
+
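+        # Create each event twice, first as an outlier and then as a non-outlier, and
+        # persist both versions so that the second persist de-outliers the first.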
+ for event_dict in event_dicts_to_persist:
+ events_to_persist = []
+
+            # Create the event as an outlier
+ (
+ event,
+ unpersisted_context,
+ ) = self.get_success(
+ self.hs.get_event_creation_handler().create_event(
+ requester=create_requester(user1_id),
+ event_dict=event_dict,
+ outlier=True,
+ )
+ )
+ # FIXME: Should we use an `EventContext.for_outlier(...)` here?
+ # Doesn't seem to matter for this test.
+ context = self.get_success(unpersisted_context.persist(event))
+ events_to_persist.append((event, context))
+
+            # Create the event again but as a non-outlier. This will de-outlier the
+            # event when we persist it.
+ (
+ event,
+ unpersisted_context,
+ ) = self.get_success(
+ self.hs.get_event_creation_handler().create_event(
+ requester=create_requester(user1_id),
+ event_dict=event_dict,
+ outlier=False,
+ )
+ )
+ context = self.get_success(unpersisted_context.persist(event))
+ events_to_persist.append((event, context))
+
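+            # Persist the outlier version first, then the de-outliered version. The
+            # second persist must not trip a `FOREIGN KEY constraint failed` error in
+            # the sliding sync tables (see the docstring above).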
+ persist_controller = self.hs.get_storage_controllers().persistence
+ assert persist_controller is not None
+ for event, context in events_to_persist:
+ self.get_success(
+ persist_controller.persist_event(
+ event,
+ context,
+ )
+ )
+
+        # We're just testing that persisting the events above does not explode (e.g.
+        # with a `FOREIGN KEY constraint failed` error).
+
def test_joined_room_meta_state_reset(self) -> None:
"""
Test that a state reset on the room name is reflected in the
|