diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py
index c2cfb29866..ea3ca57957 100644
--- a/tests/rest/client/sliding_sync/test_sliding_sync.py
+++ b/tests/rest/client/sliding_sync/test_sliding_sync.py
@@ -15,7 +15,7 @@ import logging
from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple
from unittest.mock import AsyncMock
-from parameterized import parameterized_class
+from parameterized import parameterized, parameterized_class
from typing_extensions import assert_never
from twisted.test.proto_helpers import MemoryReactor
@@ -23,12 +23,16 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import (
AccountDataTypes,
+ EventContentFields,
EventTypes,
+ JoinRules,
Membership,
+ RoomTypes,
)
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict
from synapse.events.snapshot import EventContext
+from synapse.handlers.sliding_sync import StateValues
from synapse.rest.client import account_data, devices, login, receipts, room, sync
from synapse.server import HomeServer
from synapse.types import (
@@ -43,6 +47,7 @@ from synapse.util.stringutils import random_string
from tests import unittest
from tests.server import TimedOutException
+from tests.test_utils.event_injection import create_event
logger = logging.getLogger(__name__)
@@ -421,6 +426,9 @@ class SlidingSyncTestCase(SlidingSyncBase):
self.event_sources = hs.get_event_sources()
self.storage_controllers = hs.get_storage_controllers()
self.account_data_handler = hs.get_account_data_handler()
+ persistence = self.hs.get_storage_controllers().persistence
+ assert persistence is not None
+ self.persistence = persistence
super().prepare(reactor, clock, hs)
@@ -988,3 +996,472 @@ class SlidingSyncTestCase(SlidingSyncBase):
# Make the Sliding Sync request
response_body, _ = self.do_sync(sync_body, tok=user1_tok)
self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+
+ def test_state_reset_room_comes_down_incremental_sync(self) -> None:
+        """Test that a room that we were state reset out of comes down in an
+        incremental sync"""
+
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(
+ user2_id,
+ is_public=True,
+ tok=user2_tok,
+ extra_content={
+ "name": "my super room",
+ },
+ )
+
+ # Create an event for us to point back to for the state reset
+ event_response = self.helper.send(room_id1, "test", tok=user2_tok)
+ event_id = event_response["event_id"]
+
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ # Request all state just to see what we get back when we are
+ # state reset out of the room
+ [StateValues.WILDCARD, StateValues.WILDCARD]
+ ],
+ "timeline_limit": 1,
+ }
+ }
+ }
+
+ # Make the Sliding Sync request
+ response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+ # Make sure we see room1
+ self.assertIncludes(set(response_body["rooms"].keys()), {room_id1}, exact=True)
+ self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+
+ # Trigger a state reset
+ join_rule_event, join_rule_context = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[event_id],
+ type=EventTypes.JoinRules,
+ state_key="",
+ content={"join_rule": JoinRules.INVITE},
+ sender=user2_id,
+ room_id=room_id1,
+ room_version=self.get_success(self.store.get_room_version_id(room_id1)),
+ )
+ )
+ _, join_rule_event_pos, _ = self.get_success(
+ self.persistence.persist_event(join_rule_event, join_rule_context)
+ )
+
+ # FIXME: We're manually busting the cache since
+ # https://github.com/element-hq/synapse/issues/17368 is not solved yet
+ self.store._membership_stream_cache.entity_has_changed(
+ user1_id, join_rule_event_pos.stream
+ )
+
+ # Ensure that the state reset worked and only user2 is in the room now
+ users_in_room = self.get_success(self.store.get_users_in_room(room_id1))
+ self.assertIncludes(set(users_in_room), {user2_id}, exact=True)
+
+ state_map_at_reset = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+
+ # Update the state after user1 was state reset out of the room
+ self.helper.send_state(
+ room_id1,
+ EventTypes.Name,
+ {EventContentFields.ROOM_NAME: "my super duper room"},
+ tok=user2_tok,
+ )
+
+ # Make another Sliding Sync request (incremental)
+ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+ # Expect to see room1 because it is `newly_left` thanks to being state reset out
+ # of it since the last time we synced. We need to let the client know that
+ # something happened and that they are no longer in the room.
+ self.assertIncludes(set(response_body["rooms"].keys()), {room_id1}, exact=True)
+ # We set `initial=True` to indicate that the client should reset the state they
+ # have about the room
+ self.assertEqual(response_body["rooms"][room_id1]["initial"], True)
+ # They shouldn't see anything past the state reset
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ # We should see all the state events in the room
+ state_map_at_reset.values(),
+ exact=True,
+ )
+ # The position where the state reset happened
+ self.assertEqual(
+ response_body["rooms"][room_id1]["bump_stamp"],
+ join_rule_event_pos.stream,
+ response_body["rooms"][room_id1],
+ )
+
+ # Other non-important things. We just want to check what these are so we know
+ # what happens in a state reset scenario.
+ #
+ # Room name was set at the time of the state reset so we should still be able to
+ # see it.
+ self.assertEqual(response_body["rooms"][room_id1]["name"], "my super room")
+ # Could be set but there is no avatar for this room
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("avatar"),
+ response_body["rooms"][room_id1],
+ )
+ # Could be set but this room isn't marked as a DM
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("is_dm"),
+ response_body["rooms"][room_id1],
+ )
+ # Empty timeline because we are not in the room at all (they are all being
+ # filtered out)
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("timeline"),
+ response_body["rooms"][room_id1],
+ )
+ # `limited` since we're not providing any timeline events but there are some in
+ # the room.
+ self.assertEqual(response_body["rooms"][room_id1]["limited"], True)
+ # User is no longer in the room so they can't see this info
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("joined_count"),
+ response_body["rooms"][room_id1],
+ )
+ self.assertIsNone(
+ response_body["rooms"][room_id1].get("invited_count"),
+ response_body["rooms"][room_id1],
+ )
+
+ def test_state_reset_previously_room_comes_down_incremental_sync_with_filters(
+ self,
+ ) -> None:
+ """
+        Test that a room that we were state reset out of is always sent down,
+        regardless of the filters, if it has been sent down the connection before.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Create a space room
+ space_room_id = self.helper.create_room_as(
+ user2_id,
+ tok=user2_tok,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE},
+ "name": "my super space",
+ },
+ )
+
+ # Create an event for us to point back to for the state reset
+ event_response = self.helper.send(space_room_id, "test", tok=user2_tok)
+ event_id = event_response["event_id"]
+
+ self.helper.join(space_room_id, user1_id, tok=user1_tok)
+
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ # Request all state just to see what we get back when we are
+ # state reset out of the room
+ [StateValues.WILDCARD, StateValues.WILDCARD]
+ ],
+ "timeline_limit": 1,
+ "filters": {
+ "room_types": [RoomTypes.SPACE],
+ },
+ }
+ }
+ }
+
+ # Make the Sliding Sync request
+ response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # Make sure we see the space room
+ self.assertIncludes(
+ set(response_body["rooms"].keys()), {space_room_id}, exact=True
+ )
+ self.assertEqual(response_body["rooms"][space_room_id]["initial"], True)
+
+ # Trigger a state reset
+ join_rule_event, join_rule_context = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[event_id],
+ type=EventTypes.JoinRules,
+ state_key="",
+ content={"join_rule": JoinRules.INVITE},
+ sender=user2_id,
+ room_id=space_room_id,
+ room_version=self.get_success(
+ self.store.get_room_version_id(space_room_id)
+ ),
+ )
+ )
+ _, join_rule_event_pos, _ = self.get_success(
+ self.persistence.persist_event(join_rule_event, join_rule_context)
+ )
+
+ # FIXME: We're manually busting the cache since
+ # https://github.com/element-hq/synapse/issues/17368 is not solved yet
+ self.store._membership_stream_cache.entity_has_changed(
+ user1_id, join_rule_event_pos.stream
+ )
+
+ # Ensure that the state reset worked and only user2 is in the room now
+ users_in_room = self.get_success(self.store.get_users_in_room(space_room_id))
+ self.assertIncludes(set(users_in_room), {user2_id}, exact=True)
+
+ state_map_at_reset = self.get_success(
+ self.storage_controllers.state.get_current_state(space_room_id)
+ )
+
+ # Update the state after user1 was state reset out of the room
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.Name,
+ {EventContentFields.ROOM_NAME: "my super duper space"},
+ tok=user2_tok,
+ )
+
+ # User2 also leaves the room so the server is no longer participating in the room
+ # and we don't have access to current state
+ self.helper.leave(space_room_id, user2_id, tok=user2_tok)
+
+ # Make another Sliding Sync request (incremental)
+ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+        # Expect to see the space room because it is `newly_left` thanks to being
+        # state reset out of it since the last time we synced. We need to let the
+        # client know that something happened and that they are no longer in the room.
+ self.assertIncludes(
+ set(response_body["rooms"].keys()), {space_room_id}, exact=True
+ )
+ # We set `initial=True` to indicate that the client should reset the state they
+ # have about the room
+ self.assertEqual(response_body["rooms"][space_room_id]["initial"], True)
+ # They shouldn't see anything past the state reset
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][space_room_id]["required_state"],
+ # We should see all the state events in the room
+ state_map_at_reset.values(),
+ exact=True,
+ )
+ # The position where the state reset happened
+ self.assertEqual(
+ response_body["rooms"][space_room_id]["bump_stamp"],
+ join_rule_event_pos.stream,
+ response_body["rooms"][space_room_id],
+ )
+
+ # Other non-important things. We just want to check what these are so we know
+ # what happens in a state reset scenario.
+ #
+ # Room name was set at the time of the state reset so we should still be able to
+ # see it.
+ self.assertEqual(
+ response_body["rooms"][space_room_id]["name"], "my super space"
+ )
+ # Could be set but there is no avatar for this room
+ self.assertIsNone(
+ response_body["rooms"][space_room_id].get("avatar"),
+ response_body["rooms"][space_room_id],
+ )
+ # Could be set but this room isn't marked as a DM
+ self.assertIsNone(
+ response_body["rooms"][space_room_id].get("is_dm"),
+ response_body["rooms"][space_room_id],
+ )
+ # Empty timeline because we are not in the room at all (they are all being
+ # filtered out)
+ self.assertIsNone(
+ response_body["rooms"][space_room_id].get("timeline"),
+ response_body["rooms"][space_room_id],
+ )
+ # `limited` since we're not providing any timeline events but there are some in
+ # the room.
+ self.assertEqual(response_body["rooms"][space_room_id]["limited"], True)
+ # User is no longer in the room so they can't see this info
+ self.assertIsNone(
+ response_body["rooms"][space_room_id].get("joined_count"),
+ response_body["rooms"][space_room_id],
+ )
+ self.assertIsNone(
+ response_body["rooms"][space_room_id].get("invited_count"),
+ response_body["rooms"][space_room_id],
+ )
+
+ @parameterized.expand(
+ [
+ ("server_leaves_room", True),
+ ("server_participating_in_room", False),
+ ]
+ )
+ def test_state_reset_never_room_incremental_sync_with_filters(
+ self, test_description: str, server_leaves_room: bool
+ ) -> None:
+ """
+        Test that a room that we were state reset out of is sent down if we can
+        figure out its state, or if it was sent down the connection before.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Create a space room
+ space_room_id = self.helper.create_room_as(
+ user2_id,
+ tok=user2_tok,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE},
+ "name": "my super space",
+ },
+ )
+
+ # Create another space room
+ space_room_id2 = self.helper.create_room_as(
+ user2_id,
+ tok=user2_tok,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE},
+ },
+ )
+
+ # Create an event for us to point back to for the state reset
+ event_response = self.helper.send(space_room_id, "test", tok=user2_tok)
+ event_id = event_response["event_id"]
+
+ # User1 joins the rooms
+ #
+ self.helper.join(space_room_id, user1_id, tok=user1_tok)
+ # Join space_room_id2 so that it is at the top of the list
+ self.helper.join(space_room_id2, user1_id, tok=user1_tok)
+
+ # Make a SS request for only the top room.
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 0]],
+ "required_state": [
+ # Request all state just to see what we get back when we are
+ # state reset out of the room
+ [StateValues.WILDCARD, StateValues.WILDCARD]
+ ],
+ "timeline_limit": 1,
+ "filters": {
+ "room_types": [RoomTypes.SPACE],
+ },
+ }
+ }
+ }
+
+ # Make the Sliding Sync request
+ response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+ # Make sure we only see space_room_id2
+ self.assertIncludes(
+ set(response_body["rooms"].keys()), {space_room_id2}, exact=True
+ )
+ self.assertEqual(response_body["rooms"][space_room_id2]["initial"], True)
+
+        # Just create some activity in space_room_id2 so it appears in the next incremental sync
+ self.helper.send(space_room_id2, "test", tok=user2_tok)
+
+ # Trigger a state reset
+ join_rule_event, join_rule_context = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[event_id],
+ type=EventTypes.JoinRules,
+ state_key="",
+ content={"join_rule": JoinRules.INVITE},
+ sender=user2_id,
+ room_id=space_room_id,
+ room_version=self.get_success(
+ self.store.get_room_version_id(space_room_id)
+ ),
+ )
+ )
+ _, join_rule_event_pos, _ = self.get_success(
+ self.persistence.persist_event(join_rule_event, join_rule_context)
+ )
+
+ # FIXME: We're manually busting the cache since
+ # https://github.com/element-hq/synapse/issues/17368 is not solved yet
+ self.store._membership_stream_cache.entity_has_changed(
+ user1_id, join_rule_event_pos.stream
+ )
+
+ # Ensure that the state reset worked and only user2 is in the room now
+ users_in_room = self.get_success(self.store.get_users_in_room(space_room_id))
+ self.assertIncludes(set(users_in_room), {user2_id}, exact=True)
+
+ # Update the state after user1 was state reset out of the room.
+ # This will also bump it to the top of the list.
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.Name,
+ {EventContentFields.ROOM_NAME: "my super duper space"},
+ tok=user2_tok,
+ )
+
+ if server_leaves_room:
+ # User2 also leaves the room so the server is no longer participating in the room
+ # and we don't have access to current state
+ self.helper.leave(space_room_id, user2_id, tok=user2_tok)
+
+ # Make another Sliding Sync request (incremental)
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ # Expand the range to include all rooms
+ "ranges": [[0, 1]],
+ "required_state": [
+ # Request all state just to see what we get back when we are
+ # state reset out of the room
+ [StateValues.WILDCARD, StateValues.WILDCARD]
+ ],
+ "timeline_limit": 1,
+ "filters": {
+ "room_types": [RoomTypes.SPACE],
+ },
+ }
+ }
+ }
+ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+ if self.use_new_tables:
+ if server_leaves_room:
+ # We still only expect to see space_room_id2 because even though we were state
+ # reset out of space_room_id, it was never sent down the connection before so we
+ # don't need to bother the client with it.
+ self.assertIncludes(
+ set(response_body["rooms"].keys()), {space_room_id2}, exact=True
+ )
+ else:
+ # Both rooms show up because we can figure out the state for the
+ # `filters.room_types` if someone is still in the room (we look at the
+ # current state because `room_type` never changes).
+ self.assertIncludes(
+ set(response_body["rooms"].keys()),
+ {space_room_id, space_room_id2},
+ exact=True,
+ )
+ else:
+ # Both rooms show up because we can actually take the time to figure out the
+ # state for the `filters.room_types` in the fallback path (we look at
+ # historical state for `LEAVE` membership).
+ self.assertIncludes(
+ set(response_body["rooms"].keys()),
+ {space_room_id, space_room_id2},
+ exact=True,
+ )
|