diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py
index fd1f5e7fd5..104d141a72 100644
--- a/tests/storage/databases/main/test_events_worker.py
+++ b/tests/storage/databases/main/test_events_worker.py
@@ -20,7 +20,7 @@
#
import json
from contextlib import contextmanager
-from typing import Generator, List, Tuple
+from typing import Generator, List, Set, Tuple
from unittest import mock
from twisted.enterprise.adbapi import ConnectionPool
@@ -295,6 +295,53 @@ class EventCacheTestCase(unittest.HomeserverTestCase):
self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1)
+class GetEventsTestCase(unittest.HomeserverTestCase):
+ """Test `get_events(...)`/`get_events_as_list(...)`"""
+
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store: EventsWorkerStore = hs.get_datastores().main
+
+ def test_get_lots_of_messages(self) -> None:
+ """Sanity check that `get_events(...)`/`get_events_as_list(...)` works"""
+ num_events = 100
+
+ user_id = self.register_user("user", "pass")
+ user_tok = self.login(user_id, "pass")
+
+ room_id = self.helper.create_room_as(user_id, tok=user_tok)
+
+ event_ids: Set[str] = set()
+ for i in range(num_events):
+ event = self.get_success(
+ inject_event(
+ self.hs,
+ room_id=room_id,
+ type="m.room.message",
+ sender=user_id,
+ content={
+ "body": f"foo{i}",
+ "msgtype": "m.text",
+ },
+ )
+ )
+ event_ids.add(event.event_id)
+
+ # Sanity check that we actually created the events
+ self.assertEqual(len(event_ids), num_events)
+
+ # This is the function under test
+ fetched_event_map = self.get_success(self.store.get_events(event_ids))
+
+ # Sanity check that we got the events back
+ self.assertIncludes(fetched_event_map.keys(), event_ids, exact=True)
+
+
class DatabaseOutageTestCase(unittest.HomeserverTestCase):
"""Test event fetching during a database outage."""
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py
index 506d981ce6..49dc973a36 100644
--- a/tests/storage/test__base.py
+++ b/tests/storage/test__base.py
@@ -112,6 +112,24 @@ class UpdateUpsertManyTests(unittest.HomeserverTestCase):
{(1, "user1", "hello"), (2, "user2", "bleb")},
)
+ self.get_success(
+ self.storage.db_pool.runInteraction(
+ "test",
+ self.storage.db_pool.simple_upsert_many_txn,
+ self.table_name,
+ key_names=key_names,
+ key_values=[[2, "user2"]],
+ value_names=[],
+ value_values=[],
+ )
+ )
+
+ # Check results are what we expect
+ self.assertEqual(
+ set(self._dump_table_to_tuple()),
+ {(1, "user1", "hello"), (2, "user2", "bleb")},
+ )
+
def test_simple_update_many(self) -> None:
"""
simple_update_many performs many updates at once.
diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py
index 2859bcf4bd..0e52dd26ce 100644
--- a/tests/storage/test_account_data.py
+++ b/tests/storage/test_account_data.py
@@ -24,6 +24,7 @@ from typing import Iterable, Optional, Set
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import AccountDataTypes
+from synapse.api.errors import Codes, SynapseError
from synapse.server import HomeServer
from synapse.util import Clock
@@ -93,6 +94,20 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
# Check the removed user.
self.assert_ignorers("@another:remote", {self.user})
+ def test_ignoring_self_fails(self) -> None:
+ """Ensure users cannot add themselves to the ignored list."""
+
+ f = self.get_failure(
+ self.store.add_account_data_for_user(
+ self.user,
+ AccountDataTypes.IGNORED_USER_LIST,
+ {"ignored_users": {self.user: {}}},
+ ),
+ SynapseError,
+ ).value
+ self.assertEqual(f.code, 400)
+ self.assertEqual(f.errcode, Codes.INVALID_PARAM)
+
def test_caching(self) -> None:
"""Ensure that caching works properly between different users."""
# The first user ignores a user.
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index 9420d03841..11313fc933 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -349,7 +349,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
)
self.mock_txn.execute.assert_called_once_with(
- "UPDATE tablename SET colC = ?, colD = ? WHERE" " colA = ? AND colB = ?",
+ "UPDATE tablename SET colC = ?, colD = ? WHERE colA = ? AND colB = ?",
[3, 4, 1, 2],
)
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index ba01b038ab..74edca7523 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -211,9 +211,9 @@ class DeviceStoreTestCase(HomeserverTestCase):
even if that means leaving an earlier batch one EDU short of the limit.
"""
- assert self.hs.is_mine_id(
- "@user_id:test"
- ), "Test not valid: this MXID should be considered local"
+ assert self.hs.is_mine_id("@user_id:test"), (
+ "Test not valid: this MXID should be considered local"
+ )
self.get_success(
self.store.set_e2e_cross_signing_key(
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index 088f0d24f9..0500c68e9d 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -114,7 +114,7 @@ def get_all_topologically_sorted_orders(
# This is implemented by Kahn's algorithm, and forking execution each time
# we have a choice over which node to consider next.
- degree_map = {node: 0 for node in nodes}
+ degree_map = dict.fromkeys(nodes, 0)
reverse_graph: Dict[T, Set[T]] = {}
for node, edges in graph.items():
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index 233066bf82..b095090535 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -101,14 +101,6 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
)
self.assertEqual(2, len(http_actions))
- # Fetch unread actions for email pushers.
- email_actions = self.get_success(
- self.store.get_unread_push_actions_for_user_in_range_for_email(
- user_id, 0, 1000, 20
- )
- )
- self.assertEqual(2, len(email_actions))
-
# Send a receipt, which should clear the first action.
self.get_success(
self.store.insert_receipt(
@@ -126,12 +118,6 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
)
)
self.assertEqual(1, len(http_actions))
- email_actions = self.get_success(
- self.store.get_unread_push_actions_for_user_in_range_for_email(
- user_id, 0, 1000, 20
- )
- )
- self.assertEqual(1, len(email_actions))
# Send a thread receipt to clear the thread action.
self.get_success(
@@ -150,12 +136,6 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
)
)
self.assertEqual([], http_actions)
- email_actions = self.get_success(
- self.store.get_unread_push_actions_for_user_in_range_for_email(
- user_id, 0, 1000, 20
- )
- )
- self.assertEqual([], email_actions)
def test_count_aggregation(self) -> None:
# Create a user to receive notifications and send receipts.
diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py
index 0a7c4c9421..cb3d8e19bc 100644
--- a/tests/storage/test_events.py
+++ b/tests/storage/test_events.py
@@ -19,6 +19,7 @@
#
#
+import logging
from typing import List, Optional
from twisted.test.proto_helpers import MemoryReactor
@@ -35,6 +36,8 @@ from synapse.util import Clock
from tests.unittest import HomeserverTestCase
+logger = logging.getLogger(__name__)
+
class ExtremPruneTestCase(HomeserverTestCase):
servlets = [
diff --git a/tests/storage/test_events_bg_updates.py b/tests/storage/test_events_bg_updates.py
new file mode 100644
index 0000000000..ecdf413e3b
--- /dev/null
+++ b/tests/storage/test_events_bg_updates.py
@@ -0,0 +1,157 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2025 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+#
+
+from typing import Dict
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import MAX_DEPTH
+from synapse.api.room_versions import RoomVersion, RoomVersions
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.unittest import HomeserverTestCase
+
+
+class TestFixupMaxDepthCapBgUpdate(HomeserverTestCase):
+ """Test the background update that caps topological_ordering at MAX_DEPTH."""
+
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
+ self.store = self.hs.get_datastores().main
+ self.db_pool = self.store.db_pool
+
+ self.room_id = "!testroom:example.com"
+
+ # Reinsert the background update as it was already run at the start of
+ # the test.
+ self.get_success(
+ self.db_pool.simple_insert(
+ table="background_updates",
+ values={
+ "update_name": "fixup_max_depth_cap",
+ "progress_json": "{}",
+ },
+ )
+ )
+
+ def create_room(self, room_version: RoomVersion) -> Dict[str, int]:
+ """Create a room with a known room version and insert events.
+
+        Returns a mapping from event ID to depth for every inserted event,
+        with depths falling both below and above MAX_DEPTH.
+ """
+
+ # Create a room with a specific room version
+ self.get_success(
+ self.db_pool.simple_insert(
+ table="rooms",
+ values={
+ "room_id": self.room_id,
+ "room_version": room_version.identifier,
+ },
+ )
+ )
+
+ # Insert events with some depths exceeding MAX_DEPTH
+ event_id_to_depth: Dict[str, int] = {}
+ for depth in range(MAX_DEPTH - 5, MAX_DEPTH + 5):
+ event_id = f"$event{depth}:example.com"
+ event_id_to_depth[event_id] = depth
+
+ self.get_success(
+ self.db_pool.simple_insert(
+ table="events",
+ values={
+ "event_id": event_id,
+ "room_id": self.room_id,
+ "topological_ordering": depth,
+ "depth": depth,
+ "type": "m.test",
+ "sender": "@user:test",
+ "processed": True,
+ "outlier": False,
+ },
+ )
+ )
+
+ return event_id_to_depth
+
+ def test_fixup_max_depth_cap_bg_update(self) -> None:
+ """Test that the background update correctly caps topological_ordering
+ at MAX_DEPTH."""
+
+ event_id_to_depth = self.create_room(RoomVersions.V6)
+
+ # Run the background update
+ progress = {"room_id": ""}
+ batch_size = 10
+ num_rooms = self.get_success(
+ self.store.fixup_max_depth_cap_bg_update(progress, batch_size)
+ )
+
+ # Verify the number of rooms processed
+ self.assertEqual(num_rooms, 1)
+
+ # Verify that the topological_ordering of events has been capped at
+ # MAX_DEPTH
+ rows = self.get_success(
+ self.db_pool.simple_select_list(
+ table="events",
+ keyvalues={"room_id": self.room_id},
+ retcols=["event_id", "topological_ordering"],
+ )
+ )
+
+ for event_id, topological_ordering in rows:
+ if event_id_to_depth[event_id] >= MAX_DEPTH:
+ # Events with a depth greater than or equal to MAX_DEPTH should
+ # be capped at MAX_DEPTH.
+ self.assertEqual(topological_ordering, MAX_DEPTH)
+ else:
+ # Events with a depth less than MAX_DEPTH should remain
+ # unchanged.
+ self.assertEqual(topological_ordering, event_id_to_depth[event_id])
+
+ def test_fixup_max_depth_cap_bg_update_old_room_version(self) -> None:
+ """Test that the background update does not cap topological_ordering for
+ rooms with old room versions."""
+
+ event_id_to_depth = self.create_room(RoomVersions.V5)
+
+ # Run the background update
+ progress = {"room_id": ""}
+ batch_size = 10
+ num_rooms = self.get_success(
+ self.store.fixup_max_depth_cap_bg_update(progress, batch_size)
+ )
+
+ # Verify the number of rooms processed
+ self.assertEqual(num_rooms, 0)
+
+        # Fetch the topological_ordering of the events so we can check that it
+        # has not been modified by the background update
+ rows = self.get_success(
+ self.db_pool.simple_select_list(
+ table="events",
+ keyvalues={"room_id": self.room_id},
+ retcols=["event_id", "topological_ordering"],
+ )
+ )
+
+ # Assert that the topological_ordering of events has not been changed
+ # from their depth.
+ self.assertDictEqual(event_id_to_depth, dict(rows))
diff --git a/tests/storage/test_invite_rule.py b/tests/storage/test_invite_rule.py
new file mode 100644
index 0000000000..38c97ecaa3
--- /dev/null
+++ b/tests/storage/test_invite_rule.py
@@ -0,0 +1,167 @@
+from synapse.storage.invite_rule import InviteRule, InviteRulesConfig
+from synapse.types import UserID
+
+from tests import unittest
+
+regular_user = UserID.from_string("@test:example.org")
+allowed_user = UserID.from_string("@allowed:allow.example.org")
+blocked_user = UserID.from_string("@blocked:block.example.org")
+ignored_user = UserID.from_string("@ignored:ignore.example.org")
+
+
+class InviteFilterTestCase(unittest.TestCase):
+ def test_empty(self) -> None:
+ """Permit by default"""
+ config = InviteRulesConfig(None)
+ self.assertEqual(
+ config.get_invite_rule(regular_user.to_string()), InviteRule.ALLOW
+ )
+
+ def test_ignore_invalid(self) -> None:
+ """Invalid strings are ignored"""
+ config = InviteRulesConfig({"blocked_users": ["not a user"]})
+ self.assertEqual(
+ config.get_invite_rule(blocked_user.to_string()), InviteRule.ALLOW
+ )
+
+ def test_user_blocked(self) -> None:
+ """Permit all, except explicitly blocked users"""
+ config = InviteRulesConfig({"blocked_users": [blocked_user.to_string()]})
+ self.assertEqual(
+ config.get_invite_rule(blocked_user.to_string()), InviteRule.BLOCK
+ )
+ self.assertEqual(
+ config.get_invite_rule(regular_user.to_string()), InviteRule.ALLOW
+ )
+
+ def test_user_ignored(self) -> None:
+ """Permit all, except explicitly ignored users"""
+ config = InviteRulesConfig({"ignored_users": [ignored_user.to_string()]})
+ self.assertEqual(
+ config.get_invite_rule(ignored_user.to_string()), InviteRule.IGNORE
+ )
+ self.assertEqual(
+ config.get_invite_rule(regular_user.to_string()), InviteRule.ALLOW
+ )
+
+ def test_user_precedence(self) -> None:
+ """Always take allowed over ignored, ignored over blocked, and then block."""
+ config = InviteRulesConfig(
+ {
+ "allowed_users": [allowed_user.to_string()],
+ "ignored_users": [allowed_user.to_string(), ignored_user.to_string()],
+ "blocked_users": [
+ allowed_user.to_string(),
+ ignored_user.to_string(),
+ blocked_user.to_string(),
+ ],
+ }
+ )
+ self.assertEqual(
+ config.get_invite_rule(allowed_user.to_string()), InviteRule.ALLOW
+ )
+ self.assertEqual(
+ config.get_invite_rule(ignored_user.to_string()), InviteRule.IGNORE
+ )
+ self.assertEqual(
+ config.get_invite_rule(blocked_user.to_string()), InviteRule.BLOCK
+ )
+
+ def test_server_blocked(self) -> None:
+ """Block all users on the server except those allowed."""
+ user_on_same_server = UserID("blocked", allowed_user.domain)
+ config = InviteRulesConfig(
+ {
+ "allowed_users": [allowed_user.to_string()],
+ "blocked_servers": [allowed_user.domain],
+ }
+ )
+ self.assertEqual(
+ config.get_invite_rule(allowed_user.to_string()), InviteRule.ALLOW
+ )
+ self.assertEqual(
+ config.get_invite_rule(user_on_same_server.to_string()), InviteRule.BLOCK
+ )
+
+ def test_server_ignored(self) -> None:
+ """Ignore all users on the server except those allowed."""
+ user_on_same_server = UserID("ignored", allowed_user.domain)
+ config = InviteRulesConfig(
+ {
+ "allowed_users": [allowed_user.to_string()],
+ "ignored_servers": [allowed_user.domain],
+ }
+ )
+ self.assertEqual(
+ config.get_invite_rule(allowed_user.to_string()), InviteRule.ALLOW
+ )
+ self.assertEqual(
+ config.get_invite_rule(user_on_same_server.to_string()), InviteRule.IGNORE
+ )
+
+ def test_server_allow(self) -> None:
+        """Allow all from a server except explicitly blocked or ignored users."""
+ blocked_user_on_same_server = UserID("blocked", allowed_user.domain)
+ ignored_user_on_same_server = UserID("ignored", allowed_user.domain)
+ allowed_user_on_same_server = UserID("another", allowed_user.domain)
+ config = InviteRulesConfig(
+ {
+ "ignored_users": [ignored_user_on_same_server.to_string()],
+ "blocked_users": [blocked_user_on_same_server.to_string()],
+                "allowed_servers": [allowed_user.domain],
+ }
+ )
+ self.assertEqual(
+ config.get_invite_rule(allowed_user.to_string()), InviteRule.ALLOW
+ )
+ self.assertEqual(
+ config.get_invite_rule(allowed_user_on_same_server.to_string()),
+ InviteRule.ALLOW,
+ )
+ self.assertEqual(
+ config.get_invite_rule(blocked_user_on_same_server.to_string()),
+ InviteRule.BLOCK,
+ )
+ self.assertEqual(
+ config.get_invite_rule(ignored_user_on_same_server.to_string()),
+ InviteRule.IGNORE,
+ )
+
+ def test_server_precedence(self) -> None:
+ """Always take allowed over ignored, ignored over blocked, and then block."""
+ config = InviteRulesConfig(
+ {
+ "allowed_servers": [allowed_user.domain],
+ "ignored_servers": [allowed_user.domain, ignored_user.domain],
+ "blocked_servers": [
+ allowed_user.domain,
+ ignored_user.domain,
+ blocked_user.domain,
+ ],
+ }
+ )
+ self.assertEqual(
+ config.get_invite_rule(allowed_user.to_string()), InviteRule.ALLOW
+ )
+ self.assertEqual(
+ config.get_invite_rule(ignored_user.to_string()), InviteRule.IGNORE
+ )
+ self.assertEqual(
+ config.get_invite_rule(blocked_user.to_string()), InviteRule.BLOCK
+ )
+
+ def test_server_glob(self) -> None:
+ """Test that glob patterns match"""
+ config = InviteRulesConfig({"blocked_servers": ["*.example.org"]})
+ self.assertEqual(
+ config.get_invite_rule(allowed_user.to_string()), InviteRule.BLOCK
+ )
+ self.assertEqual(
+ config.get_invite_rule(ignored_user.to_string()), InviteRule.BLOCK
+ )
+ self.assertEqual(
+ config.get_invite_rule(blocked_user.to_string()), InviteRule.BLOCK
+ )
+ self.assertEqual(
+ config.get_invite_rule(regular_user.to_string()), InviteRule.ALLOW
+ )
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 15ae582051..c453c8b642 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -32,13 +32,6 @@ from tests.unittest import default_config, override_config
FORTY_DAYS = 40 * 24 * 60 * 60
-def gen_3pids(count: int) -> List[Dict[str, Any]]:
- """Generate `count` threepids as a list."""
- return [
- {"medium": "email", "address": "user%i@matrix.org" % i} for i in range(count)
- ]
-
-
class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
def default_config(self) -> Dict[str, Any]:
config = default_config("test")
@@ -57,87 +50,6 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
# Advance the clock a bit
self.reactor.advance(FORTY_DAYS)
- @override_config({"max_mau_value": 3, "mau_limit_reserved_threepids": gen_3pids(3)})
- def test_initialise_reserved_users(self) -> None:
- threepids = self.hs.config.server.mau_limits_reserved_threepids
-
- # register three users, of which two have reserved 3pids, and a third
- # which is a support user.
- user1 = "@user1:server"
- user1_email = threepids[0]["address"]
- user2 = "@user2:server"
- user2_email = threepids[1]["address"]
- user3 = "@user3:server"
-
- self.get_success(self.store.register_user(user_id=user1))
- self.get_success(self.store.register_user(user_id=user2))
- self.get_success(
- self.store.register_user(user_id=user3, user_type=UserTypes.SUPPORT)
- )
-
- now = int(self.hs.get_clock().time_msec())
- self.get_success(
- self.store.user_add_threepid(user1, "email", user1_email, now, now)
- )
- self.get_success(
- self.store.user_add_threepid(user2, "email", user2_email, now, now)
- )
-
- # XXX why are we doing this here? this function is only run at startup
- # so it is odd to re-run it here.
- self.get_success(
- self.store.db_pool.runInteraction(
- "initialise", self.store._initialise_reserved_users, threepids
- )
- )
-
- # the number of users we expect will be counted against the mau limit
- # -1 because user3 is a support user and does not count
- user_num = len(threepids) - 1
-
- # Check the number of active users. Ensure user3 (support user) is not counted
- active_count = self.get_success(self.store.get_monthly_active_count())
- self.assertEqual(active_count, user_num)
-
- # Test each of the registered users is marked as active
- timestamp = self.get_success(self.store.user_last_seen_monthly_active(user1))
- # Mypy notes that one shouldn't compare Optional[int] to 0 with assertGreater.
- # Check that timestamp really is an int.
- assert timestamp is not None
- self.assertGreater(timestamp, 0)
- timestamp = self.get_success(self.store.user_last_seen_monthly_active(user2))
- assert timestamp is not None
- self.assertGreater(timestamp, 0)
-
- # Test that users with reserved 3pids are not removed from the MAU table
- # XXX some of this is redundant. poking things into the config shouldn't
- # work, and in any case it's not obvious what we expect to happen when
- # we advance the reactor.
- self.hs.config.server.max_mau_value = 0
- self.reactor.advance(FORTY_DAYS)
- self.hs.config.server.max_mau_value = 5
-
- self.get_success(self.store.reap_monthly_active_users())
-
- active_count = self.get_success(self.store.get_monthly_active_count())
- self.assertEqual(active_count, user_num)
-
- # Add some more users and check they are counted as active
- ru_count = 2
-
- self.get_success(self.store.upsert_monthly_active_user("@ru1:server"))
- self.get_success(self.store.upsert_monthly_active_user("@ru2:server"))
-
- active_count = self.get_success(self.store.get_monthly_active_count())
- self.assertEqual(active_count, user_num + ru_count)
-
- # now run the reaper and check that the number of active users is reduced
- # to max_mau_value
- self.get_success(self.store.reap_monthly_active_users())
-
- active_count = self.get_success(self.store.get_monthly_active_count())
- self.assertEqual(active_count, 3)
-
def test_can_insert_and_count_mau(self) -> None:
count = self.get_success(self.store.get_monthly_active_count())
self.assertEqual(count, 0)
@@ -206,49 +118,6 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
count = self.get_success(self.store.get_monthly_active_count())
self.assertEqual(count, 0)
- # Note that below says mau_limit (no s), this is the name of the config
- # value, although it gets stored on the config object as mau_limits.
- @override_config({"max_mau_value": 5, "mau_limit_reserved_threepids": gen_3pids(5)})
- def test_reap_monthly_active_users_reserved_users(self) -> None:
- """Tests that reaping correctly handles reaping where reserved users are
- present"""
- threepids = self.hs.config.server.mau_limits_reserved_threepids
- initial_users = len(threepids)
- reserved_user_number = initial_users - 1
- for i in range(initial_users):
- user = "@user%d:server" % i
- email = "user%d@matrix.org" % i
-
- self.get_success(self.store.upsert_monthly_active_user(user))
-
- # Need to ensure that the most recent entries in the
- # monthly_active_users table are reserved
- now = int(self.hs.get_clock().time_msec())
- if i != 0:
- self.get_success(
- self.store.register_user(user_id=user, password_hash=None)
- )
- self.get_success(
- self.store.user_add_threepid(user, "email", email, now, now)
- )
-
- self.get_success(
- self.store.db_pool.runInteraction(
- "initialise", self.store._initialise_reserved_users, threepids
- )
- )
-
- count = self.get_success(self.store.get_monthly_active_count())
- self.assertEqual(count, initial_users)
-
- users = self.get_success(self.store.get_registered_reserved_users())
- self.assertEqual(len(users), reserved_user_number)
-
- self.get_success(self.store.reap_monthly_active_users())
-
- count = self.get_success(self.store.get_monthly_active_count())
- self.assertEqual(count, self.hs.config.server.max_mau_value)
-
def test_populate_monthly_users_is_guest(self) -> None:
# Test that guest users are not added to mau list
user_id = "@user_id:host"
@@ -289,46 +158,6 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.store.upsert_monthly_active_user.assert_not_called()
- def test_get_reserved_real_user_account(self) -> None:
- # Test no reserved users, or reserved threepids
- users = self.get_success(self.store.get_registered_reserved_users())
- self.assertEqual(len(users), 0)
-
- # Test reserved users but no registered users
- user1 = "@user1:example.com"
- user2 = "@user2:example.com"
-
- user1_email = "user1@example.com"
- user2_email = "user2@example.com"
- threepids = [
- {"medium": "email", "address": user1_email},
- {"medium": "email", "address": user2_email},
- ]
-
- self.hs.config.server.mau_limits_reserved_threepids = threepids
- d = self.store.db_pool.runInteraction(
- "initialise", self.store._initialise_reserved_users, threepids
- )
- self.get_success(d)
-
- users = self.get_success(self.store.get_registered_reserved_users())
- self.assertEqual(len(users), 0)
-
- # Test reserved registered users
- self.get_success(self.store.register_user(user_id=user1, password_hash=None))
- self.get_success(self.store.register_user(user_id=user2, password_hash=None))
-
- now = int(self.hs.get_clock().time_msec())
- self.get_success(
- self.store.user_add_threepid(user1, "email", user1_email, now, now)
- )
- self.get_success(
- self.store.user_add_threepid(user2, "email", user2_email, now, now)
- )
-
- users = self.get_success(self.store.get_registered_reserved_users())
- self.assertEqual(len(users), len(threepids))
-
def test_support_user_not_add_to_mau_limits(self) -> None:
support_user_id = "@support:test"
diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py
index 080d5640a5..0aa14fd1f4 100644
--- a/tests/storage/test_purge.py
+++ b/tests/storage/test_purge.py
@@ -23,6 +23,8 @@ from twisted.test.proto_helpers import MemoryReactor
from synapse.api.errors import NotFoundError, SynapseError
from synapse.rest.client import room
from synapse.server import HomeServer
+from synapse.types.state import StateFilter
+from synapse.types.storage import _BackgroundUpdates
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@@ -40,6 +42,8 @@ class PurgeTests(HomeserverTestCase):
self.room_id = self.helper.create_room_as(self.user_id)
self.store = hs.get_datastores().main
+ self.state_store = hs.get_datastores().state
+ self.state_deletion_store = hs.get_datastores().state_deletion
self._storage_controllers = self.hs.get_storage_controllers()
def test_purge_history(self) -> None:
@@ -128,3 +132,328 @@ class PurgeTests(HomeserverTestCase):
self.store._invalidate_local_get_event_cache(create_event.event_id)
self.get_failure(self.store.get_event(create_event.event_id), NotFoundError)
self.get_failure(self.store.get_event(first["event_id"]), NotFoundError)
+
+ def test_purge_history_deletes_state_groups(self) -> None:
+ """Test that unreferenced state groups get cleaned up after purge"""
+
+ # Send four state changes to the room.
+ first = self.helper.send_state(
+ self.room_id, event_type="m.foo", body={"test": 1}
+ )
+ second = self.helper.send_state(
+ self.room_id, event_type="m.foo", body={"test": 2}
+ )
+ third = self.helper.send_state(
+ self.room_id, event_type="m.foo", body={"test": 3}
+ )
+ last = self.helper.send_state(
+ self.room_id, event_type="m.foo", body={"test": 4}
+ )
+
+ # Get references to the state groups
+ event_to_groups = self.get_success(
+ self.store._get_state_group_for_events(
+ [
+ first["event_id"],
+ second["event_id"],
+ third["event_id"],
+ last["event_id"],
+ ]
+ )
+ )
+
+ # Get the topological token
+ token = self.get_success(
+ self.store.get_topological_token_for_event(last["event_id"])
+ )
+ token_str = self.get_success(token.to_string(self.hs.get_datastores().main))
+
+ # Purge everything before this topological token
+ self.get_success(
+ self._storage_controllers.purge_events.purge_history(
+ self.room_id, token_str, True
+ )
+ )
+
+ # Advance so that the background jobs to delete the state groups runs
+ self.reactor.advance(
+ 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+ )
+
+        # We expect all the state groups associated with the events above,
+        # except the last one, to return no state.
+ state_groups = self.get_success(
+ self.state_store._get_state_groups_from_groups(
+ list(event_to_groups.values()), StateFilter.all()
+ )
+ )
+ first_state = state_groups[event_to_groups[first["event_id"]]]
+ second_state = state_groups[event_to_groups[second["event_id"]]]
+ third_state = state_groups[event_to_groups[third["event_id"]]]
+ last_state = state_groups[event_to_groups[last["event_id"]]]
+
+ self.assertEqual(first_state, {})
+ self.assertEqual(second_state, {})
+ self.assertEqual(third_state, {})
+ self.assertNotEqual(last_state, {})
+
+ def test_purge_unreferenced_state_group(self) -> None:
+ """Test that purging a room also gets rid of unreferenced state groups
+ it encounters during the purge.
+
+ This is important, as otherwise these unreferenced state groups get
+ "de-deltaed" during the purge process, consuming lots of disk space.
+ """
+
+ self.helper.send(self.room_id, body="test1")
+ state1 = self.helper.send_state(
+ self.room_id, "org.matrix.test", body={"number": 2}
+ )
+ state2 = self.helper.send_state(
+ self.room_id, "org.matrix.test", body={"number": 3}
+ )
+ self.helper.send(self.room_id, body="test4")
+ last = self.helper.send(self.room_id, body="test5")
+
+ # Create an unreferenced state group that has a prev group of one of the
+ # to-be-purged events.
+ prev_group = self.get_success(
+ self.store._get_state_group_for_event(state1["event_id"])
+ )
+ unreferenced_state_group = self.get_success(
+ self.state_store.store_state_group(
+ event_id=last["event_id"],
+ room_id=self.room_id,
+ prev_group=prev_group,
+ delta_ids={("org.matrix.test", ""): state2["event_id"]},
+ current_state_ids=None,
+ )
+ )
+
+ # Get the topological token
+ token = self.get_success(
+ self.store.get_topological_token_for_event(last["event_id"])
+ )
+ token_str = self.get_success(token.to_string(self.hs.get_datastores().main))
+
+ # Purge everything before this topological token
+ self.get_success(
+ self._storage_controllers.purge_events.purge_history(
+ self.room_id, token_str, True
+ )
+ )
+
+ # Advance so that the background jobs to delete the state groups runs
+ self.reactor.advance(
+ 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+ )
+
+ # We expect that the unreferenced state group has been deleted from all tables.
+ row = self.get_success(
+ self.state_store.db_pool.simple_select_one_onecol(
+ table="state_groups",
+ keyvalues={"id": unreferenced_state_group},
+ retcol="id",
+ allow_none=True,
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertIsNone(row)
+
+ row = self.get_success(
+ self.state_store.db_pool.simple_select_one_onecol(
+ table="state_groups_state",
+ keyvalues={"state_group": unreferenced_state_group},
+ retcol="state_group",
+ allow_none=True,
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertIsNone(row)
+
+ row = self.get_success(
+ self.state_store.db_pool.simple_select_one_onecol(
+ table="state_group_edges",
+ keyvalues={"state_group": unreferenced_state_group},
+ retcol="state_group",
+ allow_none=True,
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertIsNone(row)
+
+ row = self.get_success(
+ self.state_store.db_pool.simple_select_one_onecol(
+ table="state_groups_pending_deletion",
+ keyvalues={"state_group": unreferenced_state_group},
+ retcol="state_group",
+ allow_none=True,
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertIsNone(row)
+
+ # We expect there to now only be one state group for the room, which is
+ # the state group of the last event (as the only outlier).
+ state_groups = self.get_success(
+ self.state_store.db_pool.simple_select_onecol(
+ table="state_groups",
+ keyvalues={"room_id": self.room_id},
+ retcol="id",
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertEqual(len(state_groups), 1)
+
+ def test_clear_unreferenced_state_groups(self) -> None:
+ """Test that any unreferenced state groups are automatically cleaned up."""
+
+ self.helper.send(self.room_id, body="test1")
+ state1 = self.helper.send_state(
+ self.room_id, "org.matrix.test", body={"number": 2}
+ )
+ # Create enough state events to require multiple batches of
+ # mark_unreferenced_state_groups_for_deletion_bg_update to be run.
+ for i in range(200):
+ self.helper.send_state(self.room_id, "org.matrix.test", body={"number": i})
+ self.helper.send(self.room_id, body="test4")
+ last = self.helper.send(self.room_id, body="test5")
+
+ # Create an unreferenced state group that has no prev group.
+ unreferenced_free_state_group = self.get_success(
+ self.state_store.store_state_group(
+ event_id=last["event_id"],
+ room_id=self.room_id,
+ prev_group=None,
+ delta_ids={("org.matrix.test", ""): state1["event_id"]},
+ current_state_ids={("org.matrix.test", ""): ""},
+ )
+ )
+
+ # Create some unreferenced state groups that have a prev group of one of the
+ # existing state groups.
+ prev_group = self.get_success(
+ self.store._get_state_group_for_event(state1["event_id"])
+ )
+ unreferenced_end_state_group = self.get_success(
+ self.state_store.store_state_group(
+ event_id=last["event_id"],
+ room_id=self.room_id,
+ prev_group=prev_group,
+ delta_ids={("org.matrix.test", ""): state1["event_id"]},
+ current_state_ids=None,
+ )
+ )
+ another_unreferenced_end_state_group = self.get_success(
+ self.state_store.store_state_group(
+ event_id=last["event_id"],
+ room_id=self.room_id,
+ prev_group=unreferenced_end_state_group,
+ delta_ids={("org.matrix.test", ""): state1["event_id"]},
+ current_state_ids=None,
+ )
+ )
+
+ # Add some other unreferenced state groups which lead to a referenced state
+ # group.
+ # These state groups should not get deleted.
+ chain_state_group = self.get_success(
+ self.state_store.store_state_group(
+ event_id=last["event_id"],
+ room_id=self.room_id,
+ prev_group=None,
+ delta_ids={("org.matrix.test", ""): ""},
+ current_state_ids={("org.matrix.test", ""): ""},
+ )
+ )
+ chain_state_group_2 = self.get_success(
+ self.state_store.store_state_group(
+ event_id=last["event_id"],
+ room_id=self.room_id,
+ prev_group=chain_state_group,
+ delta_ids={("org.matrix.test", ""): ""},
+ current_state_ids=None,
+ )
+ )
+ referenced_chain_state_group = self.get_success(
+ self.state_store.store_state_group(
+ event_id=last["event_id"],
+ room_id=self.room_id,
+ prev_group=chain_state_group_2,
+ delta_ids={("org.matrix.test", ""): ""},
+ current_state_ids=None,
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "event_to_state_groups",
+ {
+ "event_id": "$new_event",
+ "state_group": referenced_chain_state_group,
+ },
+ )
+ )
+
+ # Insert and run the background update.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE,
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Advance so that the background job to delete the state groups runs
+ self.reactor.advance(
+ 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+ )
+
+ # We expect that the unreferenced free state group has been deleted.
+ row = self.get_success(
+ self.state_store.db_pool.simple_select_one_onecol(
+ table="state_groups",
+ keyvalues={"id": unreferenced_free_state_group},
+ retcol="id",
+ allow_none=True,
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertIsNone(row)
+
+ # We expect that both unreferenced end state groups have been deleted.
+ row = self.get_success(
+ self.state_store.db_pool.simple_select_one_onecol(
+ table="state_groups",
+ keyvalues={"id": unreferenced_end_state_group},
+ retcol="id",
+ allow_none=True,
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertIsNone(row)
+ row = self.get_success(
+ self.state_store.db_pool.simple_select_one_onecol(
+ table="state_groups",
+ keyvalues={"id": another_unreferenced_end_state_group},
+ retcol="id",
+ allow_none=True,
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertIsNone(row)
+
+        # We expect only the unreferenced state groups to have been deleted; the
+        # remaining 210 state groups for the room are kept.
+ state_groups = self.get_success(
+ self.state_store.db_pool.simple_select_onecol(
+ table="state_groups",
+ keyvalues={"room_id": self.room_id},
+ retcol="id",
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertEqual(len(state_groups), 210)
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index 14e3871dc1..ebad759fd1 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -21,7 +21,6 @@
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import UserTypes
-from synapse.api.errors import ThreepidValidationError
from synapse.server import HomeServer
from synapse.types import JsonDict, UserID, UserInfo
from synapse.util import Clock
@@ -145,39 +144,6 @@ class RegistrationStoreTestCase(HomeserverTestCase):
res = self.get_success(self.store.is_support_user(SUPPORT_USER))
self.assertTrue(res)
- def test_3pid_inhibit_invalid_validation_session_error(self) -> None:
- """Tests that enabling the configuration option to inhibit 3PID errors on
- /requestToken also inhibits validation errors caused by an unknown session ID.
- """
-
- # Check that, with the config setting set to false (the default value), a
- # validation error is caused by the unknown session ID.
- e = self.get_failure(
- self.store.validate_threepid_session(
- "fake_sid",
- "fake_client_secret",
- "fake_token",
- 0,
- ),
- ThreepidValidationError,
- )
- self.assertEqual(e.value.msg, "Unknown session_id", e)
-
- # Set the config setting to true.
- self.store._ignore_unknown_session_error = True
-
- # Check that now the validation error is caused by the token not matching.
- e = self.get_failure(
- self.store.validate_threepid_session(
- "fake_sid",
- "fake_client_secret",
- "fake_token",
- 0,
- ),
- ThreepidValidationError,
- )
- self.assertEqual(e.value.msg, "Validation token not found or has expired", e)
-
class ApprovalRequiredRegistrationTestCase(HomeserverTestCase):
def default_config(self) -> JsonDict:
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 418b556108..330fea0e62 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -24,7 +24,7 @@ from typing import List, Optional, Tuple, cast
from twisted.test.proto_helpers import MemoryReactor
-from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.constants import EventContentFields, EventTypes, JoinRules, Membership
from synapse.api.room_versions import RoomVersions
from synapse.rest import admin
from synapse.rest.admin import register_servlets_for_client_rest_resource
@@ -38,6 +38,7 @@ from synapse.util import Clock
from tests import unittest
from tests.server import TestHomeServer
from tests.test_utils import event_injection
+from tests.test_utils.event_injection import create_event
from tests.unittest import skip_unless
logger = logging.getLogger(__name__)
@@ -54,6 +55,10 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
# We can't test the RoomMemberStore on its own without the other event
# storage logic
self.store = hs.get_datastores().main
+ self.state_handler = self.hs.get_state_handler()
+ persistence = self.hs.get_storage_controllers().persistence
+ assert persistence is not None
+ self.persistence = persistence
self.u_alice = self.register_user("alice", "pass")
self.t_alice = self.login("alice", "pass")
@@ -220,31 +225,166 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
)
def test_join_locally_forgotten_room(self) -> None:
- """Tests if a user joins a forgotten room the room is not forgotten anymore."""
- self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
- self.assertFalse(
- self.get_success(self.store.is_locally_forgotten_room(self.room))
+ """
+        Tests that if a user joins a forgotten room, the room is not forgotten anymore.
+
+        Since a room can't be re-joined if everyone has left, this can only happen
+        with a room with remote users in it.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote room
+ creator = "@user:other"
+ room_id = "!foo:other"
+ room_version = RoomVersions.V10
+ shared_kwargs = {
+ "room_id": room_id,
+ "room_version": room_version.identifier,
+ }
+
+ create_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[],
+ type=EventTypes.Create,
+ state_key="",
+ content={
+ # The `ROOM_CREATOR` field could be removed if we used a room
+ # version > 10 (in favor of relying on `sender`)
+ EventContentFields.ROOM_CREATOR: creator,
+ EventContentFields.ROOM_VERSION: room_version.identifier,
+ },
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+ creator_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[create_tuple[0].event_id],
+ auth_event_ids=[create_tuple[0].event_id],
+ type=EventTypes.Member,
+ state_key=creator,
+ content={"membership": Membership.JOIN},
+ sender=creator,
+ **shared_kwargs,
+ )
)
- # after leaving and forget the room, it is forgotten
- self.get_success(
- event_injection.inject_member_event(
- self.hs, self.room, self.u_alice, "leave"
+ remote_events_and_contexts = [
+ create_tuple,
+ creator_tuple,
+ ]
+
+ # Ensure the local HS knows the room version
+ self.get_success(self.store.store_room(room_id, creator, False, room_version))
+
+ # Persist these events as backfilled events.
+ for event, context in remote_events_and_contexts:
+ self.get_success(
+ self.persistence.persist_event(event, context, backfilled=True)
+ )
+
+ # Now we join the local user to the room. We want to make this feel as close to
+ # the real `process_remote_join()` as possible but we'd like to avoid some of
+ # the auth checks that would be done in the real code.
+ #
+ # FIXME: The test was originally written using this less-real
+ # `persist_event(...)` shortcut but it would be nice to use the real remote join
+ # process in a `FederatingHomeserverTestCase`.
+ flawed_join_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[creator_tuple[0].event_id],
+ # This doesn't work correctly to create an `EventContext` that includes
+ # both of these state events. I assume it's because we're working on our
+ # local homeserver which has the remote state set as `outlier`. We have
+ # to create our own EventContext below to get this right.
+ auth_event_ids=[create_tuple[0].event_id],
+ type=EventTypes.Member,
+ state_key=user1_id,
+ content={"membership": Membership.JOIN},
+ sender=user1_id,
+ **shared_kwargs,
)
)
- self.get_success(self.store.forget(self.u_alice, self.room))
- self.assertTrue(
- self.get_success(self.store.is_locally_forgotten_room(self.room))
+ # We have to create our own context to get the state set correctly. If we use
+ # the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
+ # table will only have the join event in it which should never happen in our
+ # real server.
+ join_event = flawed_join_tuple[0]
+ join_context = self.get_success(
+ self.state_handler.compute_event_context(
+ join_event,
+ state_ids_before_event={
+ (e.type, e.state_key): e.event_id for e in [create_tuple[0]]
+ },
+ partial_state=False,
+ )
)
+ self.get_success(self.persistence.persist_event(join_event, join_context))
- # after rejoin the room is not forgotten anymore
- self.get_success(
- event_injection.inject_member_event(
- self.hs, self.room, self.u_alice, "join"
+ # The room shouldn't be forgotten because the local user just joined
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(room_id))
+ )
+
+ # After all of the local users (there is only user1) leave and forgetting the
+ # room, it is forgotten
+ user1_leave_response = self.helper.leave(room_id, user1_id, tok=user1_tok)
+ user1_leave_event = self.get_success(
+ self.store.get_event(user1_leave_response["event_id"])
+ )
+ self.get_success(self.store.forget(user1_id, room_id))
+ self.assertTrue(self.get_success(self.store.is_locally_forgotten_room(room_id)))
+
+ # Join the local user to the room (again). We want to make this feel as close to
+ # the real `process_remote_join()` as possible but we'd like to avoid some of
+ # the auth checks that would be done in the real code.
+ #
+ # FIXME: The test was originally written using this less-real
+ # `event_injection.inject_member_event(...)` shortcut but it would be nice to
+ # use the real remote join process in a `FederatingHomeserverTestCase`.
+ flawed_join_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[user1_leave_response["event_id"]],
+ # This doesn't work correctly to create an `EventContext` that includes
+ # both of these state events. I assume it's because we're working on our
+ # local homeserver which has the remote state set as `outlier`. We have
+ # to create our own EventContext below to get this right.
+ auth_event_ids=[
+ create_tuple[0].event_id,
+ user1_leave_response["event_id"],
+ ],
+ type=EventTypes.Member,
+ state_key=user1_id,
+ content={"membership": Membership.JOIN},
+ sender=user1_id,
+ **shared_kwargs,
+ )
+ )
+ # We have to create our own context to get the state set correctly. If we use
+ # the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
+ # table will only have the join event in it which should never happen in our
+ # real server.
+ join_event = flawed_join_tuple[0]
+ join_context = self.get_success(
+ self.state_handler.compute_event_context(
+ join_event,
+ state_ids_before_event={
+ (e.type, e.state_key): e.event_id
+ for e in [create_tuple[0], user1_leave_event]
+ },
+ partial_state=False,
)
)
+ self.get_success(self.persistence.persist_event(join_event, join_context))
+
+ # After the local user rejoins the remote room, it isn't forgotten anymore
self.assertFalse(
- self.get_success(self.store.is_locally_forgotten_room(self.room))
+ self.get_success(self.store.is_locally_forgotten_room(room_id))
)
diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py
new file mode 100644
index 0000000000..53212f7c45
--- /dev/null
+++ b/tests/storage/test_sliding_sync_tables.py
@@ -0,0 +1,5119 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+# Originally licensed under the Apache License, Version 2.0:
+# <http://www.apache.org/licenses/LICENSE-2.0>.
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+import logging
+from typing import Dict, List, Optional, Tuple, cast
+
+import attr
+from parameterized import parameterized
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import EventContentFields, EventTypes, Membership, RoomTypes
+from synapse.api.room_versions import RoomVersions
+from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict
+from synapse.events.snapshot import EventContext
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.databases.main.events import DeltaState
+from synapse.storage.databases.main.events_bg_updates import (
+ _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+ _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+)
+from synapse.types import create_requester
+from synapse.types.storage import _BackgroundUpdates
+from synapse.util import Clock
+
+from tests.test_utils.event_injection import create_event
+from tests.unittest import HomeserverTestCase
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _SlidingSyncJoinedRoomResult:
+ room_id: str
+ # `event_stream_ordering` is only optional to allow easier semantics when we make
+    # expected objects from `event.internal_metadata.stream_ordering` in the tests.
+ # `event.internal_metadata.stream_ordering` is marked optional because it only
+ # exists for persisted events but in the context of these tests, we're only working
+ # with persisted events and we're making comparisons so we will find any mismatch.
+ event_stream_ordering: Optional[int]
+ bump_stamp: Optional[int]
+ room_type: Optional[str]
+ room_name: Optional[str]
+ is_encrypted: bool
+ tombstone_successor_room_id: Optional[str]
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _SlidingSyncMembershipSnapshotResult:
+ room_id: str
+ user_id: str
+ sender: str
+ membership_event_id: str
+ membership: str
+ # `event_stream_ordering` is only optional to allow easier semantics when we make
+    # expected objects from `event.internal_metadata.stream_ordering` in the tests.
+ # `event.internal_metadata.stream_ordering` is marked optional because it only
+ # exists for persisted events but in the context of these tests, we're only working
+ # with persisted events and we're making comparisons so we will find any mismatch.
+ event_stream_ordering: Optional[int]
+ has_known_state: bool
+ room_type: Optional[str]
+ room_name: Optional[str]
+ is_encrypted: bool
+ tombstone_successor_room_id: Optional[str]
+ # Make this default to "not forgotten" because it doesn't apply to many tests and we
+ # don't want to force all of the tests to deal with it.
+ forgotten: bool = False
+
+
+class SlidingSyncTablesTestCaseBase(HomeserverTestCase):
+ """
+ Helpers to deal with testing that the
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` database tables are
+ populated correctly.
+ """
+
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.storage_controllers = hs.get_storage_controllers()
+ persist_events_store = self.hs.get_datastores().persist_events
+ assert persist_events_store is not None
+ self.persist_events_store = persist_events_store
+
+ persist_controller = self.hs.get_storage_controllers().persistence
+ assert persist_controller is not None
+ self.persist_controller = persist_controller
+
+ self.state_handler = self.hs.get_state_handler()
+
+ def _get_sliding_sync_joined_rooms(self) -> Dict[str, _SlidingSyncJoinedRoomResult]:
+ """
+ Return the rows from the `sliding_sync_joined_rooms` table.
+
+ Returns:
+ Mapping from room_id to _SlidingSyncJoinedRoomResult.
+ """
+ rows = cast(
+ List[Tuple[str, int, int, str, str, bool, str]],
+ self.get_success(
+ self.store.db_pool.simple_select_list(
+ "sliding_sync_joined_rooms",
+ None,
+ retcols=(
+ "room_id",
+ "event_stream_ordering",
+ "bump_stamp",
+ "room_type",
+ "room_name",
+ "is_encrypted",
+ "tombstone_successor_room_id",
+ ),
+ ),
+ ),
+ )
+
+ return {
+ row[0]: _SlidingSyncJoinedRoomResult(
+ room_id=row[0],
+ event_stream_ordering=row[1],
+ bump_stamp=row[2],
+ room_type=row[3],
+ room_name=row[4],
+ is_encrypted=bool(row[5]),
+ tombstone_successor_room_id=row[6],
+ )
+ for row in rows
+ }
+
+ def _get_sliding_sync_membership_snapshots(
+ self,
+ ) -> Dict[Tuple[str, str], _SlidingSyncMembershipSnapshotResult]:
+ """
+ Return the rows from the `sliding_sync_membership_snapshots` table.
+
+ Returns:
+ Mapping from the (room_id, user_id) to _SlidingSyncMembershipSnapshotResult.
+ """
+ rows = cast(
+ List[Tuple[str, str, str, str, str, int, int, bool, str, str, bool, str]],
+ self.get_success(
+ self.store.db_pool.simple_select_list(
+ "sliding_sync_membership_snapshots",
+ None,
+ retcols=(
+ "room_id",
+ "user_id",
+ "sender",
+ "membership_event_id",
+ "membership",
+ "forgotten",
+ "event_stream_ordering",
+ "has_known_state",
+ "room_type",
+ "room_name",
+ "is_encrypted",
+ "tombstone_successor_room_id",
+ ),
+ ),
+ ),
+ )
+
+ return {
+ (row[0], row[1]): _SlidingSyncMembershipSnapshotResult(
+ room_id=row[0],
+ user_id=row[1],
+ sender=row[2],
+ membership_event_id=row[3],
+ membership=row[4],
+ forgotten=bool(row[5]),
+ event_stream_ordering=row[6],
+ has_known_state=bool(row[7]),
+ room_type=row[8],
+ room_name=row[9],
+ is_encrypted=bool(row[10]),
+ tombstone_successor_room_id=row[11],
+ )
+ for row in rows
+ }
+
+ _remote_invite_count: int = 0
+
+ def _create_remote_invite_room_for_user(
+ self,
+ invitee_user_id: str,
+ unsigned_invite_room_state: Optional[List[StrippedStateEvent]],
+ ) -> Tuple[str, EventBase]:
+ """
+ Create a fake invite for a remote room and persist it.
+
+        We don't have any state for these kinds of rooms and can only rely on the
+ stripped state included in the unsigned portion of the invite event to identify
+ the room.
+
+ Args:
+ invitee_user_id: The person being invited
+ unsigned_invite_room_state: List of stripped state events to assist the
+ receiver in identifying the room.
+
+ Returns:
+ The room ID of the remote invite room and the persisted remote invite event.
+ """
+ invite_room_id = f"!test_room{self._remote_invite_count}:remote_server"
+
+ invite_event_dict = {
+ "room_id": invite_room_id,
+ "sender": "@inviter:remote_server",
+ "state_key": invitee_user_id,
+ "depth": 1,
+ "origin_server_ts": 1,
+ "type": EventTypes.Member,
+ "content": {"membership": Membership.INVITE},
+ "auth_events": [],
+ "prev_events": [],
+ }
+ if unsigned_invite_room_state is not None:
+ serialized_stripped_state_events = []
+ for stripped_event in unsigned_invite_room_state:
+ serialized_stripped_state_events.append(
+ {
+ "type": stripped_event.type,
+ "state_key": stripped_event.state_key,
+ "sender": stripped_event.sender,
+ "content": stripped_event.content,
+ }
+ )
+
+ invite_event_dict["unsigned"] = {
+ "invite_room_state": serialized_stripped_state_events
+ }
+
+ invite_event = make_event_from_dict(
+ invite_event_dict,
+ room_version=RoomVersions.V10,
+ )
+ invite_event.internal_metadata.outlier = True
+ invite_event.internal_metadata.out_of_band_membership = True
+
+ self.get_success(
+ self.store.maybe_store_room_on_outlier_membership(
+ room_id=invite_room_id, room_version=invite_event.room_version
+ )
+ )
+ context = EventContext.for_outlier(self.hs.get_storage_controllers())
+ persisted_event, _, _ = self.get_success(
+ self.persist_controller.persist_event(invite_event, context)
+ )
+
+ self._remote_invite_count += 1
+
+ return invite_room_id, persisted_event
+
+ def _retract_remote_invite_for_user(
+ self,
+ user_id: str,
+ remote_room_id: str,
+ ) -> EventBase:
+ """
+ Create a fake invite retraction for a remote room and persist it.
+
+ Retracting an invite just means the person is no longer invited to the room.
+ This is done by someone with proper power levels kicking the user from the room.
+ A kick shows up as a leave event for a given person with a different `sender`.
+
+ Args:
+ user_id: The person who was invited and we're going to retract the
+ invite for.
+ remote_room_id: The room ID that the invite was for.
+
+ Returns:
+ The persisted leave (kick) event.
+ """
+
+ kick_event_dict = {
+ "room_id": remote_room_id,
+ "sender": "@inviter:remote_server",
+ "state_key": user_id,
+ "depth": 1,
+ "origin_server_ts": 1,
+ "type": EventTypes.Member,
+ "content": {"membership": Membership.LEAVE},
+ "auth_events": [],
+ "prev_events": [],
+ }
+
+ kick_event = make_event_from_dict(
+ kick_event_dict,
+ room_version=RoomVersions.V10,
+ )
+ kick_event.internal_metadata.outlier = True
+ kick_event.internal_metadata.out_of_band_membership = True
+
+ self.get_success(
+ self.store.maybe_store_room_on_outlier_membership(
+ room_id=remote_room_id, room_version=kick_event.room_version
+ )
+ )
+ context = EventContext.for_outlier(self.hs.get_storage_controllers())
+ persisted_event, _, _ = self.get_success(
+ self.persist_controller.persist_event(kick_event, context)
+ )
+
+ return persisted_event
+
+
+class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase):
+ """
+ Tests to make sure the
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` database tables are
+ populated and updated correctly as new events are sent.
+ """
+
+ def test_joined_room_with_no_info(self) -> None:
+ """
+ Test joined room that doesn't have a room type, encryption, or name shows up in
+ `sliding_sync_joined_rooms`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ # History visibility just happens to be the last event sent in the room
+ event_stream_ordering=state_map[
+ (EventTypes.RoomHistoryVisibility, "")
+ ].internal_metadata.stream_ordering,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_joined_room_with_info(self) -> None:
+ """
+ Test joined encrypted room with name shows up in `sliding_sync_joined_rooms`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id1,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user2_tok,
+ )
+ # Encrypt the room
+ self.helper.send_state(
+ room_id1,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user2_tok,
+ )
+ # Add a tombstone
+ self.helper.send_state(
+ room_id1,
+ EventTypes.Tombstone,
+ {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"},
+ tok=user2_tok,
+ )
+
+ # User1 joins the room
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ # This should be whatever is the last event in the room
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=True,
+ tombstone_successor_room_id="another_room",
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=True,
+ tombstone_successor_room_id="another_room",
+ ),
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ # Even though this room does have a name, is encrypted, and has a
+ # tombstone, user2 is the room creator and joined at the room creation
+ # time which didn't have this state set yet.
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_joined_space_room_with_info(self) -> None:
+ """
+ Test joined space room with name shows up in `sliding_sync_joined_rooms`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ space_room_id = self.helper.create_room_as(
+ user2_id,
+ tok=user2_tok,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+ },
+ )
+ # Add a room name
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.Name,
+ {"name": "my super duper space"},
+ tok=user2_tok,
+ )
+
+ # User1 joins the room
+ user1_join_response = self.helper.join(space_room_id, user1_id, tok=user1_tok)
+ user1_join_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_join_response["event_id"])
+ )
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(space_room_id)
+ )
+
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {space_room_id},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[space_room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=space_room_id,
+ event_stream_ordering=user1_join_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (space_room_id, user1_id),
+ (space_room_id, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=space_room_id,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((space_room_id, user2_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=space_room_id,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=RoomTypes.SPACE,
+ # Even though this room does have a name, user2 is the room creator and
+ # joined at the room creation time which didn't have this state set yet.
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_joined_room_with_state_updated(self) -> None:
+ """
+ Test that state-derived info in `sliding_sync_joined_rooms` is updated when
+ the current state is updated.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id1,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user2_tok,
+ )
+
+ # User1 joins the room
+ user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+ user1_join_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_join_response["event_id"])
+ )
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ event_stream_ordering=user1_join_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+
+ # Update the room name
+ self.helper.send_state(
+ room_id1,
+ EventTypes.Name,
+ {"name": "my super duper room was renamed"},
+ tok=user2_tok,
+ )
+ # Encrypt the room
+ encrypt_room_response = self.helper.send_state(
+ room_id1,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user2_tok,
+ )
+ encrypt_room_event_pos = self.get_success(
+ self.store.get_position_for_event(encrypt_room_response["event_id"])
+ )
+
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ # Make sure we see the new room name
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ event_stream_ordering=encrypt_room_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room was renamed",
+ is_encrypted=True,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_joined_room_is_bumped(self) -> None:
+ """
+ Test that `event_stream_ordering` and `bump_stamp` are updated when a new bump
+ event is sent (`sliding_sync_joined_rooms`).
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id1,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user2_tok,
+ )
+
+ # User1 joins the room
+ user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+ user1_join_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_join_response["event_id"])
+ )
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ event_stream_ordering=user1_join_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user joined
+ user1_snapshot = _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ user1_snapshot,
+ )
+ # Holds the info according to the current state when the user joined
+ user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ user2_snapshot,
+ )
+
+ # Send a new message to bump the room
+ event_response = self.helper.send(room_id1, "some message", tok=user1_tok)
+ event_pos = self.get_success(
+ self.store.get_position_for_event(event_response["event_id"])
+ )
+
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ # Make sure `event_stream_ordering` and `bump_stamp` were updated
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ # Updated `event_stream_ordering`
+ event_stream_ordering=event_pos.stream,
+ # And since the event was a bump event, the `bump_stamp` should be updated
+ bump_stamp=event_pos.stream,
+ # The state is still the same (it didn't change)
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ user1_snapshot,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ user2_snapshot,
+ )
+
+ def test_joined_room_bump_stamp_backfill(self) -> None:
+ """
+ Test that `bump_stamp` ignores backfilled events, i.e. events with a
+ negative stream ordering.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote room
+ creator = "@user:other"
+ room_id = "!foo:other"
+ room_version = RoomVersions.V10
+ shared_kwargs = {
+ "room_id": room_id,
+ "room_version": room_version.identifier,
+ }
+
+ create_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[],
+ type=EventTypes.Create,
+ state_key="",
+ content={
+ # The `ROOM_CREATOR` field could be removed if we used a room
+ # version > 10 (in favor of relying on `sender`)
+ EventContentFields.ROOM_CREATOR: creator,
+ EventContentFields.ROOM_VERSION: room_version.identifier,
+ },
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+ creator_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[create_tuple[0].event_id],
+ auth_event_ids=[create_tuple[0].event_id],
+ type=EventTypes.Member,
+ state_key=creator,
+ content={"membership": Membership.JOIN},
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+ room_name_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[creator_tuple[0].event_id],
+ auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
+ type=EventTypes.Name,
+ state_key="",
+ content={
+ EventContentFields.ROOM_NAME: "my super duper room",
+ },
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+ # We add a message event as a valid "bump type"
+ msg_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[room_name_tuple[0].event_id],
+ auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
+ type=EventTypes.Message,
+ content={"body": "foo", "msgtype": "m.text"},
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+ invite_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[msg_tuple[0].event_id],
+ auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
+ type=EventTypes.Member,
+ state_key=user1_id,
+ content={"membership": Membership.INVITE},
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+
+ remote_events_and_contexts = [
+ create_tuple,
+ creator_tuple,
+ room_name_tuple,
+ msg_tuple,
+ invite_tuple,
+ ]
+
+ # Ensure the local HS knows the room version
+ self.get_success(self.store.store_room(room_id, creator, False, room_version))
+
+ # Persist these events as backfilled events.
+ for event, context in remote_events_and_contexts:
+ self.get_success(
+ self.persist_controller.persist_event(event, context, backfilled=True)
+ )
+
+ # Now we join the local user to the room. We want to make this feel as close to
+ # the real `process_remote_join()` as possible but we'd like to avoid some of
+ # the auth checks that would be done in the real code.
+ #
+ # FIXME: The test was originally written using this less-real
+ # `persist_event(...)` shortcut but it would be nice to use the real remote join
+ # process in a `FederatingHomeserverTestCase`.
+ flawed_join_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[invite_tuple[0].event_id],
+ # This doesn't work correctly to create an `EventContext` that includes
+ # both of these state events. I assume it's because we're working on our
+ # local homeserver which has the remote state set as `outlier`. We have
+ # to create our own EventContext below to get this right.
+ auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id],
+ type=EventTypes.Member,
+ state_key=user1_id,
+ content={"membership": Membership.JOIN},
+ sender=user1_id,
+ **shared_kwargs,
+ )
+ )
+ # We have to create our own context to get the state set correctly. If we use
+ # the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
+ # table will only have the join event in it which should never happen in our
+ # real server.
+ join_event = flawed_join_tuple[0]
+ join_context = self.get_success(
+ self.state_handler.compute_event_context(
+ join_event,
+ state_ids_before_event={
+ (e.type, e.state_key): e.event_id
+ for e in [create_tuple[0], invite_tuple[0], room_name_tuple[0]]
+ },
+ partial_state=False,
+ )
+ )
+ join_event, _join_event_pos, _room_token = self.get_success(
+ self.persist_controller.persist_event(join_event, join_context)
+ )
+
+ # Make sure the tables are populated correctly
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id,
+ # This should be the last event in the room (the join membership)
+ event_stream_ordering=join_event.internal_metadata.stream_ordering,
+ # Since all of the bump events are backfilled, the `bump_stamp` should
+ # still be `None`. (and we will fall back to the user's membership event
+ # position in the Sliding Sync API)
+ bump_stamp=None,
+ room_type=None,
+ # We still pick up state of the room even if it's backfilled
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=join_event.event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=join_event.internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ @parameterized.expand(
+ # Test both an insert and an upsert into the
+ # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` to exercise
+ # more possibilities of things going wrong.
+ [
+ ("insert", True),
+ ("upsert", False),
+ ]
+ )
+ def test_joined_room_outlier_and_deoutlier(
+ self, description: str, should_insert: bool
+ ) -> None:
+ """
+ This is a regression test.
+
+ This is to simulate the case where an event is first persisted as an outlier
+ (like a remote invite) and then later persisted again to de-outlier it. The
+ first the time, the `outlier` is persisted with one `stream_ordering` but when
+ persisted again and de-outliered, it is assigned a different `stream_ordering`
+ that won't end up being used. Since we call
+ `_calculate_sliding_sync_table_changes()` before `_update_outliers_txn()` which
+ fixes this discrepancy (always use the `stream_ordering` from the first time it
+ was persisted), make sure we're not using unreliable `stream_ordering` values
+ that will cause `FOREIGN KEY constraint failed` in the
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_version = RoomVersions.V10
+ room_id = self.helper.create_room_as(
+ user2_id, tok=user2_tok, room_version=room_version.identifier
+ )
+
+ if should_insert:
+ # Clear these out so we always insert
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_joined_rooms",
+ keyvalues={"room_id": room_id},
+ desc="TODO",
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_membership_snapshots",
+ keyvalues={"room_id": room_id},
+ desc="TODO",
+ )
+ )
+
+ # Create a membership event (which triggers an insert into
+ # `sliding_sync_membership_snapshots`)
+ membership_event_dict = {
+ "type": EventTypes.Member,
+ "state_key": user1_id,
+ "sender": user1_id,
+ "room_id": room_id,
+ "content": {EventContentFields.MEMBERSHIP: Membership.JOIN},
+ }
+ # Create a relevant state event (which triggers an insert into
+ # `sliding_sync_joined_rooms`)
+ state_event_dict = {
+ "type": EventTypes.Name,
+ "state_key": "",
+ "sender": user2_id,
+ "room_id": room_id,
+ "content": {EventContentFields.ROOM_NAME: "my super room"},
+ }
+ event_dicts_to_persist = [
+ membership_event_dict,
+ state_event_dict,
+ ]
+
+ for event_dict in event_dicts_to_persist:
+ events_to_persist = []
+
+ # Create the event as an outlier
+ (
+ event,
+ unpersisted_context,
+ ) = self.get_success(
+ self.hs.get_event_creation_handler().create_event(
+ requester=create_requester(user1_id),
+ event_dict=event_dict,
+ outlier=True,
+ )
+ )
+ # FIXME: Should we use an `EventContext.for_outlier(...)` here?
+ # Doesn't seem to matter for this test.
+ context = self.get_success(unpersisted_context.persist(event))
+ events_to_persist.append((event, context))
+
+ # Create the event again but as a non-outlier. This will de-outlier the event
+ # when we persist it.
+ (
+ event,
+ unpersisted_context,
+ ) = self.get_success(
+ self.hs.get_event_creation_handler().create_event(
+ requester=create_requester(user1_id),
+ event_dict=event_dict,
+ outlier=False,
+ )
+ )
+ context = self.get_success(unpersisted_context.persist(event))
+ events_to_persist.append((event, context))
+
+ for event, context in events_to_persist:
+ self.get_success(
+ self.persist_controller.persist_event(
+ event,
+ context,
+ )
+ )
+
+ # We're just testing that it does not explode
+
+ def test_joined_room_meta_state_reset(self) -> None:
+ """
+ Test that a state reset on the room name is reflected in the
+ `sliding_sync_joined_rooms` table.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user2_tok,
+ )
+
+ # User1 joins the room
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+
+ # Make sure we see the new room name
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id,
+ # This should be whatever is the last event in the room
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+ user1_snapshot = _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+ user1_snapshot,
+ )
+ # Holds the info according to the current state when the user joined (no room
+ # name when the room creator joined)
+ user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+ user2_snapshot,
+ )
+
+ # Mock a state reset removing the room name state from the current state
+ message_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[state_map[(EventTypes.Name, "")].event_id],
+ auth_event_ids=[
+ state_map[(EventTypes.Create, "")].event_id,
+ state_map[(EventTypes.Member, user1_id)].event_id,
+ ],
+ type=EventTypes.Message,
+ content={"body": "foo", "msgtype": "m.text"},
+ sender=user1_id,
+ room_id=room_id,
+ room_version=RoomVersions.V10.identifier,
+ )
+ )
+ event_chunk = [message_tuple]
+ self.get_success(
+ self.persist_events_store._persist_events_and_state_updates(
+ room_id,
+ event_chunk,
+ state_delta_for_room=DeltaState(
+ # This is the state reset part. We're removing the room name state.
+ to_delete=[(EventTypes.Name, "")],
+ to_insert={},
+ ),
+ new_forward_extremities={message_tuple[0].event_id},
+ use_negative_stream_ordering=False,
+ inhibit_local_membership_updates=False,
+ new_event_links={},
+ )
+ )
+
+ # Make sure the state reset is reflected in the `sliding_sync_joined_rooms` table
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id,
+ # This should be whatever is the last event in the room
+ event_stream_ordering=message_tuple[
+ 0
+ ].internal_metadata.stream_ordering,
+ bump_stamp=message_tuple[0].internal_metadata.stream_ordering,
+ room_type=None,
+ # This was state reset back to None
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ # State reset shouldn't be reflected in the `sliding_sync_membership_snapshots`
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+ # Snapshots haven't changed
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+ user1_snapshot,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+ user2_snapshot,
+ )
+
+ def test_joined_room_fully_insert_on_state_update(self) -> None:
+ """
+ Test that when an existing room updates its state and we don't have a
+ corresponding row in `sliding_sync_joined_rooms` yet, we fully-insert the row
+ even though only a tiny piece of state changed.
+
+ FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+ foreground update for
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+ https://github.com/element-hq/synapse/issues/17623)
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user1_tok,
+ )
+
+ # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+ # it into the table. This is to simulate an existing room (before we even added
+ # the sliding sync tables) not being in the `sliding_sync_joined_rooms` table
+ # yet.
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_joined_rooms",
+ keyvalues={"room_id": room_id},
+ desc="simulate existing room not being in the sliding_sync_joined_rooms table yet",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Encrypt the room
+ self.helper.send_state(
+ room_id,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user1_tok,
+ )
+
+ # The room should now be in the `sliding_sync_joined_rooms` table
+ # (fully-inserted with all of the state values).
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id,
+ # This should be whatever is the last event in the room
+ event_stream_ordering=state_map[
+ (EventTypes.RoomEncryption, "")
+ ].internal_metadata.stream_ordering,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=True,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_joined_room_nothing_if_not_in_table_when_bumped(self) -> None:
+ """
+ Test a new message being sent in an existing room when we don't have a
+ corresponding row in `sliding_sync_joined_rooms` yet; either nothing should
+ happen or we should fully-insert the row. We currently do nothing.
+
+ FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+ foreground update for
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+ https://github.com/element-hq/synapse/issues/17623)
+ """
+
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user1_tok,
+ )
+ # Encrypt the room
+ self.helper.send_state(
+ room_id,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user1_tok,
+ )
+
+ # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+ # it into the table. This is to simulate an existing room (before we even added
+ # the sliding sync tables) not being in the `sliding_sync_joined_rooms` table
+ # yet.
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_joined_rooms",
+ keyvalues={"room_id": room_id},
+ desc="simulate existing room not being in the sliding_sync_joined_rooms table yet",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Send a new message to bump the room
+ self.helper.send(room_id, "some message", tok=user1_tok)
+
+ # Either nothing should happen or we should fully-insert the row. We currently
+ # do nothing for non-state events.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ def test_non_join_space_room_with_info(self) -> None:
+ """
+ Test that users who were invited show up in `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ space_room_id = self.helper.create_room_as(
+ user2_id,
+ tok=user2_tok,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+ },
+ )
+ # Add a room name
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.Name,
+ {"name": "my super duper space"},
+ tok=user2_tok,
+ )
+ # Encrypt the room
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user2_tok,
+ )
+ # Add a tombstone
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.Tombstone,
+ {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"},
+ tok=user2_tok,
+ )
+
+ # User1 is invited to the room
+ user1_invited_response = self.helper.invite(
+ space_room_id, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+ user1_invited_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_invited_response["event_id"])
+ )
+
+ # Update the room name after we are invited just to make sure
+ # we don't update non-join memberships when the room name changes.
+ rename_response = self.helper.send_state(
+ space_room_id,
+ EventTypes.Name,
+ {"name": "my super duper space was renamed"},
+ tok=user2_tok,
+ )
+ rename_event_pos = self.get_success(
+ self.store.get_position_for_event(rename_response["event_id"])
+ )
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(space_room_id)
+ )
+
+ # User2 is still joined to the room so we should still have an entry in the
+ # `sliding_sync_joined_rooms` table.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {space_room_id},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[space_room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=space_room_id,
+ event_stream_ordering=rename_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space was renamed",
+ is_encrypted=True,
+ tombstone_successor_room_id="another_room",
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (space_room_id, user1_id),
+ (space_room_id, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user was invited
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=space_room_id,
+ user_id=user1_id,
+ sender=user2_id,
+ membership_event_id=user1_invited_response["event_id"],
+ membership=Membership.INVITE,
+ event_stream_ordering=user1_invited_event_pos.stream,
+ has_known_state=True,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space",
+ is_encrypted=True,
+ tombstone_successor_room_id="another_room",
+ ),
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((space_room_id, user2_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=space_room_id,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=RoomTypes.SPACE,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_non_join_invite_ban(self) -> None:
+ """
+ Test that users with an invite/ban membership in a room show up in
+ `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+ user3_id = self.register_user("user3", "pass")
+ user3_tok = self.login(user3_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+ # User1 is invited to the room
+ user1_invited_response = self.helper.invite(
+ room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+ user1_invited_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_invited_response["event_id"])
+ )
+
+ # User3 joins the room
+ self.helper.join(room_id1, user3_id, tok=user3_tok)
+ # User3 is banned from the room
+ user3_ban_response = self.helper.ban(
+ room_id1, src=user2_id, targ=user3_id, tok=user2_tok
+ )
+ user3_ban_event_pos = self.get_success(
+ self.store.get_position_for_event(user3_ban_response["event_id"])
+ )
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+
+ # User2 is still joined to the room so we should still have an entry
+ # in the `sliding_sync_joined_rooms` table.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ event_stream_ordering=user3_ban_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ (room_id1, user3_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user was invited
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user2_id,
+ membership_event_id=user1_invited_response["event_id"],
+ membership=Membership.INVITE,
+ event_stream_ordering=user1_invited_event_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ # Holds the info according to the current state when the user was banned
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user3_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user3_id,
+ sender=user2_id,
+ membership_event_id=user3_ban_response["event_id"],
+ membership=Membership.BAN,
+ event_stream_ordering=user3_ban_event_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_non_join_reject_invite_empty_room(self) -> None:
+ """
+ In a room where no one is joined (`no_longer_in_room`), test rejecting an invite.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+ # User1 is invited to the room
+ self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok)
+
+ # User2 leaves the room
+ user2_leave_response = self.helper.leave(room_id1, user2_id, tok=user2_tok)
+ user2_leave_event_pos = self.get_success(
+ self.store.get_position_for_event(user2_leave_response["event_id"])
+ )
+
+ # User1 rejects the invite
+ user1_leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ user1_leave_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_leave_response["event_id"])
+ )
+
+ # No one is joined to the room
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user left
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=user1_leave_response["event_id"],
+ membership=Membership.LEAVE,
+ event_stream_ordering=user1_leave_event_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ # Holds the info according to the current state when the user left
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=user2_leave_response["event_id"],
+ membership=Membership.LEAVE,
+ event_stream_ordering=user2_leave_event_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_membership_changing(self) -> None:
+ """
+ Test latest snapshot evolves when membership changes (`sliding_sync_membership_snapshots`).
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+ # User1 is invited to the room
+ # ======================================================
+ user1_invited_response = self.helper.invite(
+ room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+ user1_invited_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_invited_response["event_id"])
+ )
+
+ # Update the room name after the user was invited
+ room_name_update_response = self.helper.send_state(
+ room_id1,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user2_tok,
+ )
+ room_name_update_event_pos = self.get_success(
+ self.store.get_position_for_event(room_name_update_response["event_id"])
+ )
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+
+ # Assert joined room status
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ # Latest event in the room
+ event_stream_ordering=room_name_update_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ # Assert membership snapshots
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user was invited
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user2_id,
+ membership_event_id=user1_invited_response["event_id"],
+ membership=Membership.INVITE,
+ event_stream_ordering=user1_invited_event_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ # Room name was updated after the user was invited so we should still
+ # see it unset here
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ # Holds the info according to the current state when the user joined
+ user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ user2_snapshot,
+ )
+
+ # User1 joins the room
+ # ======================================================
+ user1_joined_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+ user1_joined_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_joined_response["event_id"])
+ )
+
+ # Assert joined room status
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ # Latest event in the room
+ event_stream_ordering=user1_joined_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ # Assert membership snapshots
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=user1_joined_response["event_id"],
+ membership=Membership.JOIN,
+ event_stream_ordering=user1_joined_event_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ # We see the updated state because the user joined after the room
+ # name change
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ user2_snapshot,
+ )
+
+ # User1 is banned from the room
+ # ======================================================
+ user1_ban_response = self.helper.ban(
+ room_id1, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+ user1_ban_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_ban_response["event_id"])
+ )
+
+ # Assert joined room status
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ # Latest event in the room
+ event_stream_ordering=user1_ban_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ # Assert membership snapshots
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user was banned
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user2_id,
+ membership_event_id=user1_ban_response["event_id"],
+ membership=Membership.BAN,
+ event_stream_ordering=user1_ban_event_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ # We see the updated state because the user was banned after the
+ # room name change
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ user2_snapshot,
+ )
+
+ def test_non_join_server_left_room(self) -> None:
+ """
+ Test that when everyone local leaves the room, their leave memberships still show up in
+ `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+ # User1 joins the room
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ # User2 leaves the room
+ user2_leave_response = self.helper.leave(room_id1, user2_id, tok=user2_tok)
+ user2_leave_event_pos = self.get_success(
+ self.store.get_position_for_event(user2_leave_response["event_id"])
+ )
+
+ # User1 leaves the room
+ user1_leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ user1_leave_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_leave_response["event_id"])
+ )
+
+ # No one is joined to the room anymore so we shouldn't have an entry in the
+ # `sliding_sync_joined_rooms` table.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # We should still see rows for the leave events (non-joins)
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=user1_leave_response["event_id"],
+ membership=Membership.LEAVE,
+ event_stream_ordering=user1_leave_event_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=user2_leave_response["event_id"],
+ membership=Membership.LEAVE,
+ event_stream_ordering=user2_leave_event_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ @parameterized.expand(
+ [
+ # No stripped state provided
+ ("none", None),
+ # Empty stripped state provided
+ ("empty", []),
+ ]
+ )
+ def test_non_join_remote_invite_no_stripped_state(
+ self, _description: str, stripped_state: Optional[List[StrippedStateEvent]]
+ ) -> None:
+ """
+ Test remote invite with no stripped state provided shows up in
+ `sliding_sync_membership_snapshots` with `has_known_state=False`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote invite room without any `unsigned.invite_room_state`
+ remote_invite_room_id, remote_invite_event = (
+ self._create_remote_invite_room_for_user(user1_id, stripped_state)
+ )
+
+ # No one local is joined to the remote room
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (remote_invite_room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (remote_invite_room_id, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=remote_invite_room_id,
+ user_id=user1_id,
+ sender="@inviter:remote_server",
+ membership_event_id=remote_invite_event.event_id,
+ membership=Membership.INVITE,
+ event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering,
+ # No stripped state provided
+ has_known_state=False,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_non_join_remote_invite_unencrypted_room(self) -> None:
+ """
+ Test remote invite with stripped state (unencrypted room) shows up in
+ `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote invite room with some `unsigned.invite_room_state`
+ # indicating the room name (the room is unencrypted).
+ remote_invite_room_id, remote_invite_event = (
+ self._create_remote_invite_room_for_user(
+ user1_id,
+ [
+ StrippedStateEvent(
+ type=EventTypes.Create,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+ EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.Name,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_NAME: "my super duper room",
+ },
+ ),
+ ],
+ )
+ )
+
+ # No one local is joined to the remote room
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (remote_invite_room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (remote_invite_room_id, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=remote_invite_room_id,
+ user_id=user1_id,
+ sender="@inviter:remote_server",
+ membership_event_id=remote_invite_event.event_id,
+ membership=Membership.INVITE,
+ event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_non_join_remote_invite_encrypted_room(self) -> None:
+ """
+ Test remote invite with stripped state (encrypted room) shows up in
+ `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote invite room with some `unsigned.invite_room_state`
+ # indicating that the room is encrypted.
+ remote_invite_room_id, remote_invite_event = (
+ self._create_remote_invite_room_for_user(
+ user1_id,
+ [
+ StrippedStateEvent(
+ type=EventTypes.Create,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+ EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.RoomEncryption,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+ },
+ ),
+ # This is not one of the stripped state events according to the spec
+ # but we still handle it.
+ StrippedStateEvent(
+ type=EventTypes.Tombstone,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room",
+ },
+ ),
+ # Also test a random event that we don't care about
+ StrippedStateEvent(
+ type="org.matrix.foo_state",
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ "foo": "qux",
+ },
+ ),
+ ],
+ )
+ )
+
+ # No one local is joined to the remote room
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (remote_invite_room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (remote_invite_room_id, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=remote_invite_room_id,
+ user_id=user1_id,
+ sender="@inviter:remote_server",
+ membership_event_id=remote_invite_event.event_id,
+ membership=Membership.INVITE,
+ event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=True,
+ tombstone_successor_room_id="another_room",
+ ),
+ )
+
+ def test_non_join_remote_invite_space_room(self) -> None:
+ """
+ Test remote invite with stripped state (encrypted space room with name) shows up in
+ `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote invite room with some `unsigned.invite_room_state`
+ # indicating that the room is encrypted.
+ remote_invite_room_id, remote_invite_event = (
+ self._create_remote_invite_room_for_user(
+ user1_id,
+ [
+ StrippedStateEvent(
+ type=EventTypes.Create,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+ EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+ # Specify that it is a space room
+ EventContentFields.ROOM_TYPE: RoomTypes.SPACE,
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.RoomEncryption,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.Name,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_NAME: "my super duper space",
+ },
+ ),
+ ],
+ )
+ )
+
+ # No one local is joined to the remote room
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (remote_invite_room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (remote_invite_room_id, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=remote_invite_room_id,
+ user_id=user1_id,
+ sender="@inviter:remote_server",
+ membership_event_id=remote_invite_event.event_id,
+ membership=Membership.INVITE,
+ event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space",
+ is_encrypted=True,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_non_join_reject_remote_invite(self) -> None:
+ """
+ Test rejected remote invite (user decided to leave the room) inherits meta data
+ from the remote invite stripped state and shows up in
+ `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote invite room with some `unsigned.invite_room_state`
+ # indicating that the room is encrypted.
+ remote_invite_room_id, remote_invite_event = (
+ self._create_remote_invite_room_for_user(
+ user1_id,
+ [
+ StrippedStateEvent(
+ type=EventTypes.Create,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+ EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.RoomEncryption,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+ },
+ ),
+ ],
+ )
+ )
+
+ # User1 decides to leave the room (reject the invite)
+ user1_leave_response = self.helper.leave(
+ remote_invite_room_id, user1_id, tok=user1_tok
+ )
+ user1_leave_pos = self.get_success(
+ self.store.get_position_for_event(user1_leave_response["event_id"])
+ )
+
+ # No one local is joined to the remote room
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (remote_invite_room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (remote_invite_room_id, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=remote_invite_room_id,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=user1_leave_response["event_id"],
+ membership=Membership.LEAVE,
+ event_stream_ordering=user1_leave_pos.stream,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=True,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_non_join_retracted_remote_invite(self) -> None:
+ """
+ Test retracted remote invite (Remote inviter kicks the person who was invited)
+ inherits meta data from the remote invite stripped state and shows up in
+ `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote invite room with some `unsigned.invite_room_state`
+ # indicating that the room is encrypted.
+ remote_invite_room_id, remote_invite_event = (
+ self._create_remote_invite_room_for_user(
+ user1_id,
+ [
+ StrippedStateEvent(
+ type=EventTypes.Create,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+ EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.RoomEncryption,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+ },
+ ),
+ ],
+ )
+ )
+
+ # `@inviter:remote_server` decides to retract the invite (kicks the user).
+ # (Note: A kick is just a leave event with a different sender)
+ remote_invite_retraction_event = self._retract_remote_invite_for_user(
+ user_id=user1_id,
+ remote_room_id=remote_invite_room_id,
+ )
+
+ # No one local is joined to the remote room
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (remote_invite_room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (remote_invite_room_id, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=remote_invite_room_id,
+ user_id=user1_id,
+ sender="@inviter:remote_server",
+ membership_event_id=remote_invite_retraction_event.event_id,
+ membership=Membership.LEAVE,
+ event_stream_ordering=remote_invite_retraction_event.internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=True,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+    def test_non_join_state_reset(self) -> None:
+        """
+        Test a state reset that removes someone from the room.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user2_tok,
+        )
+
+        # User1 joins the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # Make sure we see the new room name
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id,
+                # This should be whatever is the last event in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        user1_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id,
+            user_id=user1_id,
+            sender=user1_id,
+            membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+            membership=Membership.JOIN,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user1_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name="my super duper room",
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            user1_snapshot,
+        )
+        # Holds the info according to the current state when the user joined (no room
+        # name when the room creator joined)
+        user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id,
+            user_id=user2_id,
+            sender=user2_id,
+            membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+            membership=Membership.JOIN,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user2_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name=None,
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            user2_snapshot,
+        )
+
+        # Mock a state reset removing the membership for user1 in the current state
+        message_tuple = self.get_success(
+            create_event(
+                self.hs,
+                prev_event_ids=[state_map[(EventTypes.Name, "")].event_id],
+                auth_event_ids=[
+                    state_map[(EventTypes.Create, "")].event_id,
+                    state_map[(EventTypes.Member, user1_id)].event_id,
+                ],
+                type=EventTypes.Message,
+                content={"body": "foo", "msgtype": "m.text"},
+                sender=user1_id,
+                room_id=room_id,
+                room_version=RoomVersions.V10.identifier,
+            )
+        )
+        event_chunk = [message_tuple]
+        self.get_success(
+            self.persist_events_store._persist_events_and_state_updates(
+                room_id,
+                event_chunk,
+                state_delta_for_room=DeltaState(
+                    # This is the state reset part: we delete user1's membership from
+                    # the current state without persisting a corresponding leave event.
+                    to_delete=[(EventTypes.Member, user1_id)],
+                    to_insert={},
+                ),
+                new_forward_extremities={message_tuple[0].event_id},
+                use_negative_stream_ordering=False,
+                inhibit_local_membership_updates=False,
+                new_event_links={},
+            )
+        )
+
+        # A state reset on membership doesn't affect the `sliding_sync_joined_rooms` table
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id,
+                # This should be whatever is the last event in the room
+                event_stream_ordering=message_tuple[
+                    0
+                ].internal_metadata.stream_ordering,
+                bump_stamp=message_tuple[0].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+        # State reset on membership should remove the user's snapshot
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                # We shouldn't see user1 in the snapshots table anymore
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        # Snapshot for user2 hasn't changed
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            user2_snapshot,
+        )
+
+    def test_membership_snapshot_forget(self) -> None:
+        """
+        Test that forgetting a room flips the `forgotten` flag for that user in
+        `sliding_sync_membership_snapshots` without touching other users' rows.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # User1 joins the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+        # User1 leaves the room (we have to leave in order to forget the room)
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id)
+        )
+
+        # Check on the `sliding_sync_membership_snapshots` table (nothing should be
+        # forgotten yet)
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user joined
+        user1_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id,
+            user_id=user1_id,
+            sender=user1_id,
+            membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+            membership=Membership.LEAVE,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user1_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name=None,
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+            # Room is not forgotten
+            forgotten=False,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            user1_snapshot,
+        )
+        # Holds the info according to the current state when the user joined
+        # (`forgotten` is not set here -- user2 never forgets the room)
+        user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+            room_id=room_id,
+            user_id=user2_id,
+            sender=user2_id,
+            membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+            membership=Membership.JOIN,
+            event_stream_ordering=state_map[
+                (EventTypes.Member, user2_id)
+            ].internal_metadata.stream_ordering,
+            has_known_state=True,
+            room_type=None,
+            room_name=None,
+            is_encrypted=False,
+            tombstone_successor_room_id=None,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            user2_snapshot,
+        )
+
+        # Forget the room
+        channel = self.make_request(
+            "POST",
+            f"/_matrix/client/r0/rooms/{room_id}/forget",
+            content={},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+        # Check on the `sliding_sync_membership_snapshots` table
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        # Room is now forgotten for user1
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            attr.evolve(user1_snapshot, forgotten=True),
+        )
+        # Nothing changed for user2
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            user2_snapshot,
+        )
+
+ def test_membership_snapshot_missing_forget(
+ self,
+ ) -> None:
+ """
+ Test forgetting a room with no existing row in `sliding_sync_membership_snapshots`.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+ # User1 joins the room
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+ # User1 leaves the room (we have to leave in order to forget the room)
+ self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+ # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+ # happen during event creation.
+ self.get_success(
+ self.store.db_pool.simple_delete_many(
+ table="sliding_sync_membership_snapshots",
+ column="room_id",
+ iterable=(room_id,),
+ keyvalues={},
+ desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_forgotten_missing",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Forget the room
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/r0/rooms/{room_id}/forget",
+ content={},
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # It doesn't explode
+
+ # We still shouldn't find anything in the table because nothing has re-created them
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ set(),
+ exact=True,
+ )
+
+
+class SlidingSyncTablesBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase):
+ """
+ Test the background updates that populate the `sliding_sync_joined_rooms` and
+ `sliding_sync_membership_snapshots` tables.
+ """
+
+    def test_joined_background_update_missing(self) -> None:
+        """
+        Test that the background update for `sliding_sync_joined_rooms` populates missing rows
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create rooms with various levels of state that should appear in the table
+        #
+        room_id_no_info = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        room_id_with_info = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user1_tok,
+        )
+        # Encrypt the room
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+        # Add a room name
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.Name,
+            {"name": "my super duper space"},
+            tok=user1_tok,
+        )
+
+        # Clean-up the `sliding_sync_joined_rooms` table as if the inserts did not
+        # happen during event creation.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_joined_rooms",
+                column="room_id",
+                iterable=(room_id_no_info, room_id_with_info, space_room_id),
+                keyvalues={},
+                desc="sliding_sync_joined_rooms.test_joined_background_update_missing",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Insert and run the background updates.
+        # (the main update is marked as depending on the prefill update so they run
+        # in order)
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE,
+                    "progress_json": "{}",
+                    "depends_on": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE,
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id_no_info, room_id_with_info, space_room_id},
+            exact=True,
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id_no_info)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id_no_info],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id_no_info,
+                # History visibility just happens to be the last event sent in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.RoomHistoryVisibility, "")
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id_with_info)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[room_id_with_info],
+            _SlidingSyncJoinedRoomResult(
+                room_id=room_id_with_info,
+                # Latest event sent in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.RoomEncryption, "")
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(space_room_id)
+        )
+        self.assertEqual(
+            sliding_sync_joined_rooms_results[space_room_id],
+            _SlidingSyncJoinedRoomResult(
+                room_id=space_room_id,
+                # Latest event sent in the room
+                event_stream_ordering=state_map[
+                    (EventTypes.Name, "")
+                ].internal_metadata.stream_ordering,
+                bump_stamp=state_map[
+                    (EventTypes.Create, "")
+                ].internal_metadata.stream_ordering,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_membership_snapshots_background_update_joined(self) -> None:
+        """
+        Test that the background update for `sliding_sync_membership_snapshots`
+        populates missing rows for join memberships.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create rooms with various levels of state that should appear in the table
+        #
+        room_id_no_info = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        room_id_with_info = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # Add a room name
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user1_tok,
+        )
+        # Encrypt the room
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+        # Add a tombstone
+        self.helper.send_state(
+            room_id_with_info,
+            EventTypes.Tombstone,
+            {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"},
+            tok=user1_tok,
+        )
+
+        space_room_id = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+            extra_content={
+                "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+            },
+        )
+        # Add a room name
+        self.helper.send_state(
+            space_room_id,
+            EventTypes.Name,
+            {"name": "my super duper space"},
+            tok=user1_tok,
+        )
+
+        # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+        # happen during event creation.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_membership_snapshots",
+                column="room_id",
+                iterable=(room_id_no_info, room_id_with_info, space_room_id),
+                keyvalues={},
+                desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_joined",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id_no_info, user1_id),
+                (room_id_with_info, user1_id),
+                (space_room_id, user1_id),
+            },
+            exact=True,
+        )
+        # Room with no extra state: all of the optional info fields should be unset
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id_no_info)
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_no_info,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        # Room with name/encryption/tombstone: those fields should be populated
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id_with_info)
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_with_info, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_with_info,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id="another_room",
+            ),
+        )
+        # Space room: `room_type` should be set
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(space_room_id)
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+ def test_membership_snapshots_background_update_local_invite(self) -> None:
+ """
+ Test that the background update for `sliding_sync_membership_snapshots`
+ populates missing rows for invite memberships.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Create rooms with various levels of state that should appear in the table
+ #
+ room_id_no_info = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+ room_id_with_info = self.helper.create_room_as(user2_id, tok=user2_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id_with_info,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user2_tok,
+ )
+ # Encrypt the room
+ self.helper.send_state(
+ room_id_with_info,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user2_tok,
+ )
+ # Add a tombstone
+ self.helper.send_state(
+ room_id_with_info,
+ EventTypes.Tombstone,
+ {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"},
+ tok=user2_tok,
+ )
+
+ space_room_id = self.helper.create_room_as(
+ user1_id,
+ tok=user2_tok,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+ },
+ )
+ # Add a room name
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.Name,
+ {"name": "my super duper space"},
+ tok=user2_tok,
+ )
+
+ # Invite user1 to the rooms
+ user1_invite_room_id_no_info_response = self.helper.invite(
+ room_id_no_info, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+ user1_invite_room_id_with_info_response = self.helper.invite(
+ room_id_with_info, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+ user1_invite_space_room_id_response = self.helper.invite(
+ space_room_id, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+
+ # Have user2 leave the rooms to make sure that our background update is not just
+ # reading from `current_state_events`. For invite/knock memberships, we should
+ # be reading from the stripped state on the invite/knock event itself.
+ self.helper.leave(room_id_no_info, user2_id, tok=user2_tok)
+ self.helper.leave(room_id_with_info, user2_id, tok=user2_tok)
+ self.helper.leave(space_room_id, user2_id, tok=user2_tok)
+ # Check to make sure we actually don't have any `current_state_events` for the rooms
+ current_state_check_rows = self.get_success(
+ self.store.db_pool.simple_select_many_batch(
+ table="current_state_events",
+ column="room_id",
+ iterable=[room_id_no_info, room_id_with_info, space_room_id],
+ retcols=("event_id",),
+ keyvalues={},
+ desc="check current_state_events in test",
+ )
+ )
+ self.assertEqual(len(current_state_check_rows), 0)
+
+ # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+ # happen during event creation.
+ self.get_success(
+ self.store.db_pool.simple_delete_many(
+ table="sliding_sync_membership_snapshots",
+ column="room_id",
+ iterable=(room_id_no_info, room_id_with_info, space_room_id),
+ keyvalues={},
+ desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_local_invite",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Insert and run the background update.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Make sure the table is populated
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ # The invite memberships for user1
+ (room_id_no_info, user1_id),
+ (room_id_with_info, user1_id),
+ (space_room_id, user1_id),
+ # The leave memberships for user2
+ (room_id_no_info, user2_id),
+ (room_id_with_info, user2_id),
+ (space_room_id, user2_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_no_info,
+ user_id=user1_id,
+ sender=user2_id,
+ membership_event_id=user1_invite_room_id_no_info_response["event_id"],
+ membership=Membership.INVITE,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ user1_invite_room_id_no_info_response["event_id"]
+ )
+ ).stream,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (room_id_with_info, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_with_info,
+ user_id=user1_id,
+ sender=user2_id,
+ membership_event_id=user1_invite_room_id_with_info_response["event_id"],
+ membership=Membership.INVITE,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ user1_invite_room_id_with_info_response["event_id"]
+ )
+ ).stream,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=True,
+ # The tombstone isn't showing here ("another_room") because it's not one
+ # of the stripped events that we hand out as part of the invite event.
+ # Even though we handle this scenario from other remote homservers,
+ # Synapse does not include the tombstone in the invite event.
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=space_room_id,
+ user_id=user1_id,
+ sender=user2_id,
+ membership_event_id=user1_invite_space_room_id_response["event_id"],
+ membership=Membership.INVITE,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ user1_invite_space_room_id_response["event_id"]
+ )
+ ).stream,
+ has_known_state=True,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+    def test_membership_snapshots_background_update_remote_invite(
+        self,
+    ) -> None:
+        """
+        Test that the background update for `sliding_sync_membership_snapshots`
+        populates missing rows for remote invites (out-of-band memberships).
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+
+        # Create rooms with various levels of state that should appear in the table
+        #
+        # Invite with no stripped state at all -> snapshot should end up with
+        # `has_known_state=False`
+        room_id_unknown_state, room_id_unknown_state_invite_event = (
+            self._create_remote_invite_room_for_user(user1_id, None)
+        )
+
+        room_id_no_info, room_id_no_info_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        },
+                    ),
+                ],
+            )
+        )
+
+        room_id_with_info, room_id_with_info_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.Name,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_NAME: "my super duper room",
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.RoomEncryption,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+                        },
+                    ),
+                ],
+            )
+        )
+
+        space_room_id, space_room_id_invite_event = (
+            self._create_remote_invite_room_for_user(
+                user1_id,
+                [
+                    StrippedStateEvent(
+                        type=EventTypes.Create,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+                            EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+                            EventContentFields.ROOM_TYPE: RoomTypes.SPACE,
+                        },
+                    ),
+                    StrippedStateEvent(
+                        type=EventTypes.Name,
+                        state_key="",
+                        sender="@inviter:remote_server",
+                        content={
+                            EventContentFields.ROOM_NAME: "my super duper space",
+                        },
+                    ),
+                ],
+            )
+        )
+
+        # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+        # happen during event creation.
+        self.get_success(
+            self.store.db_pool.simple_delete_many(
+                table="sliding_sync_membership_snapshots",
+                column="room_id",
+                iterable=(
+                    room_id_unknown_state,
+                    room_id_no_info,
+                    room_id_with_info,
+                    space_room_id,
+                ),
+                keyvalues={},
+                desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_remote_invite",
+            )
+        )
+
+        # We shouldn't find anything in the table because we just deleted them in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                # The invite memberships for user1
+                (room_id_unknown_state, user1_id),
+                (room_id_no_info, user1_id),
+                (room_id_with_info, user1_id),
+                (space_room_id, user1_id),
+            },
+            exact=True,
+        )
+        # No stripped state on the invite -> `has_known_state=False` and no room info
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_unknown_state, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_unknown_state,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=room_id_unknown_state_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=room_id_unknown_state_invite_event.internal_metadata.stream_ordering,
+                has_known_state=False,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_no_info,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=room_id_no_info_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=room_id_no_info_invite_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_with_info, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_with_info,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=room_id_with_info_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=room_id_with_info_invite_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=space_room_id_invite_event.event_id,
+                membership=Membership.INVITE,
+                event_stream_ordering=space_room_id_invite_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+ def test_membership_snapshots_background_update_remote_invite_rejections_and_retractions(
+ self,
+ ) -> None:
+ """
+ Test that the background update for `sliding_sync_membership_snapshots`
+ populates missing rows for remote invite rejections/retractions (out-of-band memberships).
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Create rooms with various levels of state that should appear in the table
+ #
+ room_id_unknown_state, room_id_unknown_state_invite_event = (
+ self._create_remote_invite_room_for_user(user1_id, None)
+ )
+
+ room_id_no_info, room_id_no_info_invite_event = (
+ self._create_remote_invite_room_for_user(
+ user1_id,
+ [
+ StrippedStateEvent(
+ type=EventTypes.Create,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+ EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+ },
+ ),
+ ],
+ )
+ )
+
+ room_id_with_info, room_id_with_info_invite_event = (
+ self._create_remote_invite_room_for_user(
+ user1_id,
+ [
+ StrippedStateEvent(
+ type=EventTypes.Create,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+ EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.Name,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_NAME: "my super duper room",
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.RoomEncryption,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2",
+ },
+ ),
+ ],
+ )
+ )
+
+ space_room_id, space_room_id_invite_event = (
+ self._create_remote_invite_room_for_user(
+ user1_id,
+ [
+ StrippedStateEvent(
+ type=EventTypes.Create,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_CREATOR: "@inviter:remote_server",
+ EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier,
+ EventContentFields.ROOM_TYPE: RoomTypes.SPACE,
+ },
+ ),
+ StrippedStateEvent(
+ type=EventTypes.Name,
+ state_key="",
+ sender="@inviter:remote_server",
+ content={
+ EventContentFields.ROOM_NAME: "my super duper space",
+ },
+ ),
+ ],
+ )
+ )
+
+ # Reject the remote invites.
+ # Also try retracting a remote invite.
+ room_id_unknown_state_leave_event_response = self.helper.leave(
+ room_id_unknown_state, user1_id, tok=user1_tok
+ )
+ room_id_no_info_leave_event = self._retract_remote_invite_for_user(
+ user_id=user1_id,
+ remote_room_id=room_id_no_info,
+ )
+ room_id_with_info_leave_event_response = self.helper.leave(
+ room_id_with_info, user1_id, tok=user1_tok
+ )
+ space_room_id_leave_event = self._retract_remote_invite_for_user(
+ user_id=user1_id,
+ remote_room_id=space_room_id,
+ )
+
+ # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+ # happen during event creation.
+ self.get_success(
+ self.store.db_pool.simple_delete_many(
+ table="sliding_sync_membership_snapshots",
+ column="room_id",
+ iterable=(
+ room_id_unknown_state,
+ room_id_no_info,
+ room_id_with_info,
+ space_room_id,
+ ),
+ keyvalues={},
+ desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_remote_invite_rejections_and_retractions",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Insert and run the background update.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Make sure the table is populated
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ # The invite memberships for user1
+ (room_id_unknown_state, user1_id),
+ (room_id_no_info, user1_id),
+ (room_id_with_info, user1_id),
+ (space_room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (room_id_unknown_state, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_unknown_state,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=room_id_unknown_state_leave_event_response[
+ "event_id"
+ ],
+ membership=Membership.LEAVE,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ room_id_unknown_state_leave_event_response["event_id"]
+ )
+ ).stream,
+ has_known_state=False,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_no_info,
+ user_id=user1_id,
+ sender="@inviter:remote_server",
+ membership_event_id=room_id_no_info_leave_event.event_id,
+ membership=Membership.LEAVE,
+ event_stream_ordering=room_id_no_info_leave_event.internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (room_id_with_info, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_with_info,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=room_id_with_info_leave_event_response["event_id"],
+ membership=Membership.LEAVE,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ room_id_with_info_leave_event_response["event_id"]
+ )
+ ).stream,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=True,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=space_room_id,
+ user_id=user1_id,
+ sender="@inviter:remote_server",
+ membership_event_id=space_room_id_leave_event.event_id,
+ membership=Membership.LEAVE,
+ event_stream_ordering=space_room_id_leave_event.internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ @parameterized.expand(
+ [
+ # We'll do a kick for this
+ (Membership.LEAVE,),
+ (Membership.BAN,),
+ ]
+ )
+ def test_membership_snapshots_background_update_historical_state(
+ self, test_membership: str
+ ) -> None:
+ """
+ Test that the background update for `sliding_sync_membership_snapshots`
+        populates missing rows for leave (kick) and ban memberships.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Create rooms with various levels of state that should appear in the table
+ #
+ room_id_no_info = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+ room_id_with_info = self.helper.create_room_as(user2_id, tok=user2_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id_with_info,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user2_tok,
+ )
+ # Encrypt the room
+ self.helper.send_state(
+ room_id_with_info,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user2_tok,
+ )
+ # Add a tombstone
+ self.helper.send_state(
+ room_id_with_info,
+ EventTypes.Tombstone,
+ {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"},
+ tok=user2_tok,
+ )
+
+ space_room_id = self.helper.create_room_as(
+ user1_id,
+ tok=user2_tok,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+ },
+ )
+ # Add a room name
+ self.helper.send_state(
+ space_room_id,
+ EventTypes.Name,
+ {"name": "my super duper space"},
+ tok=user2_tok,
+ )
+
+ # Join the room in preparation for our test_membership
+ self.helper.join(room_id_no_info, user1_id, tok=user1_tok)
+ self.helper.join(room_id_with_info, user1_id, tok=user1_tok)
+ self.helper.join(space_room_id, user1_id, tok=user1_tok)
+
+ if test_membership == Membership.LEAVE:
+ # Kick user1 from the rooms
+ user1_membership_room_id_no_info_response = self.helper.change_membership(
+ room=room_id_no_info,
+ src=user2_id,
+ targ=user1_id,
+ tok=user2_tok,
+ membership=Membership.LEAVE,
+ extra_data={
+ "reason": "Bad manners",
+ },
+ )
+ user1_membership_room_id_with_info_response = self.helper.change_membership(
+ room=room_id_with_info,
+ src=user2_id,
+ targ=user1_id,
+ tok=user2_tok,
+ membership=Membership.LEAVE,
+ extra_data={
+ "reason": "Bad manners",
+ },
+ )
+ user1_membership_space_room_id_response = self.helper.change_membership(
+ room=space_room_id,
+ src=user2_id,
+ targ=user1_id,
+ tok=user2_tok,
+ membership=Membership.LEAVE,
+ extra_data={
+ "reason": "Bad manners",
+ },
+ )
+ elif test_membership == Membership.BAN:
+ # Ban user1 from the rooms
+ user1_membership_room_id_no_info_response = self.helper.ban(
+ room_id_no_info, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+ user1_membership_room_id_with_info_response = self.helper.ban(
+ room_id_with_info, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+ user1_membership_space_room_id_response = self.helper.ban(
+ space_room_id, src=user2_id, targ=user1_id, tok=user2_tok
+ )
+ else:
+ raise AssertionError("Unknown test_membership")
+
+ # Have user2 leave the rooms to make sure that our background update is not just
+ # reading from `current_state_events`. For leave memberships, we should be
+ # reading from the historical state.
+ self.helper.leave(room_id_no_info, user2_id, tok=user2_tok)
+ self.helper.leave(room_id_with_info, user2_id, tok=user2_tok)
+ self.helper.leave(space_room_id, user2_id, tok=user2_tok)
+ # Check to make sure we actually don't have any `current_state_events` for the rooms
+ current_state_check_rows = self.get_success(
+ self.store.db_pool.simple_select_many_batch(
+ table="current_state_events",
+ column="room_id",
+ iterable=[room_id_no_info, room_id_with_info, space_room_id],
+ retcols=("event_id",),
+ keyvalues={},
+ desc="check current_state_events in test",
+ )
+ )
+ self.assertEqual(len(current_state_check_rows), 0)
+
+ # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+ # happen during event creation.
+ self.get_success(
+ self.store.db_pool.simple_delete_many(
+ table="sliding_sync_membership_snapshots",
+ column="room_id",
+ iterable=(room_id_no_info, room_id_with_info, space_room_id),
+ keyvalues={},
+ desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_historical_state",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Insert and run the background update.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Make sure the table is populated
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ # The memberships for user1
+ (room_id_no_info, user1_id),
+ (room_id_with_info, user1_id),
+ (space_room_id, user1_id),
+ # The leave memberships for user2
+ (room_id_no_info, user2_id),
+ (room_id_with_info, user2_id),
+ (space_room_id, user2_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_no_info,
+ user_id=user1_id,
+ # Because user2 kicked/banned user1 from the room
+ sender=user2_id,
+ membership_event_id=user1_membership_room_id_no_info_response[
+ "event_id"
+ ],
+ membership=test_membership,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ user1_membership_room_id_no_info_response["event_id"]
+ )
+ ).stream,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get(
+ (room_id_with_info, user1_id)
+ ),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id_with_info,
+ user_id=user1_id,
+ # Because user2 kicked/banned user1 from the room
+ sender=user2_id,
+ membership_event_id=user1_membership_room_id_with_info_response[
+ "event_id"
+ ],
+ membership=test_membership,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ user1_membership_room_id_with_info_response["event_id"]
+ )
+ ).stream,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=True,
+ tombstone_successor_room_id="another_room",
+ ),
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=space_room_id,
+ user_id=user1_id,
+ # Because user2 kicked/banned user1 from the room
+ sender=user2_id,
+ membership_event_id=user1_membership_space_room_id_response["event_id"],
+ membership=test_membership,
+ event_stream_ordering=self.get_success(
+ self.store.get_position_for_event(
+ user1_membership_space_room_id_response["event_id"]
+ )
+ ).stream,
+ has_known_state=True,
+ room_type=RoomTypes.SPACE,
+ room_name="my super duper space",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_membership_snapshots_background_update_forgotten_missing(self) -> None:
+ """
+ Test that a new row is inserted into `sliding_sync_membership_snapshots` when it
+ doesn't exist in the table yet.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+ # User1 joins the room
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+ # User1 leaves the room (we have to leave in order to forget the room)
+ self.helper.leave(room_id, user1_id, tok=user1_tok)
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+
+ # Forget the room
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/r0/rooms/{room_id}/forget",
+ content={},
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not
+ # happen during event creation.
+ self.get_success(
+ self.store.db_pool.simple_delete_many(
+ table="sliding_sync_membership_snapshots",
+ column="room_id",
+ iterable=(room_id,),
+ keyvalues={},
+ desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_forgotten_missing",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Insert and run the background update.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Make sure the table is populated
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.LEAVE,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ # Room is forgotten
+ forgotten=True,
+ ),
+ )
+ # Holds the info according to the current state when the user joined
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+
+class SlidingSyncTablesCatchUpBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase):
+ """
+ Test the background updates for catch-up after Synapse downgrade to populate the
+ `sliding_sync_joined_rooms` and `sliding_sync_membership_snapshots` tables.
+
+ This to test the "catch-up" version of the background update vs the "normal"
+ background update to populate the tables with all of the historical data. Both
+ versions share the same background update but just serve different purposes. We
+ check if the "catch-up" version needs to run on start-up based on whether there have
+ been any changes to rooms that aren't reflected in the sliding sync tables.
+
+ FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+ foreground update for
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+ https://github.com/element-hq/synapse/issues/17623)
+ """
+
+ def test_joined_background_update_catch_up_new_room(self) -> None:
+ """
+ Test that new rooms while Synapse is downgraded (making
+ `sliding_sync_joined_rooms` stale) will be caught when Synapse is upgraded and
+ the catch-up routine is run.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Instead of testing with various levels of room state that should appear in the
+ # table, we're only using one room to keep this test simple. Because the
+ # underlying background update to populate these tables is the same as this
+ # catch-up routine, we are going to rely on
+ # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+ # Make sure all of the background updates have finished before we start the
+ # catch-up. Even though it should work fine if the other background update is
+ # still running, we want to see the catch-up routine restore the progress
+ # correctly.
+ #
+ # We also don't want the normal background update messing with our results so we
+ # run this before we do our manual database clean-up to simulate new events
+ # being sent while Synapse was downgraded.
+ self.wait_for_background_updates()
+
+        # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+        # it into the table. This is to simulate a new room while Synapse was
+        # downgraded.
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_joined_rooms",
+ keyvalues={"room_id": room_id},
+ desc="simulate new room while Synapse was downgraded",
+ )
+ )
+
+ # The function under test. It should clear out stale data and start the
+ # background update to catch-up on the missing data.
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_resolve_stale_data_in_sliding_sync_joined_rooms_table",
+ _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+ )
+ )
+
+ # We shouldn't see any new data yet
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Wait for the catch-up background update to finish
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Ensure that the table is populated correctly after the catch-up background
+ # update finishes
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+
+ def test_joined_background_update_catch_up_room_state_change(self) -> None:
+ """
+ Test that new events while Synapse is downgraded (making
+ `sliding_sync_joined_rooms` stale) will be caught when Synapse is upgraded and
+ the catch-up routine is run.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Instead of testing with various levels of room state that should appear in the
+ # table, we're only using one room to keep this test simple. Because the
+ # underlying background update to populate these tables is the same as this
+ # catch-up routine, we are going to rely on
+ # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+ # Get a snapshot of the `sliding_sync_joined_rooms` table before we add some state
+ sliding_sync_joined_rooms_results_before_state = (
+ self._get_sliding_sync_joined_rooms()
+ )
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results_before_state.keys()),
+ {room_id},
+ exact=True,
+ )
+
+ # Add a room name
+ self.helper.send_state(
+ room_id,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user1_tok,
+ )
+
+ # Make sure all of the background updates have finished before we start the
+ # catch-up. Even though it should work fine if the other background update is
+ # still running, we want to see the catch-up routine restore the progress
+ # correctly.
+ #
+ # We also don't want the normal background update messing with our results so we
+ # run this before we do our manual database clean-up to simulate new events
+ # being sent while Synapse was downgraded.
+ self.wait_for_background_updates()
+
+        # Clean-up the `sliding_sync_joined_rooms` table as if the room name
+ # never made it into the table. This is to simulate the room name event
+ # being sent while Synapse was downgraded.
+ self.get_success(
+ self.store.db_pool.simple_update(
+ table="sliding_sync_joined_rooms",
+ keyvalues={"room_id": room_id},
+ updatevalues={
+ # Clear the room name
+ "room_name": None,
+ # Reset the `event_stream_ordering` back to the value before the room name
+ "event_stream_ordering": sliding_sync_joined_rooms_results_before_state[
+ room_id
+ ].event_stream_ordering,
+ },
+ desc="simulate new events while Synapse was downgraded",
+ )
+ )
+
+ # The function under test. It should clear out stale data and start the
+ # background update to catch-up on the missing data.
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_resolve_stale_data_in_sliding_sync_joined_rooms_table",
+ _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+ )
+ )
+
+ # Ensure that the stale data is deleted from the table
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Wait for the catch-up background update to finish
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Ensure that the table is populated correctly after the catch-up background
+ # update finishes
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+
+ def test_joined_background_update_catch_up_no_rooms(self) -> None:
+ """
+ Test that if you start your homeserver with no rooms on a Synapse version that
+ supports the sliding sync tables and the historical background update completes
+ (because no rooms to process), then Synapse is downgraded and new rooms are
+        created/joined; when Synapse is upgraded, the rooms will be processed when
+        the catch-up routine is run.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Instead of testing with various levels of room state that should appear in the
+ # table, we're only using one room to keep this test simple. Because the
+ # underlying background update to populate these tables is the same as this
+ # catch-up routine, we are going to rely on
+ # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+ # Make sure all of the background updates have finished before we start the
+ # catch-up. Even though it should work fine if the other background update is
+ # still running, we want to see the catch-up routine restore the progress
+ # correctly.
+ #
+ # We also don't want the normal background update messing with our results so we
+ # run this before we do our manual database clean-up to simulate room being
+ # created while Synapse was downgraded.
+ self.wait_for_background_updates()
+
+        # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+ # it into the table. This is to simulate the room being created while Synapse
+ # was downgraded.
+ self.get_success(
+ self.store.db_pool.simple_delete_many(
+ table="sliding_sync_joined_rooms",
+ column="room_id",
+ iterable=(room_id,),
+ keyvalues={},
+ desc="simulate room being created while Synapse was downgraded",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # The function under test. It should clear out stale data and start the
+ # background update to catch-up on the missing data.
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_resolve_stale_data_in_sliding_sync_joined_rooms_table",
+ _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+ )
+ )
+
+ # We still shouldn't find any data yet
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Wait for the catch-up background update to finish
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Ensure that the table is populated correctly after the catch-up background
+ # update finishes
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+
+ def test_membership_snapshots_background_update_catch_up_new_membership(
+ self,
+ ) -> None:
+ """
+ Test that completely new membership while Synapse is downgraded (making
+ `sliding_sync_membership_snapshots` stale) will be caught when Synapse is
+ upgraded and the catch-up routine is run.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Instead of testing with various levels of room state that should appear in the
+ # table, we're only using one room to keep this test simple. Because the
+ # underlying background update to populate these tables is the same as this
+ # catch-up routine, we are going to rely on
+ # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+ # User2 joins the room
+ self.helper.join(room_id, user2_id, tok=user2_tok)
+
+ # Both users are joined to the room
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+
+ # Make sure all of the background updates have finished before we start the
+ # catch-up. Even though it should work fine if the other background update is
+ # still running, we want to see the catch-up routine restore the progress
+ # correctly.
+ #
+ # We also don't want the normal background update messing with our results so we
+ # run this before we do our manual database clean-up to simulate new events
+ # being sent while Synapse was downgraded.
+ self.wait_for_background_updates()
+
+ # Clean-up the `sliding_sync_membership_snapshots` table as if the user2
+ # membership never made it into the table. This is to simulate a membership
+ # change while Synapse was downgraded.
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_membership_snapshots",
+ keyvalues={"room_id": room_id, "user_id": user2_id},
+ desc="simulate new membership while Synapse was downgraded",
+ )
+ )
+
+ # We shouldn't find the user2 membership in the table because we just deleted it
+ # in preparation for the test.
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ },
+ exact=True,
+ )
+
+ # The function under test. It should clear out stale data and start the
+ # background update to catch-up on the missing data.
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_resolve_stale_data_in_sliding_sync_membership_snapshots_table",
+ _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+ )
+ )
+
+ # We still shouldn't find any data yet
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ },
+ exact=True,
+ )
+
+ # Wait for the catch-up background update to finish
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Ensure that the table is populated correctly after the catch-up background
+ # update finishes
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+
+ def test_membership_snapshots_background_update_catch_up_membership_change(
+ self,
+ ) -> None:
+ """
+ Test that membership changes while Synapse is downgraded (making
+ `sliding_sync_membership_snapshots` stale) will be caught when Synapse is upgraded and
+ the catch-up routine is run.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Instead of testing with various levels of room state that should appear in the
+ # table, we're only using one room to keep this test simple. Because the
+ # underlying background update to populate these tables is the same as this
+ # catch-up routine, we are going to rely on
+ # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+ # User2 joins the room
+ self.helper.join(room_id, user2_id, tok=user2_tok)
+
+ # Both users are joined to the room
+ sliding_sync_membership_snapshots_results_before_membership_changes = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(
+ sliding_sync_membership_snapshots_results_before_membership_changes.keys()
+ ),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+
+ # User2 leaves the room
+ self.helper.leave(room_id, user2_id, tok=user2_tok)
+
+ # Make sure all of the background updates have finished before we start the
+ # catch-up. Even though it should work fine if the other background update is
+ # still running, we want to see the catch-up routine restore the progress
+ # correctly.
+ #
+ # We also don't want the normal background update messing with our results so we
+ # run this before we do our manual database clean-up to simulate new events
+ # being sent while Synapse was downgraded.
+ self.wait_for_background_updates()
+
+ # Rollback the `sliding_sync_membership_snapshots` table as if the user2
+ # membership never made it into the table. This is to simulate a membership
+ # change while Synapse was downgraded.
+ self.get_success(
+ self.store.db_pool.simple_update(
+ table="sliding_sync_membership_snapshots",
+ keyvalues={"room_id": room_id, "user_id": user2_id},
+ updatevalues={
+ # Reset everything back to the value before user2 left the room
+ "membership": sliding_sync_membership_snapshots_results_before_membership_changes[
+ (room_id, user2_id)
+ ].membership,
+ "membership_event_id": sliding_sync_membership_snapshots_results_before_membership_changes[
+ (room_id, user2_id)
+ ].membership_event_id,
+ "event_stream_ordering": sliding_sync_membership_snapshots_results_before_membership_changes[
+ (room_id, user2_id)
+ ].event_stream_ordering,
+ },
+ desc="simulate membership change while Synapse was downgraded",
+ )
+ )
+
+ # We should see user2 still joined to the room because we made that change in
+ # preparation for the test.
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+ sliding_sync_membership_snapshots_results_before_membership_changes[
+ (room_id, user1_id)
+ ],
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+ sliding_sync_membership_snapshots_results_before_membership_changes[
+ (room_id, user2_id)
+ ],
+ )
+
+ # The function under test. It should clear out stale data and start the
+ # background update to catch-up on the missing data.
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_resolve_stale_data_in_sliding_sync_membership_snapshots_table",
+ _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+ )
+ )
+
+ # Ensure that the stale data is deleted from the table
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ },
+ exact=True,
+ )
+
+ # Wait for the catch-up background update to finish
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Ensure that the table is populated correctly after the catch-up background
+ # update finishes
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+
+ def test_membership_snapshots_background_update_catch_up_no_membership(
+ self,
+ ) -> None:
+ """
+ Test that if you start your homeserver with no rooms on a Synapse version that
+ supports the sliding sync tables and the historical background update completes
+ (because no rooms/membership to process), then Synapse is downgraded and new
+ rooms are created/joined; when Synapse is upgraded, the rooms will be processed
+    when the catch-up routine is run.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Instead of testing with various levels of room state that should appear in the
+ # table, we're only using one room to keep this test simple. Because the
+ # underlying background update to populate these tables is the same as this
+ # catch-up routine, we are going to rely on
+ # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+ # User2 joins the room
+ self.helper.join(room_id, user2_id, tok=user2_tok)
+
+ # Make sure all of the background updates have finished before we start the
+ # catch-up. Even though it should work fine if the other background update is
+ # still running, we want to see the catch-up routine restore the progress
+ # correctly.
+ #
+ # We also don't want the normal background update messing with our results so we
+ # run this before we do our manual database clean-up to simulate new events
+ # being sent while Synapse was downgraded.
+ self.wait_for_background_updates()
+
+        # Rollback the `sliding_sync_membership_snapshots` table as if the
+        # memberships never made it into the table. This is to simulate the
+        # room being created while Synapse was downgraded.
+ self.get_success(
+ self.store.db_pool.simple_delete_many(
+ table="sliding_sync_membership_snapshots",
+ column="room_id",
+ iterable=(room_id,),
+ keyvalues={},
+ desc="simulate room being created while Synapse was downgraded",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # The function under test. It should clear out stale data and start the
+ # background update to catch-up on the missing data.
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_resolve_stale_data_in_sliding_sync_membership_snapshots_table",
+ _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+ )
+ )
+
+ # We still shouldn't find any data yet
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Wait for the catch-up background update to finish
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Ensure that the table is populated correctly after the catch-up background
+ # update finishes
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+
+
+class SlidingSyncMembershipSnapshotsTableFixForgottenColumnBackgroundUpdatesTestCase(
+ SlidingSyncTablesTestCaseBase
+):
+ """
+ Test the background updates that fixes `sliding_sync_membership_snapshots` ->
+ `forgotten` column.
+ """
+
+ def test_membership_snapshots_fix_forgotten_column_background_update(self) -> None:
+ """
+ Test that the background update, updates the `sliding_sync_membership_snapshots`
+ -> `forgotten` column to be in sync with the `room_memberships` table.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ # User1 joins the room
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+
+ # Leave and forget the room
+ self.helper.leave(room_id, user1_id, tok=user1_tok)
+ # User1 forgets the room
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/r0/rooms/{room_id}/forget",
+ content={},
+ access_token=user1_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # Re-join the room
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+
+ # Reset `sliding_sync_membership_snapshots` table as if the `forgotten` column
+ # got out of sync from the `room_memberships` table from the previous flawed
+ # code.
+ self.get_success(
+ self.store.db_pool.simple_update_one(
+ table="sliding_sync_membership_snapshots",
+ keyvalues={"room_id": room_id, "user_id": user1_id},
+ updatevalues={"forgotten": 1},
+ desc="sliding_sync_membership_snapshots.test_membership_snapshots_fix_forgotten_column_background_update",
+ )
+ )
+
+ # Insert and run the background update.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE,
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # Make sure the table is populated
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ # Holds the info according to the current state when the user joined.
+ #
+ # We only care about checking on user1 as that's what we reset and expect to be
+ # correct now
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ # We should see the room as no longer forgotten
+ forgotten=False,
+ ),
+ )
diff --git a/tests/storage/test_state_deletion.py b/tests/storage/test_state_deletion.py
new file mode 100644
index 0000000000..a4d318ae20
--- /dev/null
+++ b/tests/storage/test_state_deletion.py
@@ -0,0 +1,475 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2025 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+
+import logging
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.test_utils.event_injection import create_event
+from tests.unittest import HomeserverTestCase
+
+logger = logging.getLogger(__name__)
+
+
+class StateDeletionStoreTestCase(HomeserverTestCase):
+ """Tests for the StateDeletionStore."""
+
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.state_store = hs.get_datastores().state
+ self.state_deletion_store = hs.get_datastores().state_deletion
+ self.purge_events = hs.get_storage_controllers().purge_events
+
+ # We want to disable the automatic deletion of state groups in the
+ # background, so we can do controlled tests.
+ self.purge_events._delete_state_loop_call.stop()
+
+ self.user_id = self.register_user("test", "password")
+ tok = self.login("test", "password")
+ self.room_id = self.helper.create_room_as(self.user_id, tok=tok)
+
+ def check_if_can_be_deleted(self, state_group: int) -> bool:
+ """Check if the state group is pending deletion."""
+
+ state_group_to_sequence_number = self.get_success(
+ self.state_deletion_store.get_pending_deletions([state_group])
+ )
+
+ can_be_deleted = self.get_success(
+ self.state_deletion_store.db_pool.runInteraction(
+ "test_existing_pending_deletion_is_cleared",
+ self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
+ state_group_to_sequence_number,
+ )
+ )
+
+ return state_group in can_be_deleted
+
+ def test_no_deletion(self) -> None:
+ """Test that calling persisting_state_group_references is fine if
+ nothing is pending deletion"""
+ event, context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ type="m.test",
+ sender=self.user_id,
+ )
+ )
+
+ ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+ [(event, context)]
+ )
+
+ self.get_success(ctx_mgr.__aenter__())
+ self.get_success(ctx_mgr.__aexit__(None, None, None))
+
+ def test_no_deletion_error(self) -> None:
+ """Test that calling persisting_state_group_references is fine if
+ nothing is pending deletion, but an error occurs."""
+
+ event, context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ type="m.test",
+ sender=self.user_id,
+ )
+ )
+
+ ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+ [(event, context)]
+ )
+
+ self.get_success(ctx_mgr.__aenter__())
+ self.get_success(ctx_mgr.__aexit__(Exception, Exception("test"), None))
+
+ def test_existing_pending_deletion_is_cleared(self) -> None:
+ """Test that the pending deletion flag gets cleared when the state group
+ gets persisted."""
+
+ event, context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ type="m.test",
+ state_key="",
+ sender=self.user_id,
+ )
+ )
+ assert context.state_group is not None
+
+ # Mark a state group that we're referencing as pending deletion.
+ self.get_success(
+ self.state_deletion_store.mark_state_groups_as_pending_deletion(
+ [context.state_group]
+ )
+ )
+
+ ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+ [(event, context)]
+ )
+
+ self.get_success(ctx_mgr.__aenter__())
+ self.get_success(ctx_mgr.__aexit__(None, None, None))
+
+ # The pending deletion flag should be cleared
+ pending_deletion = self.get_success(
+ self.state_deletion_store.db_pool.simple_select_one_onecol(
+ table="state_groups_pending_deletion",
+ keyvalues={"state_group": context.state_group},
+ retcol="1",
+ allow_none=True,
+ desc="test_existing_pending_deletion_is_cleared",
+ )
+ )
+ self.assertIsNone(pending_deletion)
+
+ def test_pending_deletion_is_cleared_during_persist(self) -> None:
+ """Test that the pending deletion flag is cleared when a state group
+ gets marked for deletion during persistence"""
+
+ event, context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ type="m.test",
+ state_key="",
+ sender=self.user_id,
+ )
+ )
+ assert context.state_group is not None
+
+ ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+ [(event, context)]
+ )
+ self.get_success(ctx_mgr.__aenter__())
+
+ # Mark the state group that we're referencing as pending deletion,
+ # *after* we have started persisting.
+ self.get_success(
+ self.state_deletion_store.mark_state_groups_as_pending_deletion(
+ [context.state_group]
+ )
+ )
+
+ self.get_success(ctx_mgr.__aexit__(None, None, None))
+
+ # The pending deletion flag should be cleared
+ pending_deletion = self.get_success(
+ self.state_deletion_store.db_pool.simple_select_one_onecol(
+ table="state_groups_pending_deletion",
+ keyvalues={"state_group": context.state_group},
+ retcol="1",
+ allow_none=True,
+ desc="test_existing_pending_deletion_is_cleared",
+ )
+ )
+ self.assertIsNone(pending_deletion)
+
+ def test_deletion_check(self) -> None:
+ """Test that the `get_state_groups_that_can_be_purged_txn` check is
+ correct during different points of the lifecycle of persisting an
+ event."""
+ event, context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ type="m.test",
+ state_key="",
+ sender=self.user_id,
+ )
+ )
+ assert context.state_group is not None
+
+ self.get_success(
+ self.state_deletion_store.mark_state_groups_as_pending_deletion(
+ [context.state_group]
+ )
+ )
+
+        # We shouldn't be able to delete the state group as not enough time has passed
+ can_be_deleted = self.check_if_can_be_deleted(context.state_group)
+ self.assertFalse(can_be_deleted)
+
+ # After enough time we can delete the state group
+ self.reactor.advance(
+ 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+ )
+ can_be_deleted = self.check_if_can_be_deleted(context.state_group)
+ self.assertTrue(can_be_deleted)
+
+ ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+ [(event, context)]
+ )
+ self.get_success(ctx_mgr.__aenter__())
+
+ # But once we start persisting we can't delete the state group
+ can_be_deleted = self.check_if_can_be_deleted(context.state_group)
+ self.assertFalse(can_be_deleted)
+
+ self.get_success(ctx_mgr.__aexit__(None, None, None))
+
+ # The pending deletion flag should remain cleared after persistence has
+ # finished.
+ can_be_deleted = self.check_if_can_be_deleted(context.state_group)
+ self.assertFalse(can_be_deleted)
+
+ def test_deletion_error_during_persistence(self) -> None:
+ """Test that state groups remain marked as pending deletion if persisting
+ the event fails."""
+
+ event, context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ type="m.test",
+ state_key="",
+ sender=self.user_id,
+ )
+ )
+ assert context.state_group is not None
+
+ # Mark a state group that we're referencing as pending deletion.
+ self.get_success(
+ self.state_deletion_store.mark_state_groups_as_pending_deletion(
+ [context.state_group]
+ )
+ )
+
+ ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+ [(event, context)]
+ )
+
+ self.get_success(ctx_mgr.__aenter__())
+ self.get_success(ctx_mgr.__aexit__(Exception, Exception("test"), None))
+
+ # We should be able to delete the state group after a certain amount of
+ # time
+ self.reactor.advance(
+ 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+ )
+ can_be_deleted = self.check_if_can_be_deleted(context.state_group)
+ self.assertTrue(can_be_deleted)
+
+ def test_race_between_check_and_insert(self) -> None:
+ """Check that we correctly handle the race where we go to delete a
+ state group, check that it is unreferenced, and then it becomes
+ referenced just before we delete it."""
+
+ event, context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ type="m.test",
+ state_key="",
+ sender=self.user_id,
+ )
+ )
+ assert context.state_group is not None
+
+ # Mark a state group that we're referencing as pending deletion.
+ self.get_success(
+ self.state_deletion_store.mark_state_groups_as_pending_deletion(
+ [context.state_group]
+ )
+ )
+
+ # Advance time enough so we can delete the state group
+ self.reactor.advance(
+ 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+ )
+
+ # Check that we'd be able to delete this state group.
+ state_group_to_sequence_number = self.get_success(
+ self.state_deletion_store.get_pending_deletions([context.state_group])
+ )
+
+ can_be_deleted = self.get_success(
+ self.state_deletion_store.db_pool.runInteraction(
+ "test_existing_pending_deletion_is_cleared",
+ self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
+ state_group_to_sequence_number,
+ )
+ )
+ self.assertCountEqual(can_be_deleted, [context.state_group])
+
+ # ... in the real world we'd check that the state group isn't referenced here ...
+
+ # Now we persist the event to reference the state group, *after* we
+ # check that the state group wasn't referenced
+ ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+ [(event, context)]
+ )
+
+ self.get_success(ctx_mgr.__aenter__())
+ self.get_success(ctx_mgr.__aexit__(Exception, Exception("test"), None))
+
+ # We simulate a pause (required to hit the race)
+ self.reactor.advance(
+ 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+ )
+
+ # We should no longer be able to delete the state group, without having
+        # to recheck if it's referenced.
+ can_be_deleted = self.get_success(
+ self.state_deletion_store.db_pool.runInteraction(
+ "test_existing_pending_deletion_is_cleared",
+ self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
+ state_group_to_sequence_number,
+ )
+ )
+ self.assertCountEqual(can_be_deleted, [])
+
+ def test_remove_ancestors_from_can_delete(self) -> None:
+ """Test that if a state group is not ready to be deleted, we also don't
+ delete anything that is referenced by it"""
+
+ event, context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ type="m.test",
+ state_key="",
+ sender=self.user_id,
+ )
+ )
+ assert context.state_group is not None
+
+ # Create a new state group that references the one from the event
+ new_state_group = self.get_success(
+ self.state_store.store_state_group(
+ event.event_id,
+ event.room_id,
+ prev_group=context.state_group,
+ delta_ids={},
+ current_state_ids=None,
+ )
+ )
+
+ # Mark them both as pending deletion
+ self.get_success(
+ self.state_deletion_store.mark_state_groups_as_pending_deletion(
+ [context.state_group, new_state_group]
+ )
+ )
+
+ # Advance time enough so we can delete the state group so they're both
+ # ready for deletion.
+ self.reactor.advance(
+ 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+ )
+
+ # We can now delete both state groups
+ self.assertTrue(self.check_if_can_be_deleted(context.state_group))
+ self.assertTrue(self.check_if_can_be_deleted(new_state_group))
+
+ # Use the new_state_group to bump its deletion time
+ self.get_success(
+ self.state_store.store_state_group(
+ event.event_id,
+ event.room_id,
+ prev_group=new_state_group,
+ delta_ids={},
+ current_state_ids=None,
+ )
+ )
+
+ # We should now not be able to delete either of the state groups.
+ state_group_to_sequence_number = self.get_success(
+ self.state_deletion_store.get_pending_deletions(
+ [context.state_group, new_state_group]
+ )
+ )
+
+ # We shouldn't be able to delete the state group as not enough time has passed
+ can_be_deleted = self.get_success(
+ self.state_deletion_store.db_pool.runInteraction(
+ "test_existing_pending_deletion_is_cleared",
+ self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
+ state_group_to_sequence_number,
+ )
+ )
+ self.assertCountEqual(can_be_deleted, [])
+
+ def test_newly_referenced_state_group_gets_removed_from_pending(self) -> None:
+ """Check that if a state group marked for deletion becomes referenced
+ (without being removed from pending deletion table), it gets removed
+ from pending deletion table."""
+
+ event, context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ type="m.test",
+ state_key="",
+ sender=self.user_id,
+ )
+ )
+ assert context.state_group is not None
+
+ # Mark a state group that we're referencing as pending deletion.
+ self.get_success(
+ self.state_deletion_store.mark_state_groups_as_pending_deletion(
+ [context.state_group]
+ )
+ )
+
+        # Advance time enough so we can delete the state group, so it's
+        # ready for deletion.
+ self.reactor.advance(
+ 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+ )
+
+ # Manually insert into the table to mimic the state group getting used.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ table="event_to_state_groups",
+ values={"state_group": context.state_group, "event_id": event.event_id},
+ desc="test_newly_referenced_state_group_gets_removed_from_pending",
+ )
+ )
+
+ # Manually run the background task to delete pending state groups.
+ self.get_success(self.purge_events._delete_state_groups_loop())
+
+ # The pending deletion flag should be cleared...
+ pending_deletion = self.get_success(
+ self.state_deletion_store.db_pool.simple_select_one_onecol(
+ table="state_groups_pending_deletion",
+ keyvalues={"state_group": context.state_group},
+ retcol="1",
+ allow_none=True,
+ desc="test_newly_referenced_state_group_gets_removed_from_pending",
+ )
+ )
+ self.assertIsNone(pending_deletion)
+
+ # .. but the state should not have been deleted.
+ state = self.get_success(
+ self.state_store._get_state_for_groups([context.state_group])
+ )
+ self.assertGreater(len(state[context.state_group]), 0)
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index 7b7590da76..0f58dc8a0a 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -27,7 +27,13 @@ from immutabledict import immutabledict
from twisted.test.proto_helpers import MemoryReactor
-from synapse.api.constants import Direction, EventTypes, Membership, RelationTypes
+from synapse.api.constants import (
+ Direction,
+ EventTypes,
+ JoinRules,
+ Membership,
+ RelationTypes,
+)
from synapse.api.filtering import Filter
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events import FrozenEventV3
@@ -147,7 +153,7 @@ class PaginationTestCase(HomeserverTestCase):
def _filter_messages(self, filter: JsonDict) -> List[str]:
"""Make a request to /messages with a filter, returns the chunk of events."""
- events, next_key = self.get_success(
+ events, next_key, _ = self.get_success(
self.hs.get_datastores().main.paginate_room_events_by_topological_ordering(
room_id=self.room_id,
from_key=self.from_token.room_key,
@@ -1154,7 +1160,7 @@ class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase):
room_id=room_id1,
event_id=None,
event_pos=dummy_state_pos,
- membership="leave",
+ membership=Membership.LEAVE,
sender=None, # user1_id,
prev_event_id=join_response1["event_id"],
prev_event_pos=join_pos1,
@@ -1164,6 +1170,75 @@ class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase):
],
)
+ def test_state_reset2(self) -> None:
+ """
+ Test a state reset scenario where the user gets removed from the room (when
+ there is no corresponding leave event)
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, is_public=True, tok=user2_tok)
+
+ event_response = self.helper.send(room_id1, "test", tok=user2_tok)
+ event_id = event_response["event_id"]
+
+ user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+ user1_join_pos = self.get_success(
+ self.store.get_position_for_event(user1_join_response["event_id"])
+ )
+
+ before_reset_token = self.event_sources.get_current_token()
+
+ # Trigger a state reset
+ join_rule_event, join_rule_context = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[event_id],
+ type=EventTypes.JoinRules,
+ state_key="",
+ content={"join_rule": JoinRules.INVITE},
+ sender=user2_id,
+ room_id=room_id1,
+ room_version=self.get_success(self.store.get_room_version_id(room_id1)),
+ )
+ )
+ _, join_rule_event_pos, _ = self.get_success(
+ self.persistence.persist_event(join_rule_event, join_rule_context)
+ )
+
+ after_reset_token = self.event_sources.get_current_token()
+
+ membership_changes = self.get_success(
+ self.store.get_current_state_delta_membership_changes_for_user(
+ user1_id,
+ from_key=before_reset_token.room_key,
+ to_key=after_reset_token.room_key,
+ )
+ )
+
+ # Let the whole diff show on failure
+ self.maxDiff = None
+ self.assertEqual(
+ membership_changes,
+ [
+ CurrentStateDeltaMembership(
+ room_id=room_id1,
+ event_id=None,
+ # The position where the state reset happened
+ event_pos=join_rule_event_pos,
+ membership=Membership.LEAVE,
+ sender=None,
+ prev_event_id=user1_join_response["event_id"],
+ prev_event_pos=user1_join_pos,
+ prev_membership="join",
+ prev_sender=user1_id,
+ ),
+ ],
+ )
+
def test_excluded_room_ids(self) -> None:
"""
Test that the `excluded_room_ids` option excludes changes from the specified rooms.
@@ -1384,20 +1459,25 @@ class GetCurrentStateDeltaMembershipChangesForUserFederationTestCase(
)
)
- with patch.object(
- self.room_member_handler.federation_handler.federation_client,
- "make_membership_event",
- mock_make_membership_event,
- ), patch.object(
- self.room_member_handler.federation_handler.federation_client,
- "send_join",
- mock_send_join,
- ), patch(
- "synapse.event_auth._is_membership_change_allowed",
- return_value=None,
- ), patch(
- "synapse.handlers.federation_event.check_state_dependent_auth_rules",
- return_value=None,
+ with (
+ patch.object(
+ self.room_member_handler.federation_handler.federation_client,
+ "make_membership_event",
+ mock_make_membership_event,
+ ),
+ patch.object(
+ self.room_member_handler.federation_handler.federation_client,
+ "send_join",
+ mock_send_join,
+ ),
+ patch(
+ "synapse.event_auth._is_membership_change_allowed",
+ return_value=None,
+ ),
+ patch(
+ "synapse.handlers.federation_event.check_state_dependent_auth_rules",
+ return_value=None,
+ ),
):
self.get_success(
self.room_member_handler.update_membership(
|