diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py
index 46d829b062..5773172ab8 100644
--- a/tests/storage/databases/main/test_events_worker.py
+++ b/tests/storage/databases/main/test_events_worker.py
@@ -35,66 +35,45 @@ from synapse.util import Clock
from synapse.util.async_helpers import yieldable_gather_results
from tests import unittest
+from tests.test_utils.event_injection import create_event, inject_event
class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
def prepare(self, reactor, clock, hs):
+ self.hs = hs
self.store: EventsWorkerStore = hs.get_datastores().main
- # insert some test data
- for rid in ("room1", "room2"):
- self.get_success(
- self.store.db_pool.simple_insert(
- "rooms",
- {"room_id": rid, "room_version": 4},
- )
- )
+ self.user = self.register_user("user", "pass")
+ self.token = self.login(self.user, "pass")
+ self.room_id = self.helper.create_room_as(self.user, tok=self.token)
self.event_ids: List[str] = []
- for idx, rid in enumerate(
- (
- "room1",
- "room1",
- "room1",
- "room2",
- )
- ):
- event_json = {"type": f"test {idx}", "room_id": rid}
- event = make_event_from_dict(event_json, room_version=RoomVersions.V4)
- event_id = event.event_id
-
- self.get_success(
- self.store.db_pool.simple_insert(
- "events",
- {
- "event_id": event_id,
- "room_id": rid,
- "topological_ordering": idx,
- "stream_ordering": idx,
- "type": event.type,
- "processed": True,
- "outlier": False,
- },
- )
- )
- self.get_success(
- self.store.db_pool.simple_insert(
- "event_json",
- {
- "event_id": event_id,
- "room_id": rid,
- "json": json.dumps(event_json),
- "internal_metadata": "{}",
- "format_version": 3,
- },
+ for i in range(3):
+ event = self.get_success(
+ inject_event(
+ hs,
+ room_version=RoomVersions.V7.identifier,
+ room_id=self.room_id,
+ sender=self.user,
+ type="test_event_type",
+ content={"body": f"foobarbaz{i}"},
)
)
- self.event_ids.append(event_id)
+
+ self.event_ids.append(event.event_id)
def test_simple(self):
with LoggingContext(name="test") as ctx:
res = self.get_success(
- self.store.have_seen_events("room1", [self.event_ids[0], "event19"])
+ self.store.have_seen_events(
+ self.room_id, [self.event_ids[0], "eventdoesnotexist"]
+ )
)
self.assertEqual(res, {self.event_ids[0]})
@@ -104,22 +83,87 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
# a second lookup of the same events should cause no queries
with LoggingContext(name="test") as ctx:
res = self.get_success(
- self.store.have_seen_events("room1", [self.event_ids[0], "event19"])
+ self.store.have_seen_events(
+ self.room_id, [self.event_ids[0], "eventdoesnotexist"]
+ )
)
self.assertEqual(res, {self.event_ids[0]})
self.assertEqual(ctx.get_resource_usage().db_txn_count, 0)
- def test_query_via_event_cache(self):
- # fetch an event into the event cache
- self.get_success(self.store.get_event(self.event_ids[0]))
+ def test_persisting_event_invalidates_cache(self):
+ """
+ Test to make sure that the `have_seen_event` cache
+ is invalidated after we persist an event, and that it
+ then returns the updated value.
+ """
+ event, event_context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ sender=self.user,
+ type="test_event_type",
+ content={"body": "garply"},
+ )
+ )
- # looking it up should now cause no db hits
with LoggingContext(name="test") as ctx:
+ # First, check `have_seen_event` for an event we have not seen yet
+ # to prime the cache with a `false` value.
res = self.get_success(
- self.store.have_seen_events("room1", [self.event_ids[0]])
+ self.store.have_seen_events(event.room_id, [event.event_id])
)
- self.assertEqual(res, {self.event_ids[0]})
- self.assertEqual(ctx.get_resource_usage().db_txn_count, 0)
+ self.assertEqual(res, set())
+
+ # That should result in a single db query to look up the event
+ self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
+
+ # Persist the event which should invalidate or prefill the
+ # `have_seen_event` cache so we don't return stale values.
+ persistence = self.hs.get_storage_controllers().persistence
+ self.get_success(
+ persistence.persist_event(
+ event,
+ event_context,
+ )
+ )
+
+ with LoggingContext(name="test") as ctx:
+ # Check `have_seen_event` again; now that we have persisted the event,
+ # it should report that we have seen it.
+ res = self.get_success(
+ self.store.have_seen_events(event.room_id, [event.event_id])
+ )
+ self.assertEqual(res, {event.event_id})
+
+ # That should result in a single db query to look up the event
+ self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
+
+ def test_invalidate_cache_by_room_id(self):
+ """
+ Test to make sure that all events associated with the given `(room_id,)`
+ are invalidated in the `have_seen_event` cache.
+ """
+ with LoggingContext(name="test") as ctx:
+ # Prime the cache with some values
+ res = self.get_success(
+ self.store.have_seen_events(self.room_id, self.event_ids)
+ )
+ self.assertEqual(res, set(self.event_ids))
+
+ # That should result in a single db query to look up the events
+ self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
+
+ # Clear the cache of any events associated with the `room_id`
+ self.store.have_seen_event.invalidate((self.room_id,))
+
+ with LoggingContext(name="test") as ctx:
+ res = self.get_success(
+ self.store.have_seen_events(self.room_id, self.event_ids)
+ )
+ self.assertEqual(res, set(self.event_ids))
+
+ # Since we cleared the cache, it should result in another db query to look up the events
+ self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
class EventCacheTestCase(unittest.HomeserverTestCase):
@@ -254,7 +298,7 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase):
"room_id": self.room_id,
"json": json.dumps(event_json),
"internal_metadata": "{}",
- "format_version": EventFormatVersions.V3,
+ "format_version": EventFormatVersions.ROOM_V4_PLUS,
},
)
)
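The invalidation behaviour exercised by `test_persisting_event_invalidates_cache` above follows a prime/persist/invalidate/re-read pattern. Below is a minimal, self-contained sketch of that pattern; the `EventStore` class and its dict cache are illustrative stand-ins, not Synapse's actual `have_seen_event` cache implementation:

```python
from typing import Dict, Set

class EventStore:
    """Toy store: a set of persisted event IDs plus a lookup cache."""

    def __init__(self) -> None:
        self._persisted: Set[str] = set()
        # Cache of event_id -> "have we seen it?". (Synapse's real cache
        # is keyed on (room_id, event_id); this is a simplification.)
        self._cache: Dict[str, bool] = {}

    def have_seen_event(self, event_id: str) -> bool:
        if event_id not in self._cache:  # cache miss -> "db" lookup
            self._cache[event_id] = event_id in self._persisted
        return self._cache[event_id]

    def persist_event(self, event_id: str) -> None:
        self._persisted.add(event_id)
        # The crucial step under test: without this, a previously-primed
        # `False` entry would be returned forever.
        self._cache.pop(event_id, None)

store = EventStore()
assert store.have_seen_event("$e1") is False  # primes a False entry
store.persist_event("$e1")
assert store.have_seen_event("$e1") is True   # stale value was evicted
```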
diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py
new file mode 100644
index 0000000000..c4f12d81d7
--- /dev/null
+++ b/tests/storage/databases/main/test_receipts.py
@@ -0,0 +1,209 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict, Optional, Sequence, Tuple
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.database import LoggingTransaction
+from synapse.util import Clock
+
+from tests.unittest import HomeserverTestCase
+
+
+class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase):
+
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer):
+ self.store = hs.get_datastores().main
+ self.user_id = self.register_user("foo", "pass")
+ self.token = self.login("foo", "pass")
+ self.room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+ self.other_room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+ def _test_background_receipts_unique_index(
+ self,
+ update_name: str,
+ index_name: str,
+ table: str,
+ receipts: Dict[Tuple[str, str, str], Sequence[Dict[str, Any]]],
+ expected_unique_receipts: Dict[Tuple[str, str, str], Optional[Dict[str, Any]]],
+ ):
+ """Test that the background update to uniqueify non-thread receipts in
+ the given receipts table works properly.
+
+ Args:
+ update_name: The name of the background update to test.
+ index_name: The name of the index that the background update creates.
+ table: The table of receipts that the background update fixes.
+ receipts: The test data containing duplicate receipts: for each
+ `(room_id, receipt_type, user_id)` key, a list of receipt rows
+ to insert.
+ expected_unique_receipts: A dictionary of `(room_id, receipt_type, user_id)`
+ keys and expected receipt key-values after duplicate receipts have been
+ removed.
+ """
+ # First, undo the background update.
+ def drop_receipts_unique_index(txn: LoggingTransaction) -> None:
+ txn.execute(f"DROP INDEX IF EXISTS {index_name}")
+
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "drop_receipts_unique_index",
+ drop_receipts_unique_index,
+ )
+ )
+
+ # Populate the receipts table, including duplicates.
+ for (room_id, receipt_type, user_id), rows in receipts.items():
+ for row in rows:
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ table,
+ {
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ "thread_id": None,
+ "data": "{}",
+ **row,
+ },
+ )
+ )
+
+ # Insert and run the background update.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": update_name,
+ "progress_json": "{}",
+ },
+ )
+ )
+
+ self.store.db_pool.updates._all_done = False
+
+ self.wait_for_background_updates()
+
+ # Check that the remaining receipts match expectations.
+ for (
+ room_id,
+ receipt_type,
+ user_id,
+ ), expected_row in expected_unique_receipts.items():
+ # Include the receipt key in the returned columns, for more informative
+ # assertion messages.
+ columns = ["room_id", "receipt_type", "user_id"]
+ if expected_row is not None:
+ columns += expected_row.keys()
+
+ rows = self.get_success(
+ self.store.db_pool.simple_select_list(
+ table=table,
+ keyvalues={
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ # `simple_select_list` does not support NULL filters,
+ # so skip the filter on `thread_id`.
+ },
+ retcols=columns,
+ desc="get_receipt",
+ )
+ )
+
+ if expected_row is not None:
+ self.assertEqual(
+ len(rows),
+ 1,
+ f"Background update did not leave behind latest receipt in {table}",
+ )
+ self.assertEqual(
+ rows[0],
+ {
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ **expected_row,
+ },
+ )
+ else:
+ self.assertEqual(
+ len(rows),
+ 0,
+ f"Background update did not remove all duplicate receipts from {table}",
+ )
+
+ def test_background_receipts_linearized_unique_index(self):
+ """Test that the background update to uniqueify non-thread receipts in
+ `receipts_linearized` works properly.
+ """
+ self._test_background_receipts_unique_index(
+ "receipts_linearized_unique_index",
+ "receipts_linearized_unique_index",
+ "receipts_linearized",
+ receipts={
+ (self.room_id, "m.read", self.user_id): [
+ {"stream_id": 5, "event_id": "$some_event"},
+ {"stream_id": 6, "event_id": "$some_event"},
+ ],
+ (self.other_room_id, "m.read", self.user_id): [
+ {"stream_id": 7, "event_id": "$some_event"}
+ ],
+ },
+ expected_unique_receipts={
+ (self.room_id, "m.read", self.user_id): {"stream_id": 6},
+ (self.other_room_id, "m.read", self.user_id): {"stream_id": 7},
+ },
+ )
+
+ def test_background_receipts_graph_unique_index(self):
+ """Test that the background update to uniqueify non-thread receipts in
+ `receipts_graph` works properly.
+ """
+ self._test_background_receipts_unique_index(
+ "receipts_graph_unique_index",
+ "receipts_graph_unique_index",
+ "receipts_graph",
+ receipts={
+ (self.room_id, "m.read", self.user_id): [
+ {
+ "event_ids": '["$some_event"]',
+ },
+ {
+ "event_ids": '["$some_event"]',
+ },
+ ],
+ (self.other_room_id, "m.read", self.user_id): [
+ {
+ "event_ids": '["$some_event"]',
+ }
+ ],
+ },
+ expected_unique_receipts={
+ (self.room_id, "m.read", self.user_id): None,
+ (self.other_room_id, "m.read", self.user_id): {
+ "event_ids": '["$some_event"]'
+ },
+ },
+ )
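For context, the background updates tested above have to de-duplicate existing rows before a unique index over `(room_id, receipt_type, user_id)` (for rows with `thread_id IS NULL`) can be created. A rough sqlite3 sketch of that dedup-then-index sequence, keeping the row with the highest `stream_id` as `test_background_receipts_linearized_unique_index` expects; the exact SQL Synapse runs may differ:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """CREATE TABLE receipts_linearized (
        room_id TEXT, receipt_type TEXT, user_id TEXT,
        thread_id TEXT, stream_id BIGINT, event_id TEXT, data TEXT)"""
)
rows = [
    ("!room:hs", "m.read", "@foo:hs", None, 5, "$some_event", "{}"),
    ("!room:hs", "m.read", "@foo:hs", None, 6, "$some_event", "{}"),  # duplicate
]
conn.executemany("INSERT INTO receipts_linearized VALUES (?,?,?,?,?,?,?)", rows)

# Deduplicate: for each (room_id, receipt_type, user_id), keep only the
# non-thread receipt with the highest stream_id.
conn.execute(
    """DELETE FROM receipts_linearized
       WHERE thread_id IS NULL AND rowid NOT IN (
           SELECT rowid FROM receipts_linearized r
           WHERE r.thread_id IS NULL
             AND r.stream_id = (
               SELECT MAX(stream_id) FROM receipts_linearized
               WHERE room_id = r.room_id AND receipt_type = r.receipt_type
                 AND user_id = r.user_id AND thread_id IS NULL))"""
)

# Only now can the partial unique index be created without violations.
conn.execute(
    """CREATE UNIQUE INDEX receipts_linearized_unique_index
       ON receipts_linearized (room_id, receipt_type, user_id)
       WHERE thread_id IS NULL"""
)
assert conn.execute("SELECT stream_id FROM receipts_linearized").fetchall() == [(6,)]
```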
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index cce8e75c74..40e58f8199 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -54,7 +54,6 @@ class SQLBaseStoreTestCase(unittest.TestCase):
sqlite_config = {"name": "sqlite3"}
engine = create_engine(sqlite_config)
fake_engine = Mock(wraps=engine)
- fake_engine.can_native_upsert = False
fake_engine.in_transaction.return_value = False
db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine)
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index f37505b6cf..8e7db2c4ec 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -28,7 +28,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
"""
for device_id in device_ids:
- stream_id = self.get_success(
+ self.get_success(
self.store.add_device_change_to_streams(
user_id, [device_id], ["!some:room"]
)
@@ -39,7 +39,6 @@ class DeviceStoreTestCase(HomeserverTestCase):
user_id=user_id,
device_id=device_id,
room_id="!some:room",
- stream_id=stream_id,
hosts=[host],
context={},
)
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index a0ce077a99..de9f4af2de 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -531,7 +531,9 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
)
)
self.get_success(
- event_handler.handle_new_client_event(self.requester, event, context)
+ event_handler.handle_new_client_event(
+ self.requester, events_and_context=[(event, context)]
+ )
)
state1 = set(self.get_success(context.get_current_state_ids()).values())
@@ -549,7 +551,9 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
)
)
self.get_success(
- event_handler.handle_new_client_event(self.requester, event, context)
+ event_handler.handle_new_client_event(
+ self.requester, events_and_context=[(event, context)]
+ )
)
state2 = set(self.get_success(context.get_current_state_ids()).values())
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index d92a9ac5b7..853db930d6 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -12,25 +12,46 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Tuple, Union
+import datetime
+from typing import Dict, List, Tuple, Union
import attr
from parameterized import parameterized
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import EventTypes
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
RoomVersion,
)
from synapse.events import _EventInternalMetadata
-from synapse.util import json_encoder
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.database import LoggingTransaction
+from synapse.types import JsonDict
+from synapse.util import Clock, json_encoder
import tests.unittest
import tests.utils
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class _BackfillSetupInfo:
+ room_id: str
+ depth_map: Dict[str, int]
+
+
class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
def test_get_prev_events_for_room(self):
@@ -513,7 +534,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
def prev_event_format(prev_event_id: str) -> Union[Tuple[str, dict], str]:
"""Account for differences in prev_events format across room versions"""
- if room_version.event_format == EventFormatVersions.V1:
+ if room_version.event_format == EventFormatVersions.ROOM_V1_V2:
return prev_event_id, {}
return prev_event_id
@@ -571,11 +592,600 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
)
self.assertEqual(count, 1)
- _, event_id = self.get_success(
+ next_staged_event_info = self.get_success(
self.store.get_next_staged_event_id_for_room(room_id)
)
+ assert next_staged_event_info
+ _, event_id = next_staged_event_info
self.assertEqual(event_id, "$fake_event_id_500")
+ def _setup_room_for_backfill_tests(self) -> _BackfillSetupInfo:
+ """
+ Sets up a room with various events and backward extremities to test
+ backfill functions against.
+
+ Returns:
+ _BackfillSetupInfo including the `room_id` to test against and
+ `depth_map` of events in the room
+ """
+ room_id = "!backfill-room-test:some-host"
+
+ # The silly graph we use to test grabbing backward extremities,
+ # with the oldest events at the top.
+ # 1 (oldest)
+ # |
+ # 2 ⹁
+ # | \
+ # | [b1, b2, b3]
+ # | |
+ # | A
+ # | /
+ # 3 {
+ # | \
+ # | [b4, b5, b6]
+ # | |
+ # | B
+ # | /
+ # 4 ´
+ # |
+ # 5 (newest)
+
+ event_graph: Dict[str, List[str]] = {
+ "1": [],
+ "2": ["1"],
+ "3": ["2", "A"],
+ "4": ["3", "B"],
+ "5": ["4"],
+ "A": ["b1", "b2", "b3"],
+ "b1": ["2"],
+ "b2": ["2"],
+ "b3": ["2"],
+ "B": ["b4", "b5", "b6"],
+ "b4": ["3"],
+ "b5": ["3"],
+ "b6": ["3"],
+ }
+
+ depth_map: Dict[str, int] = {
+ "1": 1,
+ "2": 2,
+ "b1": 3,
+ "b2": 3,
+ "b3": 3,
+ "A": 4,
+ "3": 5,
+ "b4": 6,
+ "b5": 6,
+ "b6": 6,
+ "B": 7,
+ "4": 8,
+ "5": 9,
+ }
+
+ # The events we have persisted on our server.
+ # The rest are events in the room but not backfilled yet.
+ our_server_events = {"5", "4", "B", "3", "A"}
+
+ complete_event_dict_map: Dict[str, JsonDict] = {}
+ stream_ordering = 0
+ for (event_id, prev_event_ids) in event_graph.items():
+ depth = depth_map[event_id]
+
+ complete_event_dict_map[event_id] = {
+ "event_id": event_id,
+ "type": "test_regular_type",
+ "room_id": room_id,
+ "sender": "@sender",
+ "prev_event_ids": prev_event_ids,
+ "auth_event_ids": [],
+ "origin_server_ts": stream_ordering,
+ "depth": depth,
+ "stream_ordering": stream_ordering,
+ "content": {"body": "event" + event_id},
+ }
+
+ stream_ordering += 1
+
+ def populate_db(txn: LoggingTransaction):
+ # Insert the room to satisfy the foreign key constraint of
+ # `event_failed_pull_attempts`
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ "rooms",
+ {
+ "room_id": room_id,
+ "creator": "room_creator_user_id",
+ "is_public": True,
+ "room_version": "6",
+ },
+ )
+
+ # Insert our server events
+ for event_id in our_server_events:
+ event_dict = complete_event_dict_map[event_id]
+
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="events",
+ values={
+ "event_id": event_dict.get("event_id"),
+ "type": event_dict.get("type"),
+ "room_id": event_dict.get("room_id"),
+ "depth": event_dict.get("depth"),
+ "topological_ordering": event_dict.get("depth"),
+ "stream_ordering": event_dict.get("stream_ordering"),
+ "processed": True,
+ "outlier": False,
+ },
+ )
+
+ # Insert the event edges
+ for event_id in our_server_events:
+ for prev_event_id in event_graph[event_id]:
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="event_edges",
+ values={
+ "event_id": event_id,
+ "prev_event_id": prev_event_id,
+ "room_id": room_id,
+ },
+ )
+
+ # Insert the backward extremities
+ prev_events_of_our_events = {
+ prev_event_id
+ for our_server_event in our_server_events
+ for prev_event_id in complete_event_dict_map[our_server_event][
+ "prev_event_ids"
+ ]
+ }
+ backward_extremities = prev_events_of_our_events - our_server_events
+ for backward_extremity in backward_extremities:
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="event_backward_extremities",
+ values={
+ "event_id": backward_extremity,
+ "room_id": room_id,
+ },
+ )
+
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_setup_room_for_backfill_tests_populate_db",
+ populate_db,
+ )
+ )
+
+ return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
+
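As a sanity check on the setup above, the backward extremities it inserts can be derived by hand: they are the `prev_events` referenced by our persisted events that we have not persisted ourselves. A standalone sketch reproducing that set from the same `event_graph` and `our_server_events`:

```python
event_graph = {
    "1": [], "2": ["1"], "3": ["2", "A"], "4": ["3", "B"], "5": ["4"],
    "A": ["b1", "b2", "b3"], "b1": ["2"], "b2": ["2"], "b3": ["2"],
    "B": ["b4", "b5", "b6"], "b4": ["3"], "b5": ["3"], "b6": ["3"],
}
our_server_events = {"5", "4", "B", "3", "A"}

prev_events_of_our_events = {
    prev for event_id in our_server_events for prev in event_graph[event_id]
}
backward_extremities = prev_events_of_our_events - our_server_events
# Matches the IDs test_get_backfill_points_in_room expects to get back
# (ordering aside, which the store derives from depth/stream ordering).
assert backward_extremities == {"b1", "b2", "b3", "b4", "b5", "b6", "2"}
```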
+ def test_get_backfill_points_in_room(self):
+ """
+ Test to make sure only backfill points that are older and come before
+ the `current_depth` are returned.
+ """
+ setup_info = self._setup_room_for_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Try at "B"
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["B"], limit=100)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"])
+
+ # Try at "A"
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ # Event "2" has a depth of 2 but is not included here because we only
+ # know the approximate depth of 5 from our event "3".
+ self.assertListEqual(backfill_event_ids, ["b3", "b2", "b1"])
+
+ def test_get_backfill_points_in_room_excludes_events_we_have_attempted(
+ self,
+ ):
+ """
+ Test to make sure that events we have attempted to backfill (and are
+ still within their backoff timeout duration) do not show up as events
+ to backfill again.
+ """
+ setup_info = self._setup_room_for_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Record some attempts to backfill these events which will make
+ # `get_backfill_points_in_room` exclude them because we
+ # haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b5", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b4", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b3", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b2", "fake cause")
+ )
+
+ # No time has passed since we attempted to backfill ^
+
+ # Try at "B"
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["B"], limit=100)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ # Only the backfill points that we didn't record earlier exist here.
+ self.assertEqual(backfill_event_ids, ["b6", "2", "b1"])
+
+ def test_get_backfill_points_in_room_attempted_event_retry_after_backoff_duration(
+ self,
+ ):
+ """
+ Test to make sure that after we fake attempts to backfill events, we
+ can retry and see them again once their backoff timeout durations have
+ elapsed.
+ """
+ setup_info = self._setup_room_for_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Record some attempts to backfill these events which will make
+ # `get_backfill_points_in_room` exclude them because we
+ # haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b3", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+
+ # Now advance time by 2 hours and we should only be able to see "b3"
+ # because we have waited long enough for the single attempt (2^1 hours)
+ # but we still shouldn't see "b1" because we haven't waited long enough
+ # for this many attempts. We didn't do anything to "b2" so it should be
+ # visible regardless.
+ self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
+
+ # Try at "A" and make sure that "b1" is not in the list because we've
+ # already attempted many times
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["b3", "b2"])
+
+ # Now advance time by 20 hours (above 2^4 because we made 4 attempts) and
+ # see if we can now backfill it
+ self.reactor.advance(datetime.timedelta(hours=20).total_seconds())
+
+ # Try at "A" again after we advanced enough time and we should see "b3" again
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["b3", "b2", "b1"])
+
+ def test_get_backfill_points_in_room_works_after_many_failed_pull_attempts_that_could_naively_overflow(
+ self,
+ ) -> None:
+ """
+ A test that reproduces #13929 (Postgres only).
+
+ Test to make sure we can still get backfill points after many failed pull
+ attempts that cause us to backoff to the limit. Even if the backoff formula
+ would tell us to wait for more seconds than can be expressed in a 32 bit
+ signed int.
+ """
+ setup_info = self._setup_room_for_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Pretend that we have tried and failed 10 times to backfill event b1.
+ for _ in range(10):
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+
+ # If the backoff periods grow without limit:
+ # After the first failed attempt, we would have backed off for 1 << 1 = 2 hours.
+ # After the second failed attempt we would have backed off for 1 << 2 = 4 hours,
+ # so after the 10th failed attempt we should back off for 1 << 10 == 1024 hours.
+ # Wait 1100 hours just so we have a nice round number.
+ self.reactor.advance(datetime.timedelta(hours=1100).total_seconds())
+
+ # 1024 hours in milliseconds is 1024 * 3600000, which exceeds the largest 32 bit
+ # signed integer. The bug we're reproducing is that this overflow causes an
+ # error in postgres preventing us from fetching a set of backwards extremities
+ # to retry fetching.
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
+ )
+
+ # We should now be able to fetch all the backfill points: b1's latest
+ # backoff period has expired, and we haven't tried the rest.
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["b3", "b2", "b1"])
+
+ def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo:
+ """
+ Sets up a room with various insertion event backward extremities to test
+ backfill functions against.
+
+ Returns:
+ _BackfillSetupInfo including the `room_id` to test against and
+ `depth_map` of events in the room
+ """
+ room_id = "!backfill-room-test:some-host"
+
+ depth_map: Dict[str, int] = {
+ "1": 1,
+ "2": 2,
+ "insertion_eventA": 3,
+ "3": 4,
+ "insertion_eventB": 5,
+ "4": 6,
+ "5": 7,
+ }
+
+ def populate_db(txn: LoggingTransaction):
+ # Insert the room to satisfy the foreign key constraint of
+ # `event_failed_pull_attempts`
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ "rooms",
+ {
+ "room_id": room_id,
+ "creator": "room_creator_user_id",
+ "is_public": True,
+ "room_version": "6",
+ },
+ )
+
+ # Insert our server events
+ stream_ordering = 0
+ for event_id, depth in depth_map.items():
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="events",
+ values={
+ "event_id": event_id,
+ "type": EventTypes.MSC2716_INSERTION
+ if event_id.startswith("insertion_event")
+ else "test_regular_type",
+ "room_id": room_id,
+ "depth": depth,
+ "topological_ordering": depth,
+ "stream_ordering": stream_ordering,
+ "processed": True,
+ "outlier": False,
+ },
+ )
+
+ if event_id.startswith("insertion_event"):
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="insertion_event_extremities",
+ values={
+ "event_id": event_id,
+ "room_id": room_id,
+ },
+ )
+
+ stream_ordering += 1
+
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_setup_room_for_insertion_backfill_tests_populate_db",
+ populate_db,
+ )
+ )
+
+ return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
+
+ def test_get_insertion_event_backward_extremities_in_room(self):
+ """
+ Test to make sure only insertion event backward extremities that are
+ older and come before the `current_depth` are returned.
+ """
+ setup_info = self._setup_room_for_insertion_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Try at "insertion_eventB"
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(
+ room_id, depth_map["insertion_eventB"], limit=100
+ )
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["insertion_eventB", "insertion_eventA"])
+
+ # Try at "insertion_eventA"
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(
+ room_id, depth_map["insertion_eventA"], limit=100
+ )
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ # Event "2" has a depth of 2 but is not included here because we only
+ # know the approximate depth of 5 from our event "3".
+ self.assertListEqual(backfill_event_ids, ["insertion_eventA"])
+
+ def test_get_insertion_event_backward_extremities_in_room_excludes_events_we_have_attempted(
+ self,
+ ):
+ """
+ Test to make sure that insertion events we have attempted to backfill
+ (and are still within their backoff timeout duration) do not show up
+ as events to backfill again.
+ """
+ setup_info = self._setup_room_for_insertion_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Record some attempts to backfill these events which will make
+ # `get_insertion_event_backward_extremities_in_room` exclude them
+ # because we haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+
+ # No time has passed since we attempted to backfill ^
+
+ # Try at "insertion_eventB"
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(
+ room_id, depth_map["insertion_eventB"], limit=100
+ )
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ # Only the backfill points that we didn't record earlier exist here.
+ self.assertEqual(backfill_event_ids, ["insertion_eventB"])
+
+ def test_get_insertion_event_backward_extremities_in_room_attempted_event_retry_after_backoff_duration(
+ self,
+ ):
+ """
+ Test to make sure that after we fake attempts to backfill event
+ "insertion_eventA" many times, we can retry and see
+ "insertion_eventA" again once the backoff timeout duration has
+ elapsed.
+ """
+ setup_info = self._setup_room_for_insertion_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Record some attempts to backfill these events which will make
+ # `get_backfill_points_in_room` exclude them because we
+ # haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventB", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+
+ # Now advance time by 2 hours and we should only be able to see
+ # "insertion_eventB" because we have waited long enough for the single
+ # attempt (2^1 hours) but we still shouldn't see "insertion_eventA"
+ # because we haven't waited long enough for this many attempts.
+ self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
+
+ # Try at "insertion_eventA" and make sure that "insertion_eventA" is not
+ # in the list because we've already attempted many times
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(
+ room_id, depth_map["insertion_eventA"], limit=100
+ )
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, [])
+
+ # Now advance time by 20 hours (above 2^4 because we made 4 attempts) and
+ # see if we can now backfill it
+ self.reactor.advance(datetime.timedelta(hours=20).total_seconds())
+
+ # Try at "insertion_eventA" again after we advanced enough time and we
+ # should see "insertion_eventA" again
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(
+ room_id, depth_map["insertion_eventA"], limit=100
+ )
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["insertion_eventA"])
+
+ def test_get_event_ids_to_not_pull_from_backoff(
+ self,
+ ):
+ """
+ Test to make sure only event IDs we should back off from are returned.
+ """
+ # Create the room
+ user_id = self.register_user("alice", "test")
+ tok = self.login("alice", "test")
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "$failed_event_id", "fake cause"
+ )
+ )
+
+ event_ids_to_backoff = self.get_success(
+ self.store.get_event_ids_to_not_pull_from_backoff(
+ room_id=room_id, event_ids=["$failed_event_id", "$normal_event_id"]
+ )
+ )
+
+ self.assertEqual(event_ids_to_backoff, ["$failed_event_id"])
+
+ def test_get_event_ids_to_not_pull_from_backoff_retry_after_backoff_duration(
+ self,
+ ):
+ """
+ Test to make sure no event IDs are returned after the backoff duration has
+ elapsed.
+ """
+ # Create the room
+ user_id = self.register_user("alice", "test")
+ tok = self.login("alice", "test")
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "$failed_event_id", "fake cause"
+ )
+ )
+
+ # Now advance time by 2 hours so we wait long enough for the single failed
+ # attempt (2^1 hours).
+ self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
+
+ event_ids_to_backoff = self.get_success(
+ self.store.get_event_ids_to_not_pull_from_backoff(
+ room_id=room_id, event_ids=["$failed_event_id", "$normal_event_id"]
+ )
+ )
+ # Since this function only returns events we should back off from, and
+ # enough time has elapsed past the backoff period, there are no events
+ # to back off from.
+ self.assertEqual(event_ids_to_backoff, [])
+
@attr.s
class FakeEvent:
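The backoff arithmetic these tests walk through (and the 32-bit overflow from #13929) can be checked directly. A sketch assuming the doubling formula described in the comments above, `1 << num_attempts` hours per failed pull attempt; the cap Synapse actually applies lives in the store, not here:

```python
import datetime

def naive_backoff_ms(num_attempts: int) -> int:
    """Backoff duration in milliseconds, doubling per failed attempt."""
    hours = 1 << num_attempts
    return int(datetime.timedelta(hours=hours).total_seconds() * 1000)

INT32_MAX = 2**31 - 1

assert naive_backoff_ms(1) == 2 * 3_600_000   # 2 hours after 1 attempt
assert naive_backoff_ms(4) == 16 * 3_600_000  # why ~20h suffices for 4 attempts
# After 10 attempts the naive value no longer fits in a signed 32-bit int,
# which is what broke the Postgres query in #13929.
assert naive_backoff_ms(10) == 1024 * 3_600_000 > INT32_MAX
```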
diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py
index 088fbb247b..6f1135eef4 100644
--- a/tests/storage/test_event_metrics.py
+++ b/tests/storage/test_event_metrics.py
@@ -11,8 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from prometheus_client import generate_latest
-from synapse.metrics import REGISTRY, generate_latest
+from synapse.metrics import REGISTRY
from synapse.types import UserID, create_requester
from tests.unittest import HomeserverTestCase
@@ -53,8 +54,8 @@ class ExtremStatisticsTestCase(HomeserverTestCase):
items = list(
filter(
- lambda x: b"synapse_forward_extremities_" in x,
- generate_latest(REGISTRY, emit_help=False).split(b"\n"),
+ lambda x: b"synapse_forward_extremities_" in x and b"# HELP" not in x,
+ generate_latest(REGISTRY).split(b"\n"),
)
)
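The metrics-test change above swaps Synapse's patched `generate_latest` (which had an `emit_help` flag) for the stock `prometheus_client` one, which always emits `# HELP` and `# TYPE` comment lines; hence the extra `b"# HELP" not in x` filter. A minimal illustration of the stock exposition output (the metric name here is purely illustrative):

```python
from prometheus_client import CollectorRegistry, Gauge, generate_latest

registry = CollectorRegistry()
Gauge("synapse_forward_extremities_demo", "demo gauge", registry=registry).set(1)

output = generate_latest(registry).split(b"\n")
# Stock generate_latest interleaves comment lines with the samples:
#   # HELP synapse_forward_extremities_demo demo gauge
#   # TYPE synapse_forward_extremities_demo gauge
#   synapse_forward_extremities_demo 1.0
samples = [line for line in output if line and not line.startswith(b"#")]
assert samples == [b"synapse_forward_extremities_demo 1.0"]
```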
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index ba40124c8a..ee48920f84 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -12,18 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Optional, Tuple
+
from twisted.test.proto_helpers import MemoryReactor
+from synapse.api.constants import MAIN_TIMELINE, RelationTypes
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.storage.databases.main.event_push_actions import NotifCounts
+from synapse.types import JsonDict
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
-USER_ID = "@user:example.com"
-
class EventPushActionsStoreTestCase(HomeserverTestCase):
servlets = [
@@ -38,21 +40,13 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
assert persist_events_store is not None
self.persist_events_store = persist_events_store
- def test_get_unread_push_actions_for_user_in_range_for_http(self) -> None:
- self.get_success(
- self.store.get_unread_push_actions_for_user_in_range_for_http(
- USER_ID, 0, 1000, 20
- )
- )
+ def _create_users_and_room(self) -> Tuple[str, str, str, str, str]:
+ """
+ Creates two users and a shared room.
- def test_get_unread_push_actions_for_user_in_range_for_email(self) -> None:
- self.get_success(
- self.store.get_unread_push_actions_for_user_in_range_for_email(
- USER_ID, 0, 1000, 20
- )
- )
-
- def test_count_aggregation(self) -> None:
+ Returns:
+ Tuple of (user 1 ID, user 1 token, user 2 ID, user 2 token, room ID).
+ """
# Create a user to receive notifications and send receipts.
user_id = self.register_user("user1235", "pass")
token = self.login("user1235", "pass")
@@ -65,11 +59,104 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
room_id = self.helper.create_room_as(user_id, tok=token)
self.helper.join(room_id, other_id, tok=other_token)
+ return user_id, token, other_id, other_token, room_id
+
+ def test_get_unread_push_actions_for_user_in_range(self) -> None:
+ """Test getting unread push actions for HTTP and email pushers."""
+ user_id, token, _, other_token, room_id = self._create_users_and_room()
+
+ # Create two events, one of which is a highlight.
+ first_event_id = self.helper.send_event(
+ room_id,
+ type="m.room.message",
+ content={"msgtype": "m.text", "body": "msg"},
+ tok=other_token,
+ )["event_id"]
+ second_event_id = self.helper.send_event(
+ room_id,
+ type="m.room.message",
+ content={
+ "msgtype": "m.text",
+ "body": user_id,
+ "m.relates_to": {
+ "rel_type": RelationTypes.THREAD,
+ "event_id": first_event_id,
+ },
+ },
+ tok=other_token,
+ )["event_id"]
+
+ # Fetch unread actions for HTTP pushers.
+ http_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_http(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual(2, len(http_actions))
+
+ # Fetch unread actions for email pushers.
+ email_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_email(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual(2, len(email_actions))
+
+ # Send a receipt, which should clear the first action.
+ self.get_success(
+ self.store.insert_receipt(
+ room_id,
+ "m.read",
+ user_id=user_id,
+ event_ids=[first_event_id],
+ thread_id=None,
+ data={},
+ )
+ )
+ http_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_http(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual(1, len(http_actions))
+ email_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_email(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual(1, len(email_actions))
+
+ # Send a thread receipt to clear the thread action.
+ self.get_success(
+ self.store.insert_receipt(
+ room_id,
+ "m.read",
+ user_id=user_id,
+ event_ids=[second_event_id],
+ thread_id=first_event_id,
+ data={},
+ )
+ )
+ http_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_http(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual([], http_actions)
+ email_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_email(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual([], email_actions)
+
+ def test_count_aggregation(self) -> None:
+ # Create a user to receive notifications and send receipts.
+ user_id, token, _, other_token, room_id = self._create_users_and_room()
+
last_event_id: str
- def _assert_counts(
- noitf_count: int, unread_count: int, highlight_count: int
- ) -> None:
+ def _assert_counts(noitf_count: int, highlight_count: int) -> None:
counts = self.get_success(
self.store.db_pool.runInteraction(
"get-unread-counts",
@@ -79,13 +166,14 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
)
)
self.assertEqual(
- counts,
+ counts.main_timeline,
NotifCounts(
notify_count=noitf_count,
- unread_count=unread_count,
+ unread_count=0,
highlight_count=highlight_count,
),
)
+ self.assertEqual(counts.threads, {})
def _create_event(highlight: bool = False) -> str:
result = self.helper.send_event(
@@ -108,63 +196,518 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
"m.read",
user_id=user_id,
event_ids=[event_id],
+ thread_id=None,
data={},
)
)
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
_create_event()
- _assert_counts(1, 1, 0)
+ _assert_counts(1, 0)
_rotate()
- _assert_counts(1, 1, 0)
+ _assert_counts(1, 0)
event_id = _create_event()
- _assert_counts(2, 2, 0)
+ _assert_counts(2, 0)
_rotate()
- _assert_counts(2, 2, 0)
+ _assert_counts(2, 0)
_create_event()
_mark_read(event_id)
- _assert_counts(1, 1, 0)
+ _assert_counts(1, 0)
_mark_read(last_event_id)
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
_create_event()
+ _assert_counts(1, 0)
_rotate()
- _assert_counts(1, 1, 0)
+ _assert_counts(1, 0)
# Delete old event push actions, this should not affect the (summarised) count.
+ #
+ # All event push actions are kept for 24 hours, so we need to move
+ # forward in time.
+ self.pump(60 * 60 * 24)
self.get_success(self.store._remove_old_push_actions_that_have_rotated())
- _assert_counts(1, 1, 0)
+ # Double check that the event push actions have been cleared (i.e. that
+ # any results *must* come from the summary).
+ result = self.get_success(
+ self.store.db_pool.simple_select_list(
+ table="event_push_actions",
+ keyvalues={"1": 1},
+ retcols=("event_id",),
+ desc="",
+ )
+ )
+ self.assertEqual(result, [])
+ _assert_counts(1, 0)
_mark_read(last_event_id)
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
event_id = _create_event(True)
- _assert_counts(1, 1, 1)
+ _assert_counts(1, 1)
_rotate()
- _assert_counts(1, 1, 1)
+ _assert_counts(1, 1)
# Check that adding another notification and rotating after highlight
# works.
_create_event()
_rotate()
- _assert_counts(2, 2, 1)
+ _assert_counts(2, 1)
# Check that sending read receipts at different points results in the
# right counts.
_mark_read(event_id)
- _assert_counts(1, 1, 0)
+ _assert_counts(1, 0)
_mark_read(last_event_id)
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
_create_event(True)
- _assert_counts(1, 1, 1)
+ _assert_counts(1, 1)
_mark_read(last_event_id)
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
_rotate()
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
+
+ def test_count_aggregation_threads(self) -> None:
+ """
+ This is essentially the same test as test_count_aggregation, but adds
+ events to the main timeline and to a thread.
+ """
+
+ user_id, token, _, other_token, room_id = self._create_users_and_room()
+ thread_id: str
+
+ last_event_id: str
+
+ def _assert_counts(
+ noitf_count: int,
+ highlight_count: int,
+ thread_notif_count: int,
+ thread_highlight_count: int,
+ ) -> None:
+ counts = self.get_success(
+ self.store.db_pool.runInteraction(
+ "get-unread-counts",
+ self.store._get_unread_counts_by_receipt_txn,
+ room_id,
+ user_id,
+ )
+ )
+ self.assertEqual(
+ counts.main_timeline,
+ NotifCounts(
+ notify_count=noitf_count,
+ unread_count=0,
+ highlight_count=highlight_count,
+ ),
+ )
+ if thread_notif_count or thread_highlight_count:
+ self.assertEqual(
+ counts.threads,
+ {
+ thread_id: NotifCounts(
+ notify_count=thread_notif_count,
+ unread_count=0,
+ highlight_count=thread_highlight_count,
+ ),
+ },
+ )
+ else:
+ self.assertEqual(counts.threads, {})
+
+ def _create_event(
+ highlight: bool = False, thread_id: Optional[str] = None
+ ) -> str:
+ content: JsonDict = {
+ "msgtype": "m.text",
+ "body": user_id if highlight else "msg",
+ }
+ if thread_id:
+ content["m.relates_to"] = {
+ "rel_type": "m.thread",
+ "event_id": thread_id,
+ }
+
+ result = self.helper.send_event(
+ room_id,
+ type="m.room.message",
+ content=content,
+ tok=other_token,
+ )
+ nonlocal last_event_id
+ last_event_id = result["event_id"]
+ return last_event_id
+
+ def _rotate() -> None:
+ self.get_success(self.store._rotate_notifs())
+
+ def _mark_read(event_id: str, thread_id: str = MAIN_TIMELINE) -> None:
+ self.get_success(
+ self.store.insert_receipt(
+ room_id,
+ "m.read",
+ user_id=user_id,
+ event_ids=[event_id],
+ thread_id=thread_id,
+ data={},
+ )
+ )
+
+ _assert_counts(0, 0, 0, 0)
+ thread_id = _create_event()
+ _assert_counts(1, 0, 0, 0)
+ _rotate()
+ _assert_counts(1, 0, 0, 0)
+
+ _create_event(thread_id=thread_id)
+ _assert_counts(1, 0, 1, 0)
+ _rotate()
+ _assert_counts(1, 0, 1, 0)
+
+ _create_event()
+ _assert_counts(2, 0, 1, 0)
+ _rotate()
+ _assert_counts(2, 0, 1, 0)
+
+ event_id = _create_event(thread_id=thread_id)
+ _assert_counts(2, 0, 2, 0)
+ _rotate()
+ _assert_counts(2, 0, 2, 0)
+
+ _create_event()
+ _create_event(thread_id=thread_id)
+ _mark_read(event_id)
+ _assert_counts(1, 0, 3, 0)
+ _mark_read(event_id, thread_id)
+ _assert_counts(1, 0, 1, 0)
+
+ _mark_read(last_event_id)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event()
+ _create_event(thread_id=thread_id)
+ _assert_counts(1, 0, 1, 0)
+ _rotate()
+ _assert_counts(1, 0, 1, 0)
+
+ # Delete old event push actions, this should not affect the (summarised) count.
+ self.get_success(self.store._remove_old_push_actions_that_have_rotated())
+ _assert_counts(1, 0, 1, 0)
+
+ _mark_read(last_event_id)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event(True)
+ _assert_counts(1, 1, 0, 0)
+ _rotate()
+ _assert_counts(1, 1, 0, 0)
+
+ event_id = _create_event(True, thread_id)
+ _assert_counts(1, 1, 1, 1)
+ _rotate()
+ _assert_counts(1, 1, 1, 1)
+
+ # Check that adding another notification and rotating after highlight
+ # works.
+ _create_event()
+ _rotate()
+ _assert_counts(2, 1, 1, 1)
+
+ _create_event(thread_id=thread_id)
+ _rotate()
+ _assert_counts(2, 1, 2, 1)
+
+ # Check that sending read receipts at different points results in the
+ # right counts.
+ _mark_read(event_id)
+ _assert_counts(1, 0, 2, 1)
+ _mark_read(event_id, thread_id)
+ _assert_counts(1, 0, 1, 0)
+ _mark_read(last_event_id)
+ _assert_counts(0, 0, 1, 0)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event(True)
+ _create_event(True, thread_id)
+ _assert_counts(1, 1, 1, 1)
+ _mark_read(last_event_id)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+ _rotate()
+ _assert_counts(0, 0, 0, 0)
+
+ def test_count_aggregation_mixed(self) -> None:
+ """
+ This is essentially the same test as test_count_aggregation_threads, but
+ sends both unthreaded and threaded receipts.
+ """
+
+ user_id, token, _, other_token, room_id = self._create_users_and_room()
+ thread_id: str
+
+ last_event_id: str
+
+ def _assert_counts(
+ noitf_count: int,
+ highlight_count: int,
+ thread_notif_count: int,
+ thread_highlight_count: int,
+ ) -> None:
+ counts = self.get_success(
+ self.store.db_pool.runInteraction(
+ "get-unread-counts",
+ self.store._get_unread_counts_by_receipt_txn,
+ room_id,
+ user_id,
+ )
+ )
+ self.assertEqual(
+ counts.main_timeline,
+ NotifCounts(
+ notify_count=noitf_count,
+ unread_count=0,
+ highlight_count=highlight_count,
+ ),
+ )
+ if thread_notif_count or thread_highlight_count:
+ self.assertEqual(
+ counts.threads,
+ {
+ thread_id: NotifCounts(
+ notify_count=thread_notif_count,
+ unread_count=0,
+ highlight_count=thread_highlight_count,
+ ),
+ },
+ )
+ else:
+ self.assertEqual(counts.threads, {})
+
+ def _create_event(
+ highlight: bool = False, thread_id: Optional[str] = None
+ ) -> str:
+ content: JsonDict = {
+ "msgtype": "m.text",
+ "body": user_id if highlight else "msg",
+ }
+ if thread_id:
+ content["m.relates_to"] = {
+ "rel_type": "m.thread",
+ "event_id": thread_id,
+ }
+
+ result = self.helper.send_event(
+ room_id,
+ type="m.room.message",
+ content=content,
+ tok=other_token,
+ )
+ nonlocal last_event_id
+ last_event_id = result["event_id"]
+ return last_event_id
+
+ def _rotate() -> None:
+ self.get_success(self.store._rotate_notifs())
+
+ def _mark_read(event_id: str, thread_id: Optional[str] = None) -> None:
+ self.get_success(
+ self.store.insert_receipt(
+ room_id,
+ "m.read",
+ user_id=user_id,
+ event_ids=[event_id],
+ thread_id=thread_id,
+ data={},
+ )
+ )
+
+ _assert_counts(0, 0, 0, 0)
+ thread_id = _create_event()
+ _assert_counts(1, 0, 0, 0)
+ _rotate()
+ _assert_counts(1, 0, 0, 0)
+
+ _create_event(thread_id=thread_id)
+ _assert_counts(1, 0, 1, 0)
+ _rotate()
+ _assert_counts(1, 0, 1, 0)
+
+ _create_event()
+ _assert_counts(2, 0, 1, 0)
+ _rotate()
+ _assert_counts(2, 0, 1, 0)
+
+ event_id = _create_event(thread_id=thread_id)
+ _assert_counts(2, 0, 2, 0)
+ _rotate()
+ _assert_counts(2, 0, 2, 0)
+
+ _create_event()
+ _create_event(thread_id=thread_id)
+ _mark_read(event_id)
+ _assert_counts(1, 0, 1, 0)
+
+ _mark_read(last_event_id, MAIN_TIMELINE)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event()
+ _create_event(thread_id=thread_id)
+ _assert_counts(1, 0, 1, 0)
+ _rotate()
+ _assert_counts(1, 0, 1, 0)
+
+ # Delete old event push actions, this should not affect the (summarised) count.
+ self.get_success(self.store._remove_old_push_actions_that_have_rotated())
+ _assert_counts(1, 0, 1, 0)
+
+ _mark_read(last_event_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event(True)
+ _assert_counts(1, 1, 0, 0)
+ _rotate()
+ _assert_counts(1, 1, 0, 0)
+
+ event_id = _create_event(True, thread_id)
+ _assert_counts(1, 1, 1, 1)
+ _rotate()
+ _assert_counts(1, 1, 1, 1)
+
+ # Check that adding another notification and rotating after highlight
+ # works.
+ _create_event()
+ _rotate()
+ _assert_counts(2, 1, 1, 1)
+
+ _create_event(thread_id=thread_id)
+ _rotate()
+ _assert_counts(2, 1, 2, 1)
+
+ # Check that sending read receipts at different points results in the
+ # right counts.
+ _mark_read(event_id)
+ _assert_counts(1, 0, 1, 0)
+ _mark_read(event_id, MAIN_TIMELINE)
+ _assert_counts(1, 0, 1, 0)
+ _mark_read(last_event_id, MAIN_TIMELINE)
+ _assert_counts(0, 0, 1, 0)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event(True)
+ _create_event(True, thread_id)
+ _assert_counts(1, 1, 1, 1)
+ _mark_read(last_event_id)
+ _assert_counts(0, 0, 0, 0)
+ _rotate()
+ _assert_counts(0, 0, 0, 0)
+
+ def test_recursive_thread(self) -> None:
+ """
+ Events related to events in a thread should still be considered part of
+ that thread.
+ """
+
+ # Create a user to receive notifications and send receipts.
+ user_id = self.register_user("user1235", "pass")
+ token = self.login("user1235", "pass")
+
+ # And another user to send events.
+ other_id = self.register_user("other", "pass")
+ other_token = self.login("other", "pass")
+
+ # Create a room and put both users in it.
+ room_id = self.helper.create_room_as(user_id, tok=token)
+ self.helper.join(room_id, other_id, tok=other_token)
+
+ # Update the user's push rules to care about reaction events.
+ self.get_success(
+ self.store.add_push_rule(
+ user_id,
+ "related_events",
+ priority_class=5,
+ conditions=[
+ {"kind": "event_match", "key": "type", "pattern": "m.reaction"}
+ ],
+ actions=["notify"],
+ )
+ )
+
+ def _create_event(type: str, content: JsonDict) -> str:
+ result = self.helper.send_event(
+ room_id, type=type, content=content, tok=other_token
+ )
+ return result["event_id"]
+
+ def _assert_counts(noitf_count: int, thread_notif_count: int) -> None:
+ counts = self.get_success(
+ self.store.db_pool.runInteraction(
+ "get-unread-counts",
+ self.store._get_unread_counts_by_receipt_txn,
+ room_id,
+ user_id,
+ )
+ )
+ self.assertEqual(
+ counts.main_timeline,
+ NotifCounts(
+ notify_count=noitf_count, unread_count=0, highlight_count=0
+ ),
+ )
+ if thread_notif_count:
+ self.assertEqual(
+ counts.threads,
+ {
+ thread_id: NotifCounts(
+ notify_count=thread_notif_count,
+ unread_count=0,
+ highlight_count=0,
+ ),
+ },
+ )
+ else:
+ self.assertEqual(counts.threads, {})
+
+ # Create a root event.
+ thread_id = _create_event(
+ "m.room.message", {"msgtype": "m.text", "body": "msg"}
+ )
+ _assert_counts(1, 0)
+
+ # Reply, creating a thread.
+ reply_id = _create_event(
+ "m.room.message",
+ {
+ "msgtype": "m.text",
+ "body": "msg",
+ "m.relates_to": {
+ "rel_type": "m.thread",
+ "event_id": thread_id,
+ },
+ },
+ )
+ _assert_counts(1, 1)
+
+ # Create an event related to a thread event; this should still appear in
+ # the thread.
+ _create_event(
+ type="m.reaction",
+ content={
+ "m.relates_to": {
+ "rel_type": "m.annotation",
+ "event_id": reply_id,
+ "key": "A",
+ }
+ },
+ )
+ _assert_counts(1, 2)
def test_find_first_stream_ordering_after_ts(self) -> None:
def add_event(so: int, ts: int) -> None:
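The receipt semantics these tests exercise are worth spelling out: a receipt with `thread_id=None` is unthreaded and clears notifications everywhere in the room, while a threaded receipt (including one for `MAIN_TIMELINE`) only clears its own thread. A toy model of those rules, with no Synapse APIs involved:

```python
from typing import Dict, List, Optional

MAIN_TIMELINE = "main"  # stand-in for synapse.api.constants.MAIN_TIMELINE

class RoomCounts:
    """Toy model of per-thread notification counts and receipt clearing."""

    def __init__(self) -> None:
        # thread_id -> stream positions of unread notifying events
        self.unread: Dict[str, List[int]] = {}
        self.next_pos = 0

    def notify(self, thread_id: str = MAIN_TIMELINE) -> int:
        self.next_pos += 1
        self.unread.setdefault(thread_id, []).append(self.next_pos)
        return self.next_pos

    def receipt(self, upto: int, thread_id: Optional[str]) -> None:
        if thread_id is None:
            # An *unthreaded* receipt clears everything up to `upto`,
            # in the main timeline and in every thread.
            for events in self.unread.values():
                events[:] = [p for p in events if p > upto]
        else:
            # A *threaded* receipt (including MAIN_TIMELINE) only clears
            # its own thread.
            events = self.unread.get(thread_id, [])
            events[:] = [p for p in events if p > upto]

    def counts(self) -> Dict[str, int]:
        return {t: len(e) for t, e in self.unread.items()}

room = RoomCounts()
room.notify()                    # main timeline
last = room.notify("$thread")    # a thread
room.receipt(last, thread_id=MAIN_TIMELINE)
assert room.counts() == {MAIN_TIMELINE: 0, "$thread": 1}
room.receipt(last, thread_id=None)  # unthreaded receipt clears the thread too
assert room.counts() == {MAIN_TIMELINE: 0, "$thread": 0}
```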
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index 2d8d1f860f..d6a2b8d274 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -16,15 +16,157 @@ from typing import List, Optional
from twisted.test.proto_helpers import MemoryReactor
from synapse.server import HomeServer
-from synapse.storage.database import DatabasePool, LoggingTransaction
+from synapse.storage.database import (
+ DatabasePool,
+ LoggingDatabaseConnection,
+ LoggingTransaction,
+)
from synapse.storage.engines import IncorrectDatabaseSetup
-from synapse.storage.util.id_generators import MultiWriterIdGenerator
+from synapse.storage.types import Cursor
+from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
from tests.utils import USE_POSTGRES_FOR_TESTS
+class StreamIdGeneratorTestCase(HomeserverTestCase):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.db_pool: DatabasePool = self.store.db_pool
+
+ self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
+
+ def _setup_db(self, txn: LoggingTransaction) -> None:
+ txn.execute(
+ """
+ CREATE TABLE foobar (
+ stream_id BIGINT NOT NULL,
+ data TEXT
+ );
+ """
+ )
+ txn.execute("INSERT INTO foobar VALUES (123, 'hello world');")
+
+ def _create_id_generator(self) -> StreamIdGenerator:
+ def _create(conn: LoggingDatabaseConnection) -> StreamIdGenerator:
+ return StreamIdGenerator(
+ db_conn=conn,
+ table="foobar",
+ column="stream_id",
+ )
+
+ return self.get_success_or_raise(self.db_pool.runWithConnection(_create))
+
+ def test_initial_value(self) -> None:
+ """Check that we read the current token from the DB."""
+ id_gen = self._create_id_generator()
+ self.assertEqual(id_gen.get_current_token(), 123)
+
+ def test_single_gen_next(self) -> None:
+ """Check that we correctly increment the current token from the DB."""
+ id_gen = self._create_id_generator()
+
+ async def test_gen_next() -> None:
+ async with id_gen.get_next() as next_id:
+ # We haven't persisted `next_id` yet; current token is still 123
+ self.assertEqual(id_gen.get_current_token(), 123)
+ # But we did learn what the next value is
+ self.assertEqual(next_id, 124)
+
+ # Once the context manager closes we assume that the `next_id` has been
+ # written to the DB.
+ self.assertEqual(id_gen.get_current_token(), 124)
+
+ self.get_success(test_gen_next())
+
+ def test_multiple_gen_nexts(self) -> None:
+ """Check that we handle overlapping calls to gen_next sensibly."""
+ id_gen = self._create_id_generator()
+
+ async def test_gen_next() -> None:
+ ctx1 = id_gen.get_next()
+ ctx2 = id_gen.get_next()
+ ctx3 = id_gen.get_next()
+
+ # Request three new stream IDs.
+ self.assertEqual(await ctx1.__aenter__(), 124)
+ self.assertEqual(await ctx2.__aenter__(), 125)
+ self.assertEqual(await ctx3.__aenter__(), 126)
+
+ # None are persisted: current token unchanged.
+ self.assertEqual(id_gen.get_current_token(), 123)
+
+ # Persist each in turn.
+ await ctx1.__aexit__(None, None, None)
+ self.assertEqual(id_gen.get_current_token(), 124)
+ await ctx2.__aexit__(None, None, None)
+ self.assertEqual(id_gen.get_current_token(), 125)
+ await ctx3.__aexit__(None, None, None)
+ self.assertEqual(id_gen.get_current_token(), 126)
+
+ self.get_success(test_gen_next())
+
+ def test_multiple_gen_nexts_closed_in_different_order(self) -> None:
+ """Check that we handle overlapping calls to gen_next, even when their IDs
+ created and persisted in different orders."""
+ id_gen = self._create_id_generator()
+
+ async def test_gen_next() -> None:
+ ctx1 = id_gen.get_next()
+ ctx2 = id_gen.get_next()
+ ctx3 = id_gen.get_next()
+
+ # Request three new stream IDs.
+ self.assertEqual(await ctx1.__aenter__(), 124)
+ self.assertEqual(await ctx2.__aenter__(), 125)
+ self.assertEqual(await ctx3.__aenter__(), 126)
+
+ # None are persisted: current token unchanged.
+ self.assertEqual(id_gen.get_current_token(), 123)
+
+ # Persist them in a different order, starting with 126 from ctx3.
+ await ctx3.__aexit__(None, None, None)
+ # We haven't persisted 124 from ctx1 yet; the current token is still 123.
+ self.assertEqual(id_gen.get_current_token(), 123)
+
+ # Now persist 124 from ctx1.
+ await ctx1.__aexit__(None, None, None)
+ # Current token is then 124, waiting for 125 to be persisted.
+ self.assertEqual(id_gen.get_current_token(), 124)
+
+ # Finally persist 125 from ctx2.
+ await ctx2.__aexit__(None, None, None)
+ # Current token is then 126 (skipping over 125).
+ self.assertEqual(id_gen.get_current_token(), 126)
+
+ self.get_success(test_gen_next())
+
+ def test_gen_next_while_still_waiting_for_persistence(self) -> None:
+ """Check that we handle overlapping calls to gen_next."""
+ id_gen = self._create_id_generator()
+
+ async def test_gen_next() -> None:
+ ctx1 = id_gen.get_next()
+ ctx2 = id_gen.get_next()
+ ctx3 = id_gen.get_next()
+
+ # Request two new stream IDs.
+ self.assertEqual(await ctx1.__aenter__(), 124)
+ self.assertEqual(await ctx2.__aenter__(), 125)
+
+ # Persist ctx2 first.
+ await ctx2.__aexit__(None, None, None)
+ # Still waiting on ctx1's ID to be persisted.
+ self.assertEqual(id_gen.get_current_token(), 123)
+
+ # Now request a third stream ID. It should be 126 (the smallest ID that
+ # we've not yet handed out).
+ self.assertEqual(await ctx3.__aenter__(), 126)
+
+ self.get_success(test_gen_next())
+
+
class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
if not USE_POSTGRES_FOR_TESTS:
skip = "Requires Postgres"
@@ -48,9 +190,9 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
)
def _create_id_generator(
- self, instance_name="master", writers: Optional[List[str]] = None
+ self, instance_name: str = "master", writers: Optional[List[str]] = None
) -> MultiWriterIdGenerator:
- def _create(conn):
+ def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
return MultiWriterIdGenerator(
conn,
self.db_pool,
@@ -446,7 +588,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
self._insert_row_with_id("master", 3)
# Now we add a row *without* updating the stream ID
- def _insert(txn):
+ def _insert(txn: Cursor) -> None:
txn.execute("INSERT INTO foobar VALUES (26, 'master')")
self.get_success(self.db_pool.runInteraction("_insert", _insert))
@@ -481,9 +623,9 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
)
def _create_id_generator(
- self, instance_name="master", writers: Optional[List[str]] = None
+ self, instance_name: str = "master", writers: Optional[List[str]] = None
) -> MultiWriterIdGenerator:
- def _create(conn):
+ def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
return MultiWriterIdGenerator(
conn,
self.db_pool,
@@ -617,9 +759,9 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
)
def _create_id_generator(
- self, instance_name="master", writers: Optional[List[str]] = None
+ self, instance_name: str = "master", writers: Optional[List[str]] = None
) -> MultiWriterIdGenerator:
- def _create(conn):
+ def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
return MultiWriterIdGenerator(
conn,
self.db_pool,
@@ -641,7 +783,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
instance_name: str,
number: int,
update_stream_table: bool = True,
- ):
+ ) -> None:
"""Insert N rows as the given instance, inserting with stream IDs pulled
from the postgres sequence.
"""
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index e8b4a5644b..c55c4db970 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -96,8 +96,12 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
# Test each of the registered users is marked as active
timestamp = self.get_success(self.store.user_last_seen_monthly_active(user1))
+ # Mypy notes that one shouldn't compare Optional[int] to 0 with assertGreater.
+ # Check that timestamp really is an int.
+ assert timestamp is not None
self.assertGreater(timestamp, 0)
timestamp = self.get_success(self.store.user_last_seen_monthly_active(user2))
+ assert timestamp is not None
self.assertGreater(timestamp, 0)
# Test that users with reserved 3pids are not removed from the MAU table
@@ -166,10 +170,11 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.get_success(self.store.upsert_monthly_active_user(user_id2))
result = self.get_success(self.store.user_last_seen_monthly_active(user_id1))
+ assert result is not None
self.assertGreater(result, 0)
result = self.get_success(self.store.user_last_seen_monthly_active(user_id3))
- self.assertNotEqual(result, 0)
+ self.assertIsNone(result)
@override_config({"max_mau_value": 5})
def test_reap_monthly_active_users(self):
diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py
index b1a8f8bba7..81253d0361 100644
--- a/tests/storage/test_receipts.py
+++ b/tests/storage/test_receipts.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Collection, Optional
+
from synapse.api.constants import ReceiptTypes
from synapse.types import UserID, create_requester
@@ -23,7 +25,7 @@ OUR_USER_ID = "@our:test"
class ReceiptTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, homeserver):
+ def prepare(self, reactor, clock, homeserver) -> None:
super().prepare(reactor, clock, homeserver)
self.store = homeserver.get_datastores().main
@@ -83,10 +85,41 @@ class ReceiptTestCase(HomeserverTestCase):
)
)
- def test_return_empty_with_no_data(self):
+ def get_last_unthreaded_receipt(
+ self, receipt_types: Collection[str], room_id: Optional[str] = None
+ ) -> Optional[str]:
+ """
+ Fetch the event ID of the latest unthreaded receipt for the test user in the given room.
+
+ Args:
+ receipt_types: The receipt types to fetch.
+ room_id: The room to fetch the receipt in. Defaults to the first test room.
+
+ Returns:
+ The event ID of the latest matching receipt, if one exists.
+ """
+ result = self.get_success(
+ self.store.db_pool.runInteraction(
+ "get_last_receipt_event_id_for_user",
+ self.store.get_last_unthreaded_receipt_for_user_txn,
+ OUR_USER_ID,
+ room_id or self.room_id1,
+ receipt_types,
+ )
+ )
+ if not result:
+ return None
+
+ event_id, _ = result
+ return event_id
+
+ def test_return_empty_with_no_data(self) -> None:
res = self.get_success(
self.store.get_receipts_for_user(
- OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE]
+ OUR_USER_ID,
+ [
+ ReceiptTypes.READ,
+ ReceiptTypes.READ_PRIVATE,
+ ],
)
)
self.assertEqual(res, {})
@@ -94,21 +127,21 @@ class ReceiptTestCase(HomeserverTestCase):
res = self.get_success(
self.store.get_receipts_for_user_with_orderings(
OUR_USER_ID,
- [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE],
+ [
+ ReceiptTypes.READ,
+ ReceiptTypes.READ_PRIVATE,
+ ],
)
)
self.assertEqual(res, {})
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID,
- self.room_id1,
- [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE],
- )
+ res = self.get_last_unthreaded_receipt(
+ [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE]
)
+
self.assertEqual(res, None)
- def test_get_receipts_for_user(self):
+ def test_get_receipts_for_user(self) -> None:
# Send some events into the first room
event1_1_id = self.create_and_send_event(
self.room_id1, UserID.from_string(OTHER_USER_ID)
@@ -120,13 +153,18 @@ class ReceiptTestCase(HomeserverTestCase):
# Send public read receipt for the first event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {}
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], None, {}
)
)
# Send private read receipt for the second event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {}
+ self.room_id1,
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event1_2_id],
+ None,
+ {},
)
)
@@ -153,7 +191,7 @@ class ReceiptTestCase(HomeserverTestCase):
# Test receipt updating
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {}
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], None, {}
)
)
res = self.get_success(
@@ -169,7 +207,12 @@ class ReceiptTestCase(HomeserverTestCase):
# Test new room is reflected in what the method returns
self.get_success(
self.store.insert_receipt(
- self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {}
+ self.room_id2,
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event2_1_id],
+ None,
+ {},
)
)
res = self.get_success(
@@ -179,7 +222,7 @@ class ReceiptTestCase(HomeserverTestCase):
)
self.assertEqual(res, {self.room_id1: event1_2_id, self.room_id2: event2_1_id})
- def test_get_last_receipt_event_id_for_user(self):
+ def test_get_last_receipt_event_id_for_user(self) -> None:
# Send some events into the first room
event1_1_id = self.create_and_send_event(
self.room_id1, UserID.from_string(OTHER_USER_ID)
@@ -191,53 +234,42 @@ class ReceiptTestCase(HomeserverTestCase):
# Send public read receipt for the first event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {}
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], None, {}
)
)
# Send private read receipt for the second event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {}
+ self.room_id1,
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event1_2_id],
+ None,
+ {},
)
)
# Test we get the latest event when we want both private and public receipts
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID,
- self.room_id1,
- [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE],
- )
+ res = self.get_last_unthreaded_receipt(
+ [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE]
)
self.assertEqual(res, event1_2_id)
# Test we get the older event when we want only public receipt
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID, self.room_id1, [ReceiptTypes.READ]
- )
- )
+ res = self.get_last_unthreaded_receipt([ReceiptTypes.READ])
self.assertEqual(res, event1_1_id)
# Test we get the latest event when we want only the private receipt
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID, self.room_id1, [ReceiptTypes.READ_PRIVATE]
- )
- )
+ res = self.get_last_unthreaded_receipt([ReceiptTypes.READ_PRIVATE])
self.assertEqual(res, event1_2_id)
# Test receipt updating
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {}
- )
- )
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID, self.room_id1, [ReceiptTypes.READ]
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], None, {}
)
)
+ res = self.get_last_unthreaded_receipt([ReceiptTypes.READ])
self.assertEqual(res, event1_2_id)
# Send some events into the second room
@@ -248,14 +280,15 @@ class ReceiptTestCase(HomeserverTestCase):
# Test new room is reflected in what the method returns
self.get_success(
self.store.insert_receipt(
- self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {}
- )
- )
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID,
self.room_id2,
- [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE],
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event2_1_id],
+ None,
+ {},
)
)
+ res = self.get_last_unthreaded_receipt(
+ [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], room_id=self.room_id2
+ )
self.assertEqual(res, event2_1_id)
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index a49ac1525e..05ea802008 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -11,15 +11,19 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import UserTypes
from synapse.api.errors import ThreepidValidationError
+from synapse.server import HomeServer
+from synapse.types import JsonDict, UserID
+from synapse.util import Clock
-from tests.unittest import HomeserverTestCase
+from tests.unittest import HomeserverTestCase, override_config
class RegistrationStoreTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.user_id = "@my-user:test"
@@ -27,7 +31,7 @@ class RegistrationStoreTestCase(HomeserverTestCase):
self.pwhash = "{xx1}123456789"
self.device_id = "akgjhdjklgshg"
- def test_register(self):
+ def test_register(self) -> None:
self.get_success(self.store.register_user(self.user_id, self.pwhash))
self.assertEqual(
@@ -38,17 +42,32 @@ class RegistrationStoreTestCase(HomeserverTestCase):
"admin": 0,
"is_guest": 0,
"consent_version": None,
+ "consent_ts": None,
"consent_server_notice_sent": None,
"appservice_id": None,
"creation_ts": 0,
"user_type": None,
"deactivated": 0,
"shadow_banned": 0,
+ "approved": 1,
},
(self.get_success(self.store.get_user_by_id(self.user_id))),
)
- def test_add_tokens(self):
+ def test_consent(self) -> None:
+ self.get_success(self.store.register_user(self.user_id, self.pwhash))
+ before_consent = self.clock.time_msec()
+ self.reactor.advance(5)
+ self.get_success(self.store.user_set_consent_version(self.user_id, "1"))
+ self.reactor.advance(5)
+
+ user = self.get_success(self.store.get_user_by_id(self.user_id))
+ assert user
+ self.assertEqual(user["consent_version"], "1")
+ self.assertGreater(user["consent_ts"], before_consent)
+ self.assertLess(user["consent_ts"], self.clock.time_msec())
+
+ def test_add_tokens(self) -> None:
self.get_success(self.store.register_user(self.user_id, self.pwhash))
self.get_success(
self.store.add_access_token_to_user(
@@ -58,11 +77,12 @@ class RegistrationStoreTestCase(HomeserverTestCase):
result = self.get_success(self.store.get_user_by_access_token(self.tokens[1]))
+ assert result
self.assertEqual(result.user_id, self.user_id)
self.assertEqual(result.device_id, self.device_id)
self.assertIsNotNone(result.token_id)
- def test_user_delete_access_tokens(self):
+ def test_user_delete_access_tokens(self) -> None:
# add some tokens
self.get_success(self.store.register_user(self.user_id, self.pwhash))
self.get_success(
@@ -87,6 +107,7 @@ class RegistrationStoreTestCase(HomeserverTestCase):
# check the one not associated with the device was not deleted
user = self.get_success(self.store.get_user_by_access_token(self.tokens[0]))
+ assert user
self.assertEqual(self.user_id, user.user_id)
# now delete the rest
@@ -95,11 +116,11 @@ class RegistrationStoreTestCase(HomeserverTestCase):
user = self.get_success(self.store.get_user_by_access_token(self.tokens[0]))
self.assertIsNone(user, "access token was not deleted without device_id")
- def test_is_support_user(self):
+ def test_is_support_user(self) -> None:
TEST_USER = "@test:test"
SUPPORT_USER = "@support:test"
- res = self.get_success(self.store.is_support_user(None))
+ res = self.get_success(self.store.is_support_user(None)) # type: ignore[arg-type]
self.assertFalse(res)
self.get_success(
self.store.register_user(user_id=TEST_USER, password_hash=None)
@@ -115,7 +136,7 @@ class RegistrationStoreTestCase(HomeserverTestCase):
res = self.get_success(self.store.is_support_user(SUPPORT_USER))
self.assertTrue(res)
- def test_3pid_inhibit_invalid_validation_session_error(self):
+ def test_3pid_inhibit_invalid_validation_session_error(self) -> None:
"""Tests that enabling the configuration option to inhibit 3PID errors on
/requestToken also inhibits validation errors caused by an unknown session ID.
"""
@@ -147,3 +168,101 @@ class RegistrationStoreTestCase(HomeserverTestCase):
ThreepidValidationError,
)
self.assertEqual(e.value.msg, "Validation token not found or has expired", e)
+
+
+class ApprovalRequiredRegistrationTestCase(HomeserverTestCase):
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+
+ # If there's already some config for this feature in the default config, it
+ # means we're overriding it with @override_config. In this case we don't want
+ # to do anything more with it.
+ msc3866_config = config.get("experimental_features", {}).get("msc3866")
+ if msc3866_config is not None:
+ return config
+
+ # Require approval for all new accounts.
+ config["experimental_features"] = {
+ "msc3866": {
+ "enabled": True,
+ "require_approval_for_new_accounts": True,
+ }
+ }
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.user_id = "@my-user:test"
+ self.pwhash = "{xx1}123456789"
+
+ @override_config(
+ {
+ "experimental_features": {
+ "msc3866": {
+ "enabled": True,
+ "require_approval_for_new_accounts": False,
+ }
+ }
+ }
+ )
+ def test_approval_not_required(self) -> None:
+ """Tests that if we don't require approval for new accounts, newly created
+ accounts are automatically marked as approved.
+ """
+ self.get_success(self.store.register_user(self.user_id, self.pwhash))
+
+ user = self.get_success(self.store.get_user_by_id(self.user_id))
+ assert user is not None
+ self.assertTrue(user["approved"])
+
+ approved = self.get_success(self.store.is_user_approved(self.user_id))
+ self.assertTrue(approved)
+
+ def test_approval_required(self) -> None:
+ """Tests that if we require approval for new accounts, newly created accounts
+ are not automatically marked as approved.
+ """
+ self.get_success(self.store.register_user(self.user_id, self.pwhash))
+
+ user = self.get_success(self.store.get_user_by_id(self.user_id))
+ assert user is not None
+ self.assertFalse(user["approved"])
+
+ approved = self.get_success(self.store.is_user_approved(self.user_id))
+ self.assertFalse(approved)
+
+ def test_override(self) -> None:
+ """Tests that if we require approval for new accounts, but we explicitly say the
+ new user should be considered approved, they're marked as approved.
+ """
+ self.get_success(
+ self.store.register_user(
+ self.user_id,
+ self.pwhash,
+ approved=True,
+ )
+ )
+
+ user = self.get_success(self.store.get_user_by_id(self.user_id))
+ self.assertIsNotNone(user)
+ assert user is not None
+ self.assertEqual(user["approved"], 1)
+
+ approved = self.get_success(self.store.is_user_approved(self.user_id))
+ self.assertTrue(approved)
+
+ def test_approve_user(self) -> None:
+ """Tests that approving the user updates their approval status."""
+ self.get_success(self.store.register_user(self.user_id, self.pwhash))
+
+ approved = self.get_success(self.store.is_user_approved(self.user_id))
+ self.assertFalse(approved)
+
+ self.get_success(
+ self.store.update_user_approval_status(
+ UserID.from_string(self.user_id), True
+ )
+ )
+
+ approved = self.get_success(self.store.is_user_approved(self.user_id))
+ self.assertTrue(approved)
diff --git a/tests/storage/test_relations.py b/tests/storage/test_relations.py
new file mode 100644
index 0000000000..cd1d00208b
--- /dev/null
+++ b/tests/storage/test_relations.py
@@ -0,0 +1,111 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import MAIN_TIMELINE
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+
+
+class RelationsStoreTestCase(unittest.HomeserverTestCase):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ """
+ Creates a DAG:
+
+ A <---[m.thread]-- B <--[m.annotation]-- C
+ ^
+ |--[m.reference]-- D <--[m.annotation]-- E
+
+ F <--[m.annotation]-- G
+
+ """
+ self._main_store = self.hs.get_datastores().main
+
+ self._create_relation("A", "B", "m.thread")
+ self._create_relation("B", "C", "m.annotation")
+ self._create_relation("A", "D", "m.reference")
+ self._create_relation("D", "E", "m.annotation")
+ self._create_relation("F", "G", "m.annotation")
+
+ def _create_relation(self, parent_id: str, event_id: str, rel_type: str) -> None:
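+ # Insert the relation row directly; these tests don't need full events.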
+ self.get_success(
+ self._main_store.db_pool.simple_insert(
+ table="event_relations",
+ values={
+ "event_id": event_id,
+ "relates_to_id": parent_id,
+ "relation_type": rel_type,
+ },
+ )
+ )
+
+ def test_get_thread_id(self) -> None:
+ """
+ Ensure that get_thread_id only searches up the tree for threads.
+ """
+ # The thread itself and children of it return the thread.
+ thread_id = self.get_success(self._main_store.get_thread_id("B"))
+ self.assertEqual("A", thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id("C"))
+ self.assertEqual("A", thread_id)
+
+ # But the root and events related to the root do not.
+ thread_id = self.get_success(self._main_store.get_thread_id("A"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id("D"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id("E"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ # Events which are not related to a thread at all should return the
+ # main timeline.
+ thread_id = self.get_success(self._main_store.get_thread_id("F"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id("G"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ def test_get_thread_id_for_receipts(self) -> None:
+ """
+ Ensure that get_thread_id_for_receipts searches up and down the tree for a thread.
+ """
+ # All of the events are considered related to this thread.
+ thread_id = self.get_success(self._main_store.get_thread_id_for_receipts("A"))
+ self.assertEqual("A", thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id_for_receipts("B"))
+ self.assertEqual("A", thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id_for_receipts("C"))
+ self.assertEqual("A", thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id_for_receipts("D"))
+ self.assertEqual("A", thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id_for_receipts("E"))
+ self.assertEqual("A", thread_id)
+
+ # Events which are not related to a thread at all should return the
+ # main timeline.
+ thread_id = self.get_success(self._main_store.get_thread_id("F"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id("G"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py
index e747c6b50e..ef850daa73 100644
--- a/tests/storage/test_room_search.py
+++ b/tests/storage/test_room_search.py
@@ -12,11 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import List, Tuple
+from unittest.case import SkipTest
+
+from twisted.test.proto_helpers import MemoryReactor
+
import synapse.rest.admin
from synapse.api.constants import EventTypes
from synapse.api.errors import StoreError
from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.databases.main import DataStore
+from synapse.storage.databases.main.search import Phrase, SearchToken, _tokenize_query
from synapse.storage.engines import PostgresEngine
+from synapse.storage.engines.sqlite import Sqlite3Engine
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase, skip_unless
from tests.utils import USE_POSTGRES_FOR_TESTS
@@ -187,3 +197,179 @@ class EventSearchInsertionTest(HomeserverTestCase):
),
)
self.assertCountEqual(values, ["hi", "2"])
+
+
+class MessageSearchTest(HomeserverTestCase):
+ """
+ Check message search.
+
+ A powerful way to check the behaviour is to run the following in Postgres >= 11:
+
+ # SELECT websearch_to_tsquery('english', <your string>);
+
+ The result can be compared to the tokenized version for SQLite and Postgres < 11.
+
+ """
+
+ servlets = [
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ PHRASE = "the quick brown fox jumps over the lazy dog"
+
+ # Each entry is a search query, followed by a boolean indicating whether the query should match the phrase.
+ COMMON_CASES = [
+ ("nope", False),
+ ("brown", True),
+ ("quick brown", True),
+ ("brown quick", True),
+ ("quick \t brown", True),
+ ("jump", True),
+ ("brown nope", False),
+ ('"brown quick"', False),
+ ('"jumps over"', True),
+ ('"quick fox"', False),
+ ("nope OR doublenope", False),
+ ("furphy OR fox", True),
+ ("fox -nope", True),
+ ("fox -brown", False),
+ ('"fox" quick', True),
+ ('"quick brown', True),
+ ('" quick "', True),
+ ('" nope"', False),
+ ]
+ # TODO Test non-ASCII cases.
+
+ # Cases that fail on SQLite.
+ POSTGRES_CASES = [
+ # SQLite treats NOT as a binary operator.
+ ("- fox", False),
+ ("- nope", True),
+ ('"-fox quick', False),
+ # PostgreSQL skips stop words.
+ ('"the quick brown"', True),
+ ('"over lazy"', True),
+ ]
+
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
+ # Register a user and create a room, create some messages
+ self.register_user("alice", "password")
+ self.access_token = self.login("alice", "password")
+ self.room_id = self.helper.create_room_as("alice", tok=self.access_token)
+
+ # Send the phrase as a message and check it was created
+ response = self.helper.send(self.room_id, self.PHRASE, tok=self.access_token)
+ self.assertIn("event_id", response)
+
+ # The behaviour of a missing trailing double quote changed in PostgreSQL 14
+ # from ignoring the initial double quote to treating it as a phrase.
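+ # For example (our understanding of the behaviour; worth verifying locally):
+ #   PG < 14:  websearch_to_tsquery('english', '"fox quick') -> 'fox' & 'quick'
+ #   PG >= 14: websearch_to_tsquery('english', '"fox quick') -> 'fox' <-> 'quick'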
+ main_store = homeserver.get_datastores().main
+ found = False
+ if isinstance(main_store.database_engine, PostgresEngine):
+ assert main_store.database_engine._version is not None
+ found = main_store.database_engine._version < 140000
+ self.COMMON_CASES.append(('"fox quick', found))
+
+ def test_tokenize_query(self) -> None:
+ """Test the custom logic to tokenize a user's query."""
+ cases = (
+ ("brown", ["brown"]),
+ ("quick brown", ["quick", SearchToken.And, "brown"]),
+ ("quick \t brown", ["quick", SearchToken.And, "brown"]),
+ ('"brown quick"', [Phrase(["brown", "quick"])]),
+ ("furphy OR fox", ["furphy", SearchToken.Or, "fox"]),
+ ("fox -brown", ["fox", SearchToken.Not, "brown"]),
+ ("- fox", [SearchToken.Not, "fox"]),
+ ('"fox" quick', [Phrase(["fox"]), SearchToken.And, "quick"]),
+ # No trailing double quote.
+ ('"fox quick', [Phrase(["fox", "quick"])]),
+ ('"-fox quick', [Phrase(["-fox", "quick"])]),
+ ('" quick "', [Phrase(["quick"])]),
+ (
+ 'q"uick brow"n',
+ [
+ "q",
+ SearchToken.And,
+ Phrase(["uick", "brow"]),
+ SearchToken.And,
+ "n",
+ ],
+ ),
+ (
+ '-"quick brown"',
+ [SearchToken.Not, Phrase(["quick", "brown"])],
+ ),
+ )
+
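+ # Tokenize each query and compare against the expected token stream.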
+ for query, expected in cases:
+ tokenized = _tokenize_query(query)
+ self.assertEqual(
+ tokenized, expected, f"{tokenized} != {expected} for {query}"
+ )
+
+ def _check_test_cases(
+ self, store: DataStore, cases: List[Tuple[str, bool]]
+ ) -> None:
+ # Run all the test cases versus search_msgs
+ for query, expect_to_contain in cases:
+ result = self.get_success(
+ store.search_msgs([self.room_id], query, ["content.body"])
+ )
+ self.assertEqual(
+ result["count"],
+ 1 if expect_to_contain else 0,
+ f"expected '{query}' to match '{self.PHRASE}'"
+ if expect_to_contain
+ else f"'{query}' unexpectedly matched '{self.PHRASE}'",
+ )
+ self.assertEqual(
+ len(result["results"]),
+ 1 if expect_to_contain else 0,
+ "results array length should match count",
+ )
+
+ # Run them again versus search_rooms
+ for query, expect_to_contain in cases:
+ result = self.get_success(
+ store.search_rooms([self.room_id], query, ["content.body"], 10)
+ )
+ self.assertEqual(
+ result["count"],
+ 1 if expect_to_contain else 0,
+ f"expected '{query}' to match '{self.PHRASE}'"
+ if expect_to_contain
+ else f"'{query}' unexpectedly matched '{self.PHRASE}'",
+ )
+ self.assertEqual(
+ len(result["results"]),
+ 1 if expect_to_contain else 0,
+ "results array length should match count",
+ )
+
+ def test_postgres_web_search_for_phrase(self) -> None:
+ """
+ Test searching for phrases using typical web search syntax, as per postgres' websearch_to_tsquery.
+ This test is skipped unless the postgres instance supports websearch_to_tsquery.
+
+ See https://www.postgresql.org/docs/current/textsearch-controls.html
+ """
+
+ store = self.hs.get_datastores().main
+ if not isinstance(store.database_engine, PostgresEngine):
+ raise SkipTest("Test only applies when postgres is used as the database")
+
+ self._check_test_cases(store, self.COMMON_CASES + self.POSTGRES_CASES)
+
+ def test_sqlite_search(self) -> None:
+ """
+ Test sqlite searching for phrases.
+ """
+ store = self.hs.get_datastores().main
+ if not isinstance(store.database_engine, Sqlite3Engine):
+ raise SkipTest("Test only applies when sqlite is used as the database")
+
+ self._check_test_cases(store, self.COMMON_CASES)
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 240b02cb9f..8794401823 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -23,6 +23,7 @@ from synapse.util import Clock
from tests import unittest
from tests.server import TestHomeServer
+from tests.test_utils import event_injection
class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
@@ -157,6 +158,75 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
# Check that alice's display name is now None
self.assertEqual(row[0]["display_name"], None)
+ def test_room_is_locally_forgotten(self) -> None:
+ """Test that when the last local user has forgotten a room it is known as forgotten."""
+ # join two local and one remote user
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+ self.get_success(
+ event_injection.inject_member_event(self.hs, self.room, self.u_bob, "join")
+ )
+ self.get_success(
+ event_injection.inject_member_event(
+ self.hs, self.room, self.u_charlie.to_string(), "join"
+ )
+ )
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ # local users leave the room and the room is not forgotten
+ self.get_success(
+ event_injection.inject_member_event(
+ self.hs, self.room, self.u_alice, "leave"
+ )
+ )
+ self.get_success(
+ event_injection.inject_member_event(self.hs, self.room, self.u_bob, "leave")
+ )
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ # first user forgets the room; the room is not forgotten yet
+ self.get_success(self.store.forget(self.u_alice, self.room))
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ # second (last local) user forgets the room and the room is forgotten
+ self.get_success(self.store.forget(self.u_bob, self.room))
+ self.assertTrue(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ def test_join_locally_forgotten_room(self) -> None:
+ """Tests if a user joins a forgotten room the room is not forgotten anymore."""
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ # after leaving and forgetting the room, it is forgotten
+ self.get_success(
+ event_injection.inject_member_event(
+ self.hs, self.room, self.u_alice, "leave"
+ )
+ )
+ self.get_success(self.store.forget(self.u_alice, self.room))
+ self.assertTrue(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ # after rejoining, the room is no longer forgotten
+ self.get_success(
+ event_injection.inject_member_event(
+ self.hs, self.room, self.u_alice, "join"
+ )
+ )
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index 78663a53fe..34fa810cf6 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -16,7 +16,6 @@ from typing import List
from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.filtering import Filter
-from synapse.events import EventBase
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.types import JsonDict
@@ -40,7 +39,7 @@ class PaginationTestCase(HomeserverTestCase):
def default_config(self):
config = super().default_config()
- config["experimental_features"] = {"msc3440_enabled": True}
+ config["experimental_features"] = {"msc3874_enabled": True}
return config
def prepare(self, reactor, clock, homeserver):
@@ -58,6 +57,11 @@ class PaginationTestCase(HomeserverTestCase):
self.third_tok = self.login("third", "test")
self.helper.join(room=self.room_id, user=self.third_user_id, tok=self.third_tok)
+ # Store a token which is after all the room creation events.
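+ # Tests paginate forwards from this token, so only events sent below are returned.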
+ self.from_token = self.get_success(
+ self.hs.get_event_sources().get_current_token_for_pagination(self.room_id)
+ )
+
# An initial event with a relation from second user.
res = self.helper.send_event(
room_id=self.room_id,
@@ -66,7 +70,7 @@ class PaginationTestCase(HomeserverTestCase):
tok=self.tok,
)
self.event_id_1 = res["event_id"]
- self.helper.send_event(
+ res = self.helper.send_event(
room_id=self.room_id,
type="m.reaction",
content={
@@ -78,6 +82,7 @@ class PaginationTestCase(HomeserverTestCase):
},
tok=self.second_tok,
)
+ self.event_id_annotation = res["event_id"]
# Another event with a relation from third user.
res = self.helper.send_event(
@@ -87,7 +92,7 @@ class PaginationTestCase(HomeserverTestCase):
tok=self.tok,
)
self.event_id_2 = res["event_id"]
- self.helper.send_event(
+ res = self.helper.send_event(
room_id=self.room_id,
type="m.reaction",
content={
@@ -98,68 +103,59 @@ class PaginationTestCase(HomeserverTestCase):
},
tok=self.third_tok,
)
+ self.event_id_reference = res["event_id"]
# An event with no relations.
- self.helper.send_event(
+ res = self.helper.send_event(
room_id=self.room_id,
type=EventTypes.Message,
content={"msgtype": "m.text", "body": "No relations"},
tok=self.tok,
)
+ self.event_id_none = res["event_id"]
- def _filter_messages(self, filter: JsonDict) -> List[EventBase]:
+ def _filter_messages(self, filter: JsonDict) -> List[str]:
"""Make a request to /messages with a filter, returns the chunk of events."""
- from_token = self.get_success(
- self.hs.get_event_sources().get_current_token_for_pagination(self.room_id)
- )
-
events, next_key = self.get_success(
self.hs.get_datastores().main.paginate_room_events(
room_id=self.room_id,
- from_key=from_token.room_key,
+ from_key=self.from_token.room_key,
to_key=None,
- direction="b",
+ direction="f",
limit=10,
event_filter=Filter(self.hs, filter),
)
)
- return events
+ return [ev.event_id for ev in events]
def test_filter_relation_senders(self):
# Messages which second user reacted to.
filter = {"related_by_senders": [self.second_user_id]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_1)
+ self.assertEqual(chunk, [self.event_id_1])
# Messages which third user reacted to.
filter = {"related_by_senders": [self.third_user_id]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_2)
+ self.assertEqual(chunk, [self.event_id_2])
# Messages which either user reacted to.
filter = {"related_by_senders": [self.second_user_id, self.third_user_id]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 2, chunk)
- self.assertCountEqual(
- [c.event_id for c in chunk], [self.event_id_1, self.event_id_2]
- )
+ self.assertCountEqual(chunk, [self.event_id_1, self.event_id_2])
def test_filter_relation_type(self):
# Messages which have annotations.
filter = {"related_by_rel_types": [RelationTypes.ANNOTATION]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_1)
+ self.assertEqual(chunk, [self.event_id_1])
# Messages which have references.
filter = {"related_by_rel_types": [RelationTypes.REFERENCE]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_2)
+ self.assertEqual(chunk, [self.event_id_2])
# Messages which have either annotations or references.
filter = {
@@ -169,10 +165,7 @@ class PaginationTestCase(HomeserverTestCase):
]
}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 2, chunk)
- self.assertCountEqual(
- [c.event_id for c in chunk], [self.event_id_1, self.event_id_2]
- )
+ self.assertCountEqual(chunk, [self.event_id_1, self.event_id_2])
def test_filter_relation_senders_and_type(self):
# Messages which second user reacted to.
@@ -181,8 +174,7 @@ class PaginationTestCase(HomeserverTestCase):
"related_by_rel_types": [RelationTypes.ANNOTATION],
}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_1)
+ self.assertEqual(chunk, [self.event_id_1])
def test_duplicate_relation(self):
"""An event should only be returned once if there are multiple relations to it."""
@@ -201,5 +193,65 @@ class PaginationTestCase(HomeserverTestCase):
filter = {"related_by_senders": [self.second_user_id]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_1)
+ self.assertEqual(chunk, [self.event_id_1])
+
+ def test_filter_rel_types(self) -> None:
+ # Messages which are annotations.
+ filter = {"org.matrix.msc3874.rel_types": [RelationTypes.ANNOTATION]}
+ chunk = self._filter_messages(filter)
+ self.assertEqual(chunk, [self.event_id_annotation])
+
+ # Messages which are references.
+ filter = {"org.matrix.msc3874.rel_types": [RelationTypes.REFERENCE]}
+ chunk = self._filter_messages(filter)
+ self.assertEqual(chunk, [self.event_id_reference])
+
+ # Messages which are either annotations or references.
+ filter = {
+ "org.matrix.msc3874.rel_types": [
+ RelationTypes.ANNOTATION,
+ RelationTypes.REFERENCE,
+ ]
+ }
+ chunk = self._filter_messages(filter)
+ self.assertCountEqual(
+ chunk,
+ [self.event_id_annotation, self.event_id_reference],
+ )
+
+ def test_filter_not_rel_types(self) -> None:
+ # Messages which are not annotations.
+ filter = {"org.matrix.msc3874.not_rel_types": [RelationTypes.ANNOTATION]}
+ chunk = self._filter_messages(filter)
+ self.assertEqual(
+ chunk,
+ [
+ self.event_id_1,
+ self.event_id_2,
+ self.event_id_reference,
+ self.event_id_none,
+ ],
+ )
+
+ # Messages which are not references.
+ filter = {"org.matrix.msc3874.not_rel_types": [RelationTypes.REFERENCE]}
+ chunk = self._filter_messages(filter)
+ self.assertEqual(
+ chunk,
+ [
+ self.event_id_1,
+ self.event_id_annotation,
+ self.event_id_2,
+ self.event_id_none,
+ ],
+ )
+
+ # Messages which are neither annotations nor references.
+ filter = {
+ "org.matrix.msc3874.not_rel_types": [
+ RelationTypes.ANNOTATION,
+ RelationTypes.REFERENCE,
+ ]
+ }
+ chunk = self._filter_messages(filter)
+ self.assertEqual(chunk, [self.event_id_1, self.event_id_2, self.event_id_none])
|