diff --git a/tests/events/test_snapshot.py b/tests/events/test_snapshot.py
index 6687c28e8f..b5e42f9600 100644
--- a/tests/events/test_snapshot.py
+++ b/tests/events/test_snapshot.py
@@ -101,8 +101,7 @@ class TestEventContext(unittest.HomeserverTestCase):
self.assertEqual(
context.state_group_before_event, d_context.state_group_before_event
)
- self.assertEqual(context.prev_group, d_context.prev_group)
- self.assertEqual(context.delta_ids, d_context.delta_ids)
+ self.assertEqual(context.state_group_deltas, d_context.state_group_deltas)
self.assertEqual(context.app_service, d_context.app_service)
self.assertEqual(
diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py
index 391ae51707..b290b020a2 100644
--- a/tests/federation/test_federation_catch_up.py
+++ b/tests/federation/test_federation_catch_up.py
@@ -431,28 +431,24 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
# ACT: call _wake_destinations_needing_catchup
# patch wake_destination to just count the destinations instead
- woken = []
+ woken = set()
def wake_destination_track(destination: str) -> None:
- woken.append(destination)
+ woken.add(destination)
self.federation_sender.wake_destination = wake_destination_track # type: ignore[assignment]
- # cancel the pre-existing timer for _wake_destinations_needing_catchup
- # this is because we are calling it manually rather than waiting for it
- # to be called automatically
- assert self.federation_sender._catchup_after_startup_timer is not None
- self.federation_sender._catchup_after_startup_timer.cancel()
-
- self.get_success(
- self.federation_sender._wake_destinations_needing_catchup(), by=5.0
- )
+        # We wait quite a long time so that every destination can be woken up,
+        # since there is a delay between each wake-up.
+ self.pump(by=5.0)
# ASSERT (_wake_destinations_needing_catchup):
# - all remotes are woken up, save for zzzerver
self.assertNotIn("zzzerver", woken)
- # - all destinations are woken exactly once; they appear once in woken.
- self.assertCountEqual(woken, server_names[:-1])
+        # - all destinations are woken, potentially more than once, since the
+        #   wake-up is called regularly and this test never acknowledges that a
+        #   transaction has been successfully sent.
+ self.assertCountEqual(woken, set(server_names[:-1]))
def test_not_latest_event(self) -> None:
"""Test that we send the latest event in the room even if its not ours."""
diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py
index 91694e4fca..a45ab83683 100644
--- a/tests/federation/test_federation_client.py
+++ b/tests/federation/test_federation_client.py
@@ -124,7 +124,7 @@ class FederationClientTest(FederatingHomeserverTestCase):
# check the right call got made to the agent
self._mock_agent.request.assert_called_once_with(
b"GET",
- b"matrix://yet.another.server/_matrix/federation/v1/state/%21room_id?event_id=event_id",
+ b"matrix-federation://yet.another.server/_matrix/federation/v1/state/%21room_id?event_id=event_id",
headers=mock.ANY,
bodyProducer=None,
)
@@ -232,7 +232,7 @@ class FederationClientTest(FederatingHomeserverTestCase):
# check the right call got made to the agent
self._mock_agent.request.assert_called_once_with(
b"GET",
- b"matrix://yet.another.server/_matrix/federation/v1/event/event_id",
+ b"matrix-federation://yet.another.server/_matrix/federation/v1/event/event_id",
headers=mock.ANY,
bodyProducer=None,
)
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
index 105b4caefa..aed2a4c07a 100644
--- a/tests/http/federation/test_matrix_federation_agent.py
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -292,7 +292,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.agent = self._make_agent()
self.reactor.lookups["testserv"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv:8448/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv:8448/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -393,7 +393,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.reactor.lookups["testserv"] = "1.2.3.4"
self.reactor.lookups["proxy.com"] = "9.9.9.9"
- test_d = self._make_get_request(b"matrix://testserv:8448/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv:8448/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -532,7 +532,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
# there will be a getaddrinfo on the IP
self.reactor.lookups["1.2.3.4"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://1.2.3.4/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://1.2.3.4/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -568,7 +568,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
# there will be a getaddrinfo on the IP
self.reactor.lookups["::1"] = "::1"
- test_d = self._make_get_request(b"matrix://[::1]/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://[::1]/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -604,7 +604,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
# there will be a getaddrinfo on the IP
self.reactor.lookups["::1"] = "::1"
- test_d = self._make_get_request(b"matrix://[::1]:80/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://[::1]:80/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -639,7 +639,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
self.reactor.lookups["testserv1"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv1/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv1/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -693,7 +693,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
# there will be a getaddrinfo on the IP
self.reactor.lookups["1.2.3.5"] = "1.2.3.5"
- test_d = self._make_get_request(b"matrix://1.2.3.5/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://1.2.3.5/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -725,7 +725,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
self.reactor.lookups["testserv"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -780,7 +780,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.reactor.lookups["testserv"] = "1.2.3.4"
self.reactor.lookups["target-server"] = "1::f"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -844,7 +844,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.reactor.lookups["testserv"] = "1.2.3.4"
self.reactor.lookups["target-server"] = "1::f"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -933,7 +933,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
self.reactor.lookups["testserv"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -1009,7 +1009,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
),
)
- test_d = agent.request(b"GET", b"matrix://testserv/foo/bar")
+ test_d = agent.request(b"GET", b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -1042,7 +1042,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
)
self.reactor.lookups["srvtarget"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -1082,7 +1082,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.reactor.lookups["testserv"] = "1.2.3.4"
self.reactor.lookups["srvtarget"] = "5.6.7.8"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -1143,7 +1143,9 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.reactor.lookups["xn--bcher-kva.com"] = "1.2.3.4"
# this is idna for bücher.com
- test_d = self._make_get_request(b"matrix://xn--bcher-kva.com/foo/bar")
+ test_d = self._make_get_request(
+ b"matrix-federation://xn--bcher-kva.com/foo/bar"
+ )
# Nothing happened yet
self.assertNoResult(test_d)
@@ -1204,7 +1206,9 @@ class MatrixFederationAgentTests(unittest.TestCase):
)
self.reactor.lookups["xn--trget-3qa.com"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://xn--bcher-kva.com/foo/bar")
+ test_d = self._make_get_request(
+ b"matrix-federation://xn--bcher-kva.com/foo/bar"
+ )
# Nothing happened yet
self.assertNoResult(test_d)
@@ -1411,7 +1415,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
)
self.reactor.lookups["target.com"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
diff --git a/tests/media/test_base.py b/tests/media/test_base.py
index 66498c744d..4728c80969 100644
--- a/tests/media/test_base.py
+++ b/tests/media/test_base.py
@@ -20,12 +20,12 @@ from tests import unittest
class GetFileNameFromHeadersTests(unittest.TestCase):
# input -> expected result
TEST_CASES = {
- b"inline; filename=abc.txt": "abc.txt",
- b'inline; filename="azerty"': "azerty",
- b'inline; filename="aze%20rty"': "aze%20rty",
- b'inline; filename="aze"rty"': 'aze"rty',
- b'inline; filename="azer;ty"': "azer;ty",
- b"inline; filename*=utf-8''foo%C2%A3bar": "foo£bar",
+ b"attachment; filename=abc.txt": "abc.txt",
+ b'attachment; filename="azerty"': "azerty",
+ b'attachment; filename="aze%20rty"': "aze%20rty",
+ b'attachment; filename="aze"rty"': 'aze"rty',
+ b'attachment; filename="azer;ty"': "azer;ty",
+ b"attachment; filename*=utf-8''foo%C2%A3bar": "foo£bar",
}
def tests(self) -> None:
diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py
index f0f2da65db..ea0051dde4 100644
--- a/tests/media/test_media_storage.py
+++ b/tests/media/test_media_storage.py
@@ -317,7 +317,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
def test_handle_missing_content_type(self) -> None:
channel = self._req(
- b"inline; filename=out" + self.test_image.extension,
+ b"attachment; filename=out" + self.test_image.extension,
include_content_type=False,
)
headers = channel.headers
@@ -331,7 +331,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
If the filename is filename=<ascii> then Synapse will decode it as an
ASCII string, and use filename= in the response.
"""
- channel = self._req(b"inline; filename=out" + self.test_image.extension)
+ channel = self._req(b"attachment; filename=out" + self.test_image.extension)
headers = channel.headers
self.assertEqual(
@@ -339,7 +339,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
)
self.assertEqual(
headers.getRawHeaders(b"Content-Disposition"),
- [b"inline; filename=out" + self.test_image.extension],
+ [b"attachment; filename=out" + self.test_image.extension],
)
def test_disposition_filenamestar_utf8escaped(self) -> None:
@@ -350,7 +350,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
"""
filename = parse.quote("\u2603".encode()).encode("ascii")
channel = self._req(
- b"inline; filename*=utf-8''" + filename + self.test_image.extension
+ b"attachment; filename*=utf-8''" + filename + self.test_image.extension
)
headers = channel.headers
@@ -359,13 +359,13 @@ class MediaRepoTests(unittest.HomeserverTestCase):
)
self.assertEqual(
headers.getRawHeaders(b"Content-Disposition"),
- [b"inline; filename*=utf-8''" + filename + self.test_image.extension],
+ [b"attachment; filename*=utf-8''" + filename + self.test_image.extension],
)
def test_disposition_none(self) -> None:
"""
- If there is no filename, one isn't passed on in the Content-Disposition
- of the request.
+ If there is no filename, Content-Disposition should only
+ be a disposition type.
"""
channel = self._req(None)
@@ -373,7 +373,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
self.assertEqual(
headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
)
- self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None)
+ self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), [b"attachment"])
def test_thumbnail_crop(self) -> None:
"""Test that a cropped remote thumbnail is available."""
@@ -612,7 +612,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
Tests that the `X-Robots-Tag` header is present, which informs web crawlers
to not index, archive, or follow links in media.
"""
- channel = self._req(b"inline; filename=out" + self.test_image.extension)
+ channel = self._req(b"attachment; filename=out" + self.test_image.extension)
headers = channel.headers
self.assertEqual(
@@ -625,7 +625,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
Test that the Cross-Origin-Resource-Policy header is set to "cross-origin"
allowing web clients to embed media from the downloads API.
"""
- channel = self._req(b"inline; filename=out" + self.test_image.extension)
+ channel = self._req(b"attachment; filename=out" + self.test_image.extension)
headers = channel.headers
diff --git a/tests/rest/client/test_push_rule_attrs.py b/tests/rest/client/test_push_rule_attrs.py
index 4f875b9289..5aca74475f 100644
--- a/tests/rest/client/test_push_rule_attrs.py
+++ b/tests/rest/client/test_push_rule_attrs.py
@@ -412,3 +412,70 @@ class PushRuleAttributesTestCase(HomeserverTestCase):
)
self.assertEqual(channel.code, 404)
self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND)
+
+ def test_contains_user_name(self) -> None:
+ """
+        Tests that the `contains_user_name` rule is present and has the proper value in `pattern`.
+ """
+ username = "bob"
+ self.register_user(username, "pass")
+ token = self.login(username, "pass")
+
+ channel = self.make_request(
+ "GET",
+ "/pushrules/global/content/.m.rule.contains_user_name",
+ access_token=token,
+ )
+
+ self.assertEqual(channel.code, 200)
+
+ self.assertEqual(
+ {
+ "rule_id": ".m.rule.contains_user_name",
+ "default": True,
+ "enabled": True,
+ "pattern": username,
+ "actions": [
+ "notify",
+ {"set_tweak": "highlight"},
+ {"set_tweak": "sound", "value": "default"},
+ ],
+ },
+ channel.json_body,
+ )
+
+ def test_is_user_mention(self) -> None:
+ """
+        Tests that the `is_user_mention` rule is present and has the proper value in `value`.
+ """
+ user = self.register_user("bob", "pass")
+ token = self.login("bob", "pass")
+
+ channel = self.make_request(
+ "GET",
+ "/pushrules/global/override/.m.rule.is_user_mention",
+ access_token=token,
+ )
+
+ self.assertEqual(channel.code, 200)
+
+ self.assertEqual(
+ {
+ "rule_id": ".m.rule.is_user_mention",
+ "default": True,
+ "enabled": True,
+ "conditions": [
+ {
+ "kind": "event_property_contains",
+ "key": "content.m\\.mentions.user_ids",
+ "value": user,
+ }
+ ],
+ "actions": [
+ "notify",
+ {"set_tweak": "highlight"},
+ {"set_tweak": "sound", "value": "default"},
+ ],
+ },
+ channel.json_body,
+ )
diff --git a/tests/rest/client/test_room_batch.py b/tests/rest/client/test_room_batch.py
deleted file mode 100644
index 9d5cb60d16..0000000000
--- a/tests/rest/client/test_room_batch.py
+++ /dev/null
@@ -1,302 +0,0 @@
-import logging
-from typing import List, Tuple
-from unittest.mock import Mock, patch
-
-from twisted.test.proto_helpers import MemoryReactor
-
-from synapse.api.constants import EventContentFields, EventTypes
-from synapse.appservice import ApplicationService
-from synapse.rest import admin
-from synapse.rest.client import login, register, room, room_batch, sync
-from synapse.server import HomeServer
-from synapse.types import JsonDict, RoomStreamToken
-from synapse.util import Clock
-
-from tests import unittest
-
-logger = logging.getLogger(__name__)
-
-
-def _create_join_state_events_for_batch_send_request(
- virtual_user_ids: List[str],
- insert_time: int,
-) -> List[JsonDict]:
- return [
- {
- "type": EventTypes.Member,
- "sender": virtual_user_id,
- "origin_server_ts": insert_time,
- "content": {
- "membership": "join",
- "displayname": "display-name-for-%s" % (virtual_user_id,),
- },
- "state_key": virtual_user_id,
- }
- for virtual_user_id in virtual_user_ids
- ]
-
-
-def _create_message_events_for_batch_send_request(
- virtual_user_id: str, insert_time: int, count: int
-) -> List[JsonDict]:
- return [
- {
- "type": EventTypes.Message,
- "sender": virtual_user_id,
- "origin_server_ts": insert_time,
- "content": {
- "msgtype": "m.text",
- "body": "Historical %d" % (i),
- EventContentFields.MSC2716_HISTORICAL: True,
- },
- }
- for i in range(count)
- ]
-
-
-class RoomBatchTestCase(unittest.HomeserverTestCase):
- """Test importing batches of historical messages."""
-
- servlets = [
- admin.register_servlets_for_client_rest_resource,
- room_batch.register_servlets,
- room.register_servlets,
- register.register_servlets,
- login.register_servlets,
- sync.register_servlets,
- ]
-
- def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- config = self.default_config()
-
- self.appservice = ApplicationService(
- token="i_am_an_app_service",
- id="1234",
- namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]},
- # Note: this user does not have to match the regex above
- sender="@as_main:test",
- )
-
- mock_load_appservices = Mock(return_value=[self.appservice])
- with patch(
- "synapse.storage.databases.main.appservice.load_appservices",
- mock_load_appservices,
- ):
- hs = self.setup_test_homeserver(config=config)
- return hs
-
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
- self.clock = clock
- self._storage_controllers = hs.get_storage_controllers()
-
- self.virtual_user_id, _ = self.register_appservice_user(
- "as_user_potato", self.appservice.token
- )
-
- def _create_test_room(self) -> Tuple[str, str, str, str]:
- room_id = self.helper.create_room_as(
- self.appservice.sender, tok=self.appservice.token
- )
-
- res_a = self.helper.send_event(
- room_id=room_id,
- type=EventTypes.Message,
- content={
- "msgtype": "m.text",
- "body": "A",
- },
- tok=self.appservice.token,
- )
- event_id_a = res_a["event_id"]
-
- res_b = self.helper.send_event(
- room_id=room_id,
- type=EventTypes.Message,
- content={
- "msgtype": "m.text",
- "body": "B",
- },
- tok=self.appservice.token,
- )
- event_id_b = res_b["event_id"]
-
- res_c = self.helper.send_event(
- room_id=room_id,
- type=EventTypes.Message,
- content={
- "msgtype": "m.text",
- "body": "C",
- },
- tok=self.appservice.token,
- )
- event_id_c = res_c["event_id"]
-
- return room_id, event_id_a, event_id_b, event_id_c
-
- @unittest.override_config({"experimental_features": {"msc2716_enabled": True}})
- def test_same_state_groups_for_whole_historical_batch(self) -> None:
- """Make sure that when using the `/batch_send` endpoint to import a
- bunch of historical messages, it re-uses the same `state_group` across
- the whole batch. This is an easy optimization to make sure we're getting
- right because the state for the whole batch is contained in
- `state_events_at_start` and can be shared across everything.
- """
-
- time_before_room = int(self.clock.time_msec())
- room_id, event_id_a, _, _ = self._create_test_room()
-
- channel = self.make_request(
- "POST",
- "/_matrix/client/unstable/org.matrix.msc2716/rooms/%s/batch_send?prev_event_id=%s"
- % (room_id, event_id_a),
- content={
- "events": _create_message_events_for_batch_send_request(
- self.virtual_user_id, time_before_room, 3
- ),
- "state_events_at_start": _create_join_state_events_for_batch_send_request(
- [self.virtual_user_id], time_before_room
- ),
- },
- access_token=self.appservice.token,
- )
- self.assertEqual(channel.code, 200, channel.result)
-
- # Get the historical event IDs that we just imported
- historical_event_ids = channel.json_body["event_ids"]
- self.assertEqual(len(historical_event_ids), 3)
-
- # Fetch the state_groups
- state_group_map = self.get_success(
- self._storage_controllers.state.get_state_groups_ids(
- room_id, historical_event_ids
- )
- )
-
- # We expect all of the historical events to be using the same state_group
- # so there should only be a single state_group here!
- self.assertEqual(
- len(state_group_map.keys()),
- 1,
- "Expected a single state_group to be returned by saw state_groups=%s"
- % (state_group_map.keys(),),
- )
-
- @unittest.override_config({"experimental_features": {"msc2716_enabled": True}})
- def test_sync_while_batch_importing(self) -> None:
- """
- Make sure that /sync correctly returns full room state when a user joins
- during ongoing batch backfilling.
- See: https://github.com/matrix-org/synapse/issues/12281
- """
- # Create user who will be invited & join room
- user_id = self.register_user("beep", "test")
- user_tok = self.login("beep", "test")
-
- time_before_room = int(self.clock.time_msec())
-
- # Create a room with some events
- room_id, _, _, _ = self._create_test_room()
- # Invite the user
- self.helper.invite(
- room_id, src=self.appservice.sender, tok=self.appservice.token, targ=user_id
- )
-
- # Create another room, send a bunch of events to advance the stream token
- other_room_id = self.helper.create_room_as(
- self.appservice.sender, tok=self.appservice.token
- )
- for _ in range(5):
- self.helper.send_event(
- room_id=other_room_id,
- type=EventTypes.Message,
- content={"msgtype": "m.text", "body": "C"},
- tok=self.appservice.token,
- )
-
- # Join the room as the normal user
- self.helper.join(room_id, user_id, tok=user_tok)
-
- # Create an event to hang the historical batch from - In order to see
- # the failure case originally reported in #12281, the historical batch
- # must be hung from the most recent event in the room so the base
- # insertion event ends up with the highest `topogological_ordering`
- # (`depth`) in the room but will have a negative `stream_ordering`
- # because it's a `historical` event. Previously, when assembling the
- # `state` for the `/sync` response, the bugged logic would sort by
- # `topological_ordering` descending and pick up the base insertion
- # event because it has a negative `stream_ordering` below the given
- # pagination token. Now we properly sort by `stream_ordering`
- # descending which puts `historical` events with a negative
- # `stream_ordering` way at the bottom and aren't selected as expected.
- response = self.helper.send_event(
- room_id=room_id,
- type=EventTypes.Message,
- content={
- "msgtype": "m.text",
- "body": "C",
- },
- tok=self.appservice.token,
- )
- event_to_hang_id = response["event_id"]
-
- channel = self.make_request(
- "POST",
- "/_matrix/client/unstable/org.matrix.msc2716/rooms/%s/batch_send?prev_event_id=%s"
- % (room_id, event_to_hang_id),
- content={
- "events": _create_message_events_for_batch_send_request(
- self.virtual_user_id, time_before_room, 3
- ),
- "state_events_at_start": _create_join_state_events_for_batch_send_request(
- [self.virtual_user_id], time_before_room
- ),
- },
- access_token=self.appservice.token,
- )
- self.assertEqual(channel.code, 200, channel.result)
-
- # Now we need to find the invite + join events stream tokens so we can sync between
- main_store = self.hs.get_datastores().main
- events, next_key = self.get_success(
- main_store.get_recent_events_for_room(
- room_id,
- 50,
- end_token=main_store.get_room_max_token(),
- ),
- )
- invite_event_position = None
- for event in events:
- if (
- event.type == "m.room.member"
- and event.content["membership"] == "invite"
- ):
- invite_event_position = self.get_success(
- main_store.get_topological_token_for_event(event.event_id)
- )
- break
-
- assert invite_event_position is not None, "No invite event found"
-
- # Remove the topological order from the token by re-creating w/stream only
- invite_event_position = RoomStreamToken(None, invite_event_position.stream)
-
- # Sync everything after this token
- since_token = self.get_success(invite_event_position.to_string(main_store))
- sync_response = self.make_request(
- "GET",
- f"/sync?since={since_token}",
- access_token=user_tok,
- )
-
- # Assert that, for this room, the user was considered to have joined and thus
- # receives the full state history
- state_event_types = [
- event["type"]
- for event in sync_response.json_body["rooms"]["join"][room_id]["state"][
- "events"
- ]
- ]
-
- assert (
- "m.room.create" in state_event_types
- ), "Missing room full state in sync response"
diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py
index 788500e38f..b223dc750b 100644
--- a/tests/storage/databases/main/test_events_worker.py
+++ b/tests/storage/databases/main/test_events_worker.py
@@ -139,6 +139,55 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
# That should result in a single db query to lookup
self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
+ def test_persisting_event_prefills_get_event_cache(self) -> None:
+ """
+        Test to make sure that the `_get_event_cache` is prefilled after we persist an
+        event and that subsequent lookups return the updated value.
+ """
+ event, event_context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ sender=self.user,
+ type="test_event_type",
+ content={"body": "conflabulation"},
+ )
+ )
+
+ # First, check `_get_event_cache` for the event we just made
+ # to verify it's not in the cache.
+ res = self.store._get_event_cache.get_local((event.event_id,))
+ self.assertEqual(res, None, "Event was cached when it should not have been.")
+
+ with LoggingContext(name="test") as ctx:
+ # Persist the event which should invalidate then prefill the
+ # `_get_event_cache` so we don't return stale values.
+ # Side Note: Apparently, persisting an event isn't a transaction in the
+ # sense that it is recorded in the LoggingContext
+ persistence = self.hs.get_storage_controllers().persistence
+ assert persistence is not None
+ self.get_success(
+ persistence.persist_event(
+ event,
+ event_context,
+ )
+ )
+
+ # Check `_get_event_cache` again and we should see the updated fact
+ # that we now have the event cached after persisting it.
+ res = self.store._get_event_cache.get_local((event.event_id,))
+ self.assertEqual(res.event, event, "Event not cached as expected.") # type: ignore
+
+ # Try and fetch the event from the database.
+ self.get_success(self.store.get_event(event.event_id))
+
+ # Verify that the database hit was avoided.
+ self.assertEqual(
+ ctx.get_resource_usage().evt_db_fetch_count,
+ 0,
+ "Database was hit, which would not happen if event was cached.",
+ )
+
def test_invalidate_cache_by_room_id(self) -> None:
"""
Test to make sure that all events associated with the given `(room_id,)`
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index e39b63edac..48ebfadaab 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -401,7 +401,10 @@ class EventChainStoreTestCase(HomeserverTestCase):
assert persist_events_store is not None
persist_events_store._store_event_txn(
txn,
- [(e, EventContext(self.hs.get_storage_controllers())) for e in events],
+ [
+ (e, EventContext(self.hs.get_storage_controllers(), {}))
+ for e in events
+ ],
)
# Actually call the function that calculates the auth chain stuff.
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index 4b8d8328d7..0f3b0744f1 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -20,7 +20,6 @@ from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
-from synapse.api.constants import EventTypes
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
@@ -924,216 +923,6 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
self.assertEqual(backfill_event_ids, ["b3", "b2", "b1"])
- def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo:
- """
- Sets up a room with various insertion event backward extremities to test
- backfill functions against.
-
- Returns:
- _BackfillSetupInfo including the `room_id` to test against and
- `depth_map` of events in the room
- """
- room_id = "!backfill-room-test:some-host"
-
- depth_map: Dict[str, int] = {
- "1": 1,
- "2": 2,
- "insertion_eventA": 3,
- "3": 4,
- "insertion_eventB": 5,
- "4": 6,
- "5": 7,
- }
-
- def populate_db(txn: LoggingTransaction) -> None:
- # Insert the room to satisfy the foreign key constraint of
- # `event_failed_pull_attempts`
- self.store.db_pool.simple_insert_txn(
- txn,
- "rooms",
- {
- "room_id": room_id,
- "creator": "room_creator_user_id",
- "is_public": True,
- "room_version": "6",
- },
- )
-
- # Insert our server events
- stream_ordering = 0
- for event_id, depth in depth_map.items():
- self.store.db_pool.simple_insert_txn(
- txn,
- table="events",
- values={
- "event_id": event_id,
- "type": EventTypes.MSC2716_INSERTION
- if event_id.startswith("insertion_event")
- else "test_regular_type",
- "room_id": room_id,
- "depth": depth,
- "topological_ordering": depth,
- "stream_ordering": stream_ordering,
- "processed": True,
- "outlier": False,
- },
- )
-
- if event_id.startswith("insertion_event"):
- self.store.db_pool.simple_insert_txn(
- txn,
- table="insertion_event_extremities",
- values={
- "event_id": event_id,
- "room_id": room_id,
- },
- )
-
- stream_ordering += 1
-
- self.get_success(
- self.store.db_pool.runInteraction(
- "_setup_room_for_insertion_backfill_tests_populate_db",
- populate_db,
- )
- )
-
- return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
-
- def test_get_insertion_event_backward_extremities_in_room(self) -> None:
- """
- Test to make sure only insertion event backward extremities that are
- older and come before the `current_depth` are returned.
- """
- setup_info = self._setup_room_for_insertion_backfill_tests()
- room_id = setup_info.room_id
- depth_map = setup_info.depth_map
-
- # Try at "insertion_eventB"
- backfill_points = self.get_success(
- self.store.get_insertion_event_backward_extremities_in_room(
- room_id, depth_map["insertion_eventB"], limit=100
- )
- )
- backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
- self.assertEqual(backfill_event_ids, ["insertion_eventB", "insertion_eventA"])
-
- # Try at "insertion_eventA"
- backfill_points = self.get_success(
- self.store.get_insertion_event_backward_extremities_in_room(
- room_id, depth_map["insertion_eventA"], limit=100
- )
- )
- backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
- # Event "2" has a depth of 2 but is not included here because we only
- # know the approximate depth of 5 from our event "3".
- self.assertListEqual(backfill_event_ids, ["insertion_eventA"])
-
- def test_get_insertion_event_backward_extremities_in_room_excludes_events_we_have_attempted(
- self,
- ) -> None:
- """
- Test to make sure that insertion events we have attempted to backfill
- (and within backoff timeout duration) do not show up as an event to
- backfill again.
- """
- setup_info = self._setup_room_for_insertion_backfill_tests()
- room_id = setup_info.room_id
- depth_map = setup_info.depth_map
-
- # Record some attempts to backfill these events which will make
- # `get_insertion_event_backward_extremities_in_room` exclude them
- # because we haven't passed the backoff interval.
- self.get_success(
- self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventA", "fake cause"
- )
- )
-
- # No time has passed since we attempted to backfill ^
-
- # Try at "insertion_eventB"
- backfill_points = self.get_success(
- self.store.get_insertion_event_backward_extremities_in_room(
- room_id, depth_map["insertion_eventB"], limit=100
- )
- )
- backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
- # Only the backfill points that we didn't record earlier exist here.
- self.assertEqual(backfill_event_ids, ["insertion_eventB"])
-
- def test_get_insertion_event_backward_extremities_in_room_attempted_event_retry_after_backoff_duration(
- self,
- ) -> None:
- """
- Test to make sure after we fake attempt to backfill event
- "insertion_eventA" many times, we can see retry and see the
- "insertion_eventA" again after the backoff timeout duration has
- exceeded.
- """
- setup_info = self._setup_room_for_insertion_backfill_tests()
- room_id = setup_info.room_id
- depth_map = setup_info.depth_map
-
- # Record some attempts to backfill these events which will make
- # `get_backfill_points_in_room` exclude them because we
- # haven't passed the backoff interval.
- self.get_success(
- self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventB", "fake cause"
- )
- )
- self.get_success(
- self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventA", "fake cause"
- )
- )
- self.get_success(
- self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventA", "fake cause"
- )
- )
- self.get_success(
- self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventA", "fake cause"
- )
- )
- self.get_success(
- self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventA", "fake cause"
- )
- )
-
- # Now advance time by 2 hours and we should only be able to see
- # "insertion_eventB" because we have waited long enough for the single
- # attempt (2^1 hours) but we still shouldn't see "insertion_eventA"
- # because we haven't waited long enough for this many attempts.
- self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
-
- # Try at "insertion_eventA" and make sure that "insertion_eventA" is not
- # in the list because we've already attempted many times
- backfill_points = self.get_success(
- self.store.get_insertion_event_backward_extremities_in_room(
- room_id, depth_map["insertion_eventA"], limit=100
- )
- )
- backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
- self.assertEqual(backfill_event_ids, [])
-
- # Now advance time by 20 hours (above 2^4 because we made 4 attemps) and
- # see if we can now backfill it
- self.reactor.advance(datetime.timedelta(hours=20).total_seconds())
-
- # Try at "insertion_eventA" again after we advanced enough time and we
- # should see "insertion_eventA" again
- backfill_points = self.get_success(
- self.store.get_insertion_event_backward_extremities_in_room(
- room_id, depth_map["insertion_eventA"], limit=100
- )
- )
- backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
- self.assertEqual(backfill_event_ids, ["insertion_eventA"])
-
def test_get_event_ids_with_failed_pull_attempts(self) -> None:
"""
Test to make sure we properly get event_ids based on whether they have any
diff --git a/tests/test_state.py b/tests/test_state.py
index 7a49b87953..eded38c766 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -555,10 +555,15 @@ class StateTestCase(unittest.TestCase):
(e.event_id for e in old_state + [event]), current_state_ids.values()
)
- self.assertIsNotNone(context.state_group_before_event)
+ assert context.state_group_before_event is not None
+ assert context.state_group is not None
+ self.assertEqual(
+ context.state_group_deltas.get(
+ (context.state_group_before_event, context.state_group)
+ ),
+ {(event.type, event.state_key): event.event_id},
+ )
self.assertNotEqual(context.state_group_before_event, context.state_group)
- self.assertEqual(context.state_group_before_event, context.prev_group)
- self.assertEqual({("state", ""): event.event_id}, context.delta_ids)
@defer.inlineCallbacks
def test_trivial_annotate_message(
|