author     Richard van der Hoff <1389908+richvdh@users.noreply.github.com>    2020-11-16 18:22:24 +0000
committer  GitHub <noreply@github.com>                                        2020-11-16 18:22:24 +0000
commit     3dc1871219f845954a4b7d31fc06739831d67d2e
tree       a54654d07bc289d36fea381102c26e6c63d3e0b5 /tests/replication/test_sharded_event_persister.py
parent     Move `wait_until_result` into `FakeChannel` (#8758)
parent     fixup test
download   synapse-3dc1871219f845954a4b7d31fc06739831d67d2e.tar.xz

Merge pull request #8757 from matrix-org/rav/pass_site_to_make_request

Pass a Site into `make_request`
Diffstat (limited to 'tests/replication/test_sharded_event_persister.py')
-rw-r--r--  tests/replication/test_sharded_event_persister.py  |  42
1 file changed, 32 insertions(+), 10 deletions(-)
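
The pattern in this diff, in brief: instead of calling `self.make_request(...)`, which builds the request against the main test homeserver, the test now imports the module-level `make_request` from `tests.server` and passes it the reactor plus the `Site` registered for the sync worker, so the request is served by that worker. Below is a minimal sketch of the new calling convention inside a test method, assuming the test-case helpers used in the diff (`self._hs_to_site`, `self.render_on_worker`) behave as they do in the hunks that follow; it is an illustration of the pattern, not a complete test.

    from tests.server import make_request

    # Look up the twisted Site that was registered for the "sync" worker
    # homeserver when it was created with make_worker_hs().
    sync_hs_site = self._hs_to_site[sync_hs]

    # Build the request against that worker's Site rather than the main
    # homeserver (the old self.make_request always targeted the latter).
    request, channel = make_request(
        self.reactor, sync_hs_site, "GET", "/sync", access_token=access_token
    )
    self.render_on_worker(sync_hs, request)
    next_batch = channel.json_body["next_batch"]
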
diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py
index 82cf033d4e..2820dd622f 100644
--- a/tests/replication/test_sharded_event_persister.py
+++ b/tests/replication/test_sharded_event_persister.py
@@ -22,6 +22,7 @@ from synapse.rest.client.v1 import login, room
 from synapse.rest.client.v2_alpha import sync
 
 from tests.replication._base import BaseMultiWorkerStreamTestCase
+from tests.server import make_request
 from tests.utils import USE_POSTGRES_FOR_TESTS
 
 logger = logging.getLogger(__name__)
@@ -148,6 +149,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         sync_hs = self.make_worker_hs(
             "synapse.app.generic_worker", {"worker_name": "sync"},
         )
+        sync_hs_site = self._hs_to_site[sync_hs]
 
         # Specially selected room IDs that get persisted on different workers.
         room_id1 = "!foo:test"
@@ -178,7 +180,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         )
 
         # Do an initial sync so that we're up to date.
-        request, channel = self.make_request("GET", "/sync", access_token=access_token)
+        request, channel = make_request(
+            self.reactor, sync_hs_site, "GET", "/sync", access_token=access_token
+        )
         self.render_on_worker(sync_hs, request)
         next_batch = channel.json_body["next_batch"]
 
@@ -203,8 +207,12 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
 
         # Check that syncing still gets the new event, despite the gap in the
         # stream IDs.
-        request, channel = self.make_request(
-            "GET", "/sync?since={}".format(next_batch), access_token=access_token
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
+            "GET",
+            "/sync?since={}".format(next_batch),
+            access_token=access_token,
         )
         self.render_on_worker(sync_hs, request)
 
@@ -230,7 +238,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         response = self.helper.send(room_id2, body="Hi!", tok=self.other_access_token)
         first_event_in_room2 = response["event_id"]
 
-        request, channel = self.make_request(
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
             "GET",
             "/sync?since={}".format(vector_clock_token),
             access_token=access_token,
@@ -254,8 +264,12 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         self.helper.send(room_id1, body="Hi again!", tok=self.other_access_token)
         self.helper.send(room_id2, body="Hi again!", tok=self.other_access_token)
 
-        request, channel = self.make_request(
-            "GET", "/sync?since={}".format(next_batch), access_token=access_token
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
+            "GET",
+            "/sync?since={}".format(next_batch),
+            access_token=access_token,
         )
         self.render_on_worker(sync_hs, request)
 
@@ -269,7 +283,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         # Paginating back in the first room should not produce any results, as
         # no events have happened in it. This tests that we are correctly
         # filtering results based on the vector clock portion.
-        request, channel = self.make_request(
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
             "GET",
             "/rooms/{}/messages?from={}&to={}&dir=b".format(
                 room_id1, prev_batch1, vector_clock_token
@@ -281,7 +297,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
 
         # Paginating back on the second room should produce the first event
         # again. This tests that pagination isn't completely broken.
-        request, channel = self.make_request(
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
             "GET",
             "/rooms/{}/messages?from={}&to={}&dir=b".format(
                 room_id2, prev_batch2, vector_clock_token
@@ -295,7 +313,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         )
 
         # Paginating forwards should give the same results
-        request, channel = self.make_request(
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
             "GET",
             "/rooms/{}/messages?from={}&to={}&dir=f".format(
                 room_id1, vector_clock_token, prev_batch1
@@ -305,7 +325,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         self.render_on_worker(sync_hs, request)
         self.assertListEqual([], channel.json_body["chunk"])
 
-        request, channel = self.make_request(
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
             "GET",
             "/rooms/{}/messages?from={}&to={}&dir=f".format(
                 room_id2, vector_clock_token, prev_batch2,