author     Richard van der Hoff <richard@matrix.org>  2020-11-13 23:11:43 +0000
committer  Richard van der Hoff <richard@matrix.org>  2020-11-15 23:09:03 +0000
commit     cfd895a22e7563b6b22141d14d9c8aebf177df44
tree       0a2259d1f2d45646b946c7f060ae6dfc97edef8d
parent     fix dict handling for make_request()
use global make_request() directly where we have a custom Resource
Where we want to render a request against a specific Resource, call the global
make_request() function rather than the one in HomeserverTestCase, allowing us
to pass in an appropriate `Site`.
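
As a minimal sketch, the pattern this commit adopts looks like the following. All names are taken from the diff below and assume the surrounding EventPersisterShardTestCase setup (i.e. this runs inside a test method where sync_hs and access_token have already been created):

    from tests.server import make_request

    # Look up the Site that wraps the worker homeserver we want to hit...
    sync_hs_site = self._hs_to_site[sync_hs]

    # ...then call the module-level make_request() with that Site, rather
    # than HomeserverTestCase.make_request(), which is bound to the main
    # test Site.
    request, channel = make_request(
        self.reactor, sync_hs_site, "GET", "/sync", access_token=access_token
    )

    # Render the request on the worker under test and wait for the response.
    self.render_on_worker(sync_hs, request)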
Diffstat (limited to 'tests/replication/test_sharded_event_persister.py')
-rw-r--r--  tests/replication/test_sharded_event_persister.py | 42
1 file changed, 32 insertions(+), 10 deletions(-)
diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py
index 82cf033d4e..2820dd622f 100644
--- a/tests/replication/test_sharded_event_persister.py
+++ b/tests/replication/test_sharded_event_persister.py
@@ -22,6 +22,7 @@ from synapse.rest.client.v1 import login, room
 from synapse.rest.client.v2_alpha import sync
 
 from tests.replication._base import BaseMultiWorkerStreamTestCase
+from tests.server import make_request
 from tests.utils import USE_POSTGRES_FOR_TESTS
 
 logger = logging.getLogger(__name__)
@@ -148,6 +149,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         sync_hs = self.make_worker_hs(
             "synapse.app.generic_worker", {"worker_name": "sync"},
         )
+        sync_hs_site = self._hs_to_site[sync_hs]
 
         # Specially selected room IDs that get persisted on different workers.
         room_id1 = "!foo:test"
@@ -178,7 +180,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         )
 
         # Do an initial sync so that we're up to date.
-        request, channel = self.make_request("GET", "/sync", access_token=access_token)
+        request, channel = make_request(
+            self.reactor, sync_hs_site, "GET", "/sync", access_token=access_token
+        )
         self.render_on_worker(sync_hs, request)
         next_batch = channel.json_body["next_batch"]
 
@@ -203,8 +207,12 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
 
         # Check that syncing still gets the new event, despite the gap in the
         # stream IDs.
-        request, channel = self.make_request(
-            "GET", "/sync?since={}".format(next_batch), access_token=access_token
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
+            "GET",
+            "/sync?since={}".format(next_batch),
+            access_token=access_token,
         )
         self.render_on_worker(sync_hs, request)
 
@@ -230,7 +238,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         response = self.helper.send(room_id2, body="Hi!", tok=self.other_access_token)
         first_event_in_room2 = response["event_id"]
 
-        request, channel = self.make_request(
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
             "GET",
             "/sync?since={}".format(vector_clock_token),
             access_token=access_token,
@@ -254,8 +264,12 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         self.helper.send(room_id1, body="Hi again!", tok=self.other_access_token)
         self.helper.send(room_id2, body="Hi again!", tok=self.other_access_token)
 
-        request, channel = self.make_request(
-            "GET", "/sync?since={}".format(next_batch), access_token=access_token
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
+            "GET",
+            "/sync?since={}".format(next_batch),
+            access_token=access_token,
         )
         self.render_on_worker(sync_hs, request)
 
@@ -269,7 +283,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         # Paginating back in the first room should not produce any results, as
         # no events have happened in it. This tests that we are correctly
         # filtering results based on the vector clock portion.
-        request, channel = self.make_request(
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
             "GET",
             "/rooms/{}/messages?from={}&to={}&dir=b".format(
                 room_id1, prev_batch1, vector_clock_token
@@ -281,7 +297,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
 
         # Paginating back on the second room should produce the first event
         # again. This tests that pagination isn't completely broken.
-        request, channel = self.make_request(
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
             "GET",
             "/rooms/{}/messages?from={}&to={}&dir=b".format(
                 room_id2, prev_batch2, vector_clock_token
@@ -295,7 +313,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         )
 
         # Paginating forwards should give the same results
-        request, channel = self.make_request(
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
             "GET",
             "/rooms/{}/messages?from={}&to={}&dir=f".format(
                 room_id1, vector_clock_token, prev_batch1
@@ -305,7 +325,9 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         self.render_on_worker(sync_hs, request)
         self.assertListEqual([], channel.json_body["chunk"])
 
-        request, channel = self.make_request(
+        request, channel = make_request(
+            self.reactor,
+            sync_hs_site,
             "GET",
             "/rooms/{}/messages?from={}&to={}&dir=f".format(
                 room_id2, vector_clock_token, prev_batch2,