diff --git a/tests/handlers/test_worker_lock.py b/tests/handlers/test_worker_lock.py
new file mode 100644
index 0000000000..73e548726c
--- /dev/null
+++ b/tests/handlers/test_worker_lock.py
@@ -0,0 +1,80 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+from tests.replication._base import BaseMultiWorkerStreamTestCase
+
+
+class WorkerLockTestCase(unittest.HomeserverTestCase):
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
+ self.worker_lock_handler = self.hs.get_worker_locks_handler()
+
+ def test_wait_for_lock_locally(self) -> None:
+ """Test waiting for a lock on a single worker"""
+
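+        # Take out the lock by entering the async context manager.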
+ lock1 = self.worker_lock_handler.acquire_lock("name", "key")
+ self.get_success(lock1.__aenter__())
+
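+        # A second attempt to take the same lock should not complete yet.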
+ lock2 = self.worker_lock_handler.acquire_lock("name", "key")
+ d2 = defer.ensureDeferred(lock2.__aenter__())
+ self.assertNoResult(d2)
+
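+        # Releasing the first lock lets the blocked acquisition complete.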
+ self.get_success(lock1.__aexit__(None, None, None))
+
+ self.get_success(d2)
+ self.get_success(lock2.__aexit__(None, None, None))
+
+
+class WorkerLockWorkersTestCase(BaseMultiWorkerStreamTestCase):
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
+ self.main_worker_lock_handler = self.hs.get_worker_locks_handler()
+
+ def test_wait_for_lock_worker(self) -> None:
+ """Test waiting for a lock on another worker"""
+
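+        # Create a second worker, with Redis enabled so the two processes can
+        # communicate over replication.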
+ worker = self.make_worker_hs(
+ "synapse.app.generic_worker",
+ extra_config={
+ "redis": {"enabled": True},
+ },
+ )
+ worker_lock_handler = worker.get_worker_locks_handler()
+
+ lock1 = self.main_worker_lock_handler.acquire_lock("name", "key")
+ self.get_success(lock1.__aenter__())
+
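+        # The lock is already held on the main process, so the worker must wait.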
+ lock2 = worker_lock_handler.acquire_lock("name", "key")
+ d2 = defer.ensureDeferred(lock2.__aenter__())
+ self.assertNoResult(d2)
+
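+        # Releasing the lock on the main process unblocks the worker.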
+ self.get_success(lock1.__aexit__(None, None, None))
+
+ self.get_success(d2)
+ self.get_success(lock2.__aexit__(None, None, None))
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index d013e75d55..4f6347be15 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -711,7 +711,7 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
- self.assertEqual(30, channel.resource_usage.db_txn_count)
+        self.assertEqual(32, channel.resource_usage.db_txn_count)
 
     def test_post_room_initial_state(self) -> None:
# POST with initial_state config key, expect new room id
@@ -724,7 +724,7 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
- self.assertEqual(32, channel.resource_usage.db_txn_count)
+        self.assertEqual(34, channel.resource_usage.db_txn_count)
 
     def test_post_room_visibility_key(self) -> None:
# POST with visibility config key, expect new room id
diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py
index ad454f6dd8..383da83dfb 100644
--- a/tests/storage/databases/main/test_lock.py
+++ b/tests/storage/databases/main/test_lock.py
@@ -448,3 +448,58 @@ class ReadWriteLockTestCase(unittest.HomeserverTestCase):
         self.get_success(self.store._on_shutdown())
 
         self.assertEqual(self.store._live_read_write_lock_tokens, {})
+
+ def test_acquire_multiple_locks(self) -> None:
+ """Tests that acquiring multiple locks at once works."""
+
+        # Take out multiple locks at once and check that we can't take those
+        # locks out again individually.
+ lock = self.get_success(
+ self.store.try_acquire_multi_read_write_lock(
+ [("name1", "key1"), ("name2", "key2")], write=True
+ )
+ )
+ self.assertIsNotNone(lock)
+
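+        # The assert narrows the Optional type for mypy.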
+ assert lock is not None
+ self.get_success(lock.__aenter__())
+
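+        # Both write and read acquisitions of the held keys should now fail.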
+ lock2 = self.get_success(
+ self.store.try_acquire_read_write_lock("name1", "key1", write=True)
+ )
+ self.assertIsNone(lock2)
+
+ lock3 = self.get_success(
+ self.store.try_acquire_read_write_lock("name2", "key2", write=False)
+ )
+ self.assertIsNone(lock3)
+
+        # Overlapping lock attempts will fail, and won't take out any locks.
+ lock4 = self.get_success(
+ self.store.try_acquire_multi_read_write_lock(
+ [("name1", "key1"), ("name3", "key3")], write=True
+ )
+ )
+ self.assertIsNone(lock4)
+
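+        # The keys from the failed overlapping attempt must still be free.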
+ lock5 = self.get_success(
+ self.store.try_acquire_read_write_lock("name3", "key3", write=True)
+ )
+ self.assertIsNotNone(lock5)
+ assert lock5 is not None
+ self.get_success(lock5.__aenter__())
+ self.get_success(lock5.__aexit__(None, None, None))
+
+        # Once we release the multi-lock, we can take out the individual locks again.
+ self.get_success(lock.__aexit__(None, None, None))
+
+ lock6 = self.get_success(
+ self.store.try_acquire_read_write_lock("name1", "key1", write=True)
+ )
+ self.assertIsNotNone(lock6)
+ assert lock6 is not None
+ self.get_success(lock6.__aenter__())
+ self.get_success(lock6.__aexit__(None, None, None))