diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py
index 272cd35402..72bf5b3d31 100644
--- a/tests/storage/test_account_data.py
+++ b/tests/storage/test_account_data.py
@@ -47,9 +47,18 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
expected_ignorer_user_ids,
)
+ def assert_ignored(
+ self, ignorer_user_id: str, expected_ignored_user_ids: Set[str]
+ ) -> None:
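+        """Check that the given user ignores exactly the expected set of users."""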
+ self.assertEqual(
+ self.get_success(self.store.ignored_users(ignorer_user_id)),
+ expected_ignored_user_ids,
+ )
+
def test_ignoring_users(self):
"""Basic adding/removing of users from the ignore list."""
self._update_ignore_list("@other:test", "@another:remote")
+ self.assert_ignored(self.user, {"@other:test", "@another:remote"})
# Check a user which no one ignores.
self.assert_ignorers("@user:test", set())
@@ -62,6 +71,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
# Add one user, remove one user, and leave one user.
self._update_ignore_list("@foo:test", "@another:remote")
+ self.assert_ignored(self.user, {"@foo:test", "@another:remote"})
# Check the removed user.
self.assert_ignorers("@other:test", set())
@@ -76,20 +86,24 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
"""Ensure that caching works properly between different users."""
# The first user ignores a user.
self._update_ignore_list("@other:test")
+ self.assert_ignored(self.user, {"@other:test"})
self.assert_ignorers("@other:test", {self.user})
# The second user ignores them.
self._update_ignore_list("@other:test", ignorer_user_id="@second:test")
+ self.assert_ignored("@second:test", {"@other:test"})
self.assert_ignorers("@other:test", {self.user, "@second:test"})
# The first user un-ignores them.
self._update_ignore_list()
+ self.assert_ignored(self.user, set())
self.assert_ignorers("@other:test", {"@second:test"})
def test_invalid_data(self):
"""Invalid data ends up clearing out the ignored users list."""
# Add some data and ensure it is there.
self._update_ignore_list("@other:test")
+ self.assert_ignored(self.user, {"@other:test"})
self.assert_ignorers("@other:test", {self.user})
# No ignored_users key.
@@ -102,10 +116,12 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
)
# No one ignores the user now.
+ self.assert_ignored(self.user, set())
self.assert_ignorers("@other:test", set())
# Add some data and ensure it is there.
self._update_ignore_list("@other:test")
+ self.assert_ignored(self.user, {"@other:test"})
self.assert_ignorers("@other:test", {self.user})
# Invalid data.
@@ -118,4 +134,5 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
)
# No one ignores the user now.
+ self.assert_ignored(self.user, set())
self.assert_ignorers("@other:test", set())
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
index 39dcc094bd..fd619b64d4 100644
--- a/tests/storage/test_background_update.py
+++ b/tests/storage/test_background_update.py
@@ -14,16 +14,23 @@
from unittest.mock import Mock
+import yaml
+
from twisted.internet.defer import Deferred, ensureDeferred
+from twisted.test.proto_helpers import MemoryReactor
+from synapse.server import HomeServer
from synapse.storage.background_updates import BackgroundUpdater
+from synapse.types import JsonDict
+from synapse.util import Clock
from tests import unittest
from tests.test_utils import make_awaitable, simple_async_mock
+from tests.unittest import override_config
class BackgroundUpdateTestCase(unittest.HomeserverTestCase):
- def prepare(self, reactor, clock, homeserver):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates
# the base test class should have run the real bg updates for us
self.assertTrue(
@@ -34,50 +41,50 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase):
self.updates.register_background_update_handler(
"test_update", self.update_handler
)
+ self.store = self.hs.get_datastores().main
+
+ async def update(self, progress: JsonDict, count: int) -> int:
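+        """A handler which takes 10ms per item and bumps the progress counter."""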
+ duration_ms = 10
+ await self.clock.sleep((count * duration_ms) / 1000)
+ progress = {"my_key": progress["my_key"] + 1}
+ await self.store.db_pool.runInteraction(
+ "update_progress",
+ self.updates._background_update_progress_txn,
+ "test_update",
+ progress,
+ )
+ return count
- def test_do_background_update(self):
+ def test_do_background_update(self) -> None:
# the time we claim it takes to update one item when running the update
duration_ms = 10
# the target runtime for each bg update
target_background_update_duration_ms = 100
- store = self.hs.get_datastores().main
self.get_success(
- store.db_pool.simple_insert(
+ self.store.db_pool.simple_insert(
"background_updates",
values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
)
)
- # first step: make a bit of progress
- async def update(progress, count):
- await self.clock.sleep((count * duration_ms) / 1000)
- progress = {"my_key": progress["my_key"] + 1}
- await store.db_pool.runInteraction(
- "update_progress",
- self.updates._background_update_progress_txn,
- "test_update",
- progress,
- )
- return count
-
- self.update_handler.side_effect = update
+ self.update_handler.side_effect = self.update
self.update_handler.reset_mock()
res = self.get_success(
self.updates.do_next_background_update(False),
- by=0.01,
+ by=0.02,
)
self.assertFalse(res)
# on the first call, we should get run with the default background update size
self.update_handler.assert_called_once_with(
- {"my_key": 1}, self.updates.MINIMUM_BACKGROUND_BATCH_SIZE
+ {"my_key": 1}, self.updates.default_background_batch_size
)
# second step: complete the update
# we should now get run with a much bigger number of items to update
- async def update(progress, count):
+ async def update(progress: JsonDict, count: int) -> int:
self.assertEqual(progress, {"my_key": 2})
self.assertAlmostEqual(
count,
@@ -99,16 +106,234 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase):
self.assertTrue(result)
self.assertFalse(self.update_handler.called)
+ @override_config(
+ yaml.safe_load(
+ """
+ background_updates:
+ default_batch_size: 20
+ """
+ )
+ )
+ def test_background_update_default_batch_set_by_config(self) -> None:
+ """
+ Test that the background update is run with the default_batch_size set by the config
+ """
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+ )
+ )
+
+ self.update_handler.side_effect = self.update
+ self.update_handler.reset_mock()
+ res = self.get_success(
+ self.updates.do_next_background_update(False),
+ by=0.01,
+ )
+ self.assertFalse(res)
+
+ # on the first call, we should get run with the default background update size specified in the config
+ self.update_handler.assert_called_once_with({"my_key": 1}, 20)
+
+ def test_background_update_default_sleep_behavior(self) -> None:
+ """
+ Test default background update behavior, which is to sleep
+ """
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+ )
+ )
+
+ self.update_handler.side_effect = self.update
+ self.update_handler.reset_mock()
+ self.updates.start_doing_background_updates()
+
+        # advance the reactor less than the default sleep duration (1000ms)
+ self.reactor.pump([0.5])
+ # check that an update has not been run
+ self.update_handler.assert_not_called()
+
+ # advance reactor past default sleep duration
+ self.reactor.pump([1])
+ # check that update has been run
+ self.update_handler.assert_called()
+
+ @override_config(
+ yaml.safe_load(
+ """
+ background_updates:
+ sleep_duration_ms: 500
+ """
+ )
+ )
+ def test_background_update_sleep_set_in_config(self) -> None:
+ """
+ Test that changing the sleep time in the config changes how long it sleeps
+ """
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+ )
+ )
+
+ self.update_handler.side_effect = self.update
+ self.update_handler.reset_mock()
+ self.updates.start_doing_background_updates()
+
+        # advance the reactor less than the configured sleep duration (500ms)
+ self.reactor.pump([0.45])
+ # check that an update has not been run
+ self.update_handler.assert_not_called()
+
+        # advance the reactor past the configured sleep duration but less than the
+        # default duration
+ self.reactor.pump([0.75])
+ # check that update has been run
+ self.update_handler.assert_called()
+
+ @override_config(
+ yaml.safe_load(
+ """
+ background_updates:
+ sleep_enabled: false
+ """
+ )
+ )
+ def test_disabling_background_update_sleep(self) -> None:
+ """
+        Test that disabling sleep in the config results in the background update not sleeping
+ """
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+ )
+ )
+
+ self.update_handler.side_effect = self.update
+ self.update_handler.reset_mock()
+ self.updates.start_doing_background_updates()
+
+        # advance the reactor only a very small amount
+ self.reactor.pump([0.025])
+ # check that an update has run
+ self.update_handler.assert_called()
+
+ @override_config(
+ yaml.safe_load(
+ """
+ background_updates:
+ background_update_duration_ms: 500
+ """
+ )
+ )
+ def test_background_update_duration_set_in_config(self) -> None:
+ """
+ Test that the desired duration set in the config is used in determining batch size
+ """
+ # Duration of one background update item
+ duration_ms = 10
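+        # this matches the per-item sleep hard-coded in `self.update` above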
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+ )
+ )
+
+ self.update_handler.side_effect = self.update
+ self.update_handler.reset_mock()
+ res = self.get_success(
+ self.updates.do_next_background_update(False),
+ by=0.02,
+ )
+ self.assertFalse(res)
+
+        # the first update was run with the default batch size; this one should be
+        # run with the configured 500ms as the target duration
+ async def update(progress: JsonDict, count: int) -> int:
+ self.assertEqual(progress, {"my_key": 2})
+ self.assertAlmostEqual(
+ count,
+ 500 / duration_ms,
+ places=0,
+ )
+ await self.updates._end_background_update("test_update")
+ return count
+
+ self.update_handler.side_effect = update
+ self.get_success(self.updates.do_next_background_update(False))
+
+ @override_config(
+ yaml.safe_load(
+ """
+ background_updates:
+ min_batch_size: 5
+ """
+ )
+ )
+ def test_background_update_min_batch_set_in_config(self) -> None:
+ """
+ Test that the minimum batch size set in the config is used
+ """
+ # a very long-running individual update
+ duration_ms = 50
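+        # long enough that the computed batch size falls below the configured
+        # minimum of 5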
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
+ )
+ )
+
+ # Run the update with the long-running update item
+ async def update_long(progress: JsonDict, count: int) -> int:
+ await self.clock.sleep((count * duration_ms) / 1000)
+ progress = {"my_key": progress["my_key"] + 1}
+ await self.store.db_pool.runInteraction(
+ "update_progress",
+ self.updates._background_update_progress_txn,
+ "test_update",
+ progress,
+ )
+ return count
+
+ self.update_handler.side_effect = update_long
+ self.update_handler.reset_mock()
+ res = self.get_success(
+ self.updates.do_next_background_update(False),
+ by=1,
+ )
+ self.assertFalse(res)
+
+        # the first update was run with the default batch size; since those items
+        # took a very long time, this one should be run with the configured minimum
+        # batch size
+ async def update_short(progress: JsonDict, count: int) -> int:
+ self.assertEqual(progress, {"my_key": 2})
+ self.assertEqual(count, 5)
+ await self.updates._end_background_update("test_update")
+ return count
+
+ self.update_handler.side_effect = update_short
+ self.get_success(self.updates.do_next_background_update(False))
+
class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase):
- def prepare(self, reactor, clock, homeserver):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates
# the base test class should have run the real bg updates for us
self.assertTrue(
self.get_success(self.updates.has_completed_background_updates())
)
- self.update_deferred = Deferred()
+ self.update_deferred: Deferred[int] = Deferred()
self.update_handler = Mock(return_value=self.update_deferred)
self.updates.register_background_update_handler(
"test_update", self.update_handler
@@ -137,7 +362,7 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase):
),
)
- def test_controller(self):
+ def test_controller(self) -> None:
store = self.hs.get_datastores().main
self.get_success(
store.db_pool.simple_insert(
@@ -147,7 +372,7 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase):
)
# Set the return value for the context manager.
- enter_defer = Deferred()
+ enter_defer: Deferred[int] = Deferred()
self._update_ctx_manager.__aenter__ = Mock(return_value=enter_defer)
# Start the background update.
diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py
index 6fbac0ab14..a40fc20ef9 100644
--- a/tests/storage/test_database.py
+++ b/tests/storage/test_database.py
@@ -12,25 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.storage.database import make_tuple_comparison_clause
-from synapse.storage.engines import BaseDatabaseEngine
+from typing import Callable, Tuple
+from unittest.mock import Mock, call
-from tests import unittest
+from twisted.internet import defer
+from twisted.internet.defer import CancelledError, Deferred
+from twisted.test.proto_helpers import MemoryReactor
+from synapse.server import HomeServer
+from synapse.storage.database import (
+ DatabasePool,
+ LoggingTransaction,
+ make_tuple_comparison_clause,
+)
+from synapse.util import Clock
-def _stub_db_engine(**kwargs) -> BaseDatabaseEngine:
- # returns a DatabaseEngine, circumventing the abc mechanism
- # any kwargs are set as attributes on the class before instantiating it
- t = type(
- "TestBaseDatabaseEngine",
- (BaseDatabaseEngine,),
- dict(BaseDatabaseEngine.__dict__),
- )
- # defeat the abc mechanism
- t.__abstractmethods__ = set()
- for k, v in kwargs.items():
- setattr(t, k, v)
- return t(None, None)
+from tests import unittest
class TupleComparisonClauseTestCase(unittest.TestCase):
@@ -38,3 +35,150 @@ class TupleComparisonClauseTestCase(unittest.TestCase):
clause, args = make_tuple_comparison_clause([("a", 1), ("b", 2)])
self.assertEqual(clause, "(a,b) > (?,?)")
self.assertEqual(args, [1, 2])
+
+
+class CallbacksTestCase(unittest.HomeserverTestCase):
+ """Tests for transaction callbacks."""
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.db_pool: DatabasePool = self.store.db_pool
+
+ def _run_interaction(
+ self, func: Callable[[LoggingTransaction], object]
+ ) -> Tuple[Mock, Mock]:
+ """Run the given function in a database transaction, with callbacks registered.
+
+ Args:
+ func: The function to be run in a transaction. The transaction will be
+ retried if `func` raises an `OperationalError`.
+
+ Returns:
+ Two mocks, which were registered as an `after_callback` and an
+ `exception_callback` respectively, on every transaction attempt.
+ """
+ after_callback = Mock()
+ exception_callback = Mock()
+
+ def _test_txn(txn: LoggingTransaction) -> None:
+ txn.call_after(after_callback, 123, 456, extra=789)
+ txn.call_on_exception(exception_callback, 987, 654, extra=321)
+ func(txn)
+
+ try:
+ self.get_success_or_raise(
+ self.db_pool.runInteraction("test_transaction", _test_txn)
+ )
+ except Exception:
+ pass
+
+ return after_callback, exception_callback
+
+ def test_after_callback(self) -> None:
+ """Test that the after callback is called when a transaction succeeds."""
+ after_callback, exception_callback = self._run_interaction(lambda txn: None)
+
+ after_callback.assert_called_once_with(123, 456, extra=789)
+ exception_callback.assert_not_called()
+
+ def test_exception_callback(self) -> None:
+ """Test that the exception callback is called when a transaction fails."""
+ _test_txn = Mock(side_effect=ZeroDivisionError)
+ after_callback, exception_callback = self._run_interaction(_test_txn)
+
+ after_callback.assert_not_called()
+ exception_callback.assert_called_once_with(987, 654, extra=321)
+
+ def test_failed_retry(self) -> None:
+ """Test that the exception callback is called for every failed attempt."""
+ # Always raise an `OperationalError`.
+ _test_txn = Mock(side_effect=self.db_pool.engine.module.OperationalError)
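+        # `OperationalError` is treated as retryable, so the database pool will
+        # keep retrying the transaction until it gives up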
+ after_callback, exception_callback = self._run_interaction(_test_txn)
+
+ after_callback.assert_not_called()
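+        # six attempts in total: the initial run plus five retries of the
+        # `OperationalError`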
+ exception_callback.assert_has_calls(
+ [
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ ]
+ )
+ self.assertEqual(exception_callback.call_count, 6) # no additional calls
+
+ def test_successful_retry(self) -> None:
+ """Test callbacks for a failed transaction followed by a successful attempt."""
+ # Raise an `OperationalError` on the first attempt only.
+ _test_txn = Mock(
+ side_effect=[self.db_pool.engine.module.OperationalError, None]
+ )
+ after_callback, exception_callback = self._run_interaction(_test_txn)
+
+ # Calling both `after_callback`s when the first attempt failed is rather
+ # surprising (#12184). Let's document the behaviour in a test.
+ after_callback.assert_has_calls(
+ [
+ call(123, 456, extra=789),
+ call(123, 456, extra=789),
+ ]
+ )
+ self.assertEqual(after_callback.call_count, 2) # no additional calls
+ exception_callback.assert_not_called()
+
+
+class CancellationTestCase(unittest.HomeserverTestCase):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.db_pool: DatabasePool = self.store.db_pool
+
+ def test_after_callback(self) -> None:
+ """Test that the after callback is called when a transaction succeeds."""
+ d: "Deferred[None]"
+ after_callback = Mock()
+ exception_callback = Mock()
+
+ def _test_txn(txn: LoggingTransaction) -> None:
+ txn.call_after(after_callback, 123, 456, extra=789)
+ txn.call_on_exception(exception_callback, 987, 654, extra=321)
+ d.cancel()
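+            # cancel the caller's deferred mid-transaction; the transaction itself
+            # still runs to completion, so the after callback should still fire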
+
+ d = defer.ensureDeferred(
+ self.db_pool.runInteraction("test_transaction", _test_txn)
+ )
+ self.get_failure(d, CancelledError)
+
+ after_callback.assert_called_once_with(123, 456, extra=789)
+ exception_callback.assert_not_called()
+
+ def test_exception_callback(self) -> None:
+ """Test that the exception callback is called when a transaction fails."""
+ d: "Deferred[None]"
+ after_callback = Mock()
+ exception_callback = Mock()
+
+ def _test_txn(txn: LoggingTransaction) -> None:
+ txn.call_after(after_callback, 123, 456, extra=789)
+ txn.call_on_exception(exception_callback, 987, 654, extra=321)
+ d.cancel()
+ # Simulate a retryable failure on every attempt.
+ raise self.db_pool.engine.module.OperationalError()
+
+ d = defer.ensureDeferred(
+ self.db_pool.runInteraction("test_transaction", _test_txn)
+ )
+ self.get_failure(d, CancelledError)
+
+ after_callback.assert_not_called()
+ exception_callback.assert_has_calls(
+ [
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ ]
+ )
+ self.assertEqual(exception_callback.call_count, 6) # no additional calls
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index 6ac4b93f98..395396340b 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -13,9 +13,13 @@
# limitations under the License.
from typing import List, Optional
-from synapse.storage.database import DatabasePool
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
+from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.engines import IncorrectDatabaseSetup
from synapse.storage.util.id_generators import MultiWriterIdGenerator
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
from tests.utils import USE_POSTGRES_FOR_TESTS
@@ -25,13 +29,13 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
if not USE_POSTGRES_FOR_TESTS:
skip = "Requires Postgres"
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.db_pool: DatabasePool = self.store.db_pool
self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
- def _setup_db(self, txn):
+ def _setup_db(self, txn: LoggingTransaction) -> None:
txn.execute("CREATE SEQUENCE foobar_seq")
txn.execute(
"""
@@ -59,12 +63,12 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
return self.get_success_or_raise(self.db_pool.runWithConnection(_create))
- def _insert_rows(self, instance_name: str, number: int):
+ def _insert_rows(self, instance_name: str, number: int) -> None:
"""Insert N rows as the given instance, inserting with stream IDs pulled
from the postgres sequence.
"""
- def _insert(txn):
+ def _insert(txn: LoggingTransaction) -> None:
for _ in range(number):
txn.execute(
"INSERT INTO foobar VALUES (nextval('foobar_seq'), ?)",
@@ -80,12 +84,12 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.get_success(self.db_pool.runInteraction("_insert_rows", _insert))
- def _insert_row_with_id(self, instance_name: str, stream_id: int):
+ def _insert_row_with_id(self, instance_name: str, stream_id: int) -> None:
"""Insert one row as the given instance with given stream_id, updating
the postgres sequence position to match.
"""
- def _insert(txn):
+ def _insert(txn: LoggingTransaction) -> None:
txn.execute(
"INSERT INTO foobar VALUES (?, ?)",
(
@@ -104,7 +108,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.get_success(self.db_pool.runInteraction("_insert_row_with_id", _insert))
- def test_empty(self):
+ def test_empty(self) -> None:
"""Test an ID generator against an empty database gives sensible
current positions.
"""
@@ -114,7 +118,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
# The table is empty so we expect an empty map for positions
self.assertEqual(id_gen.get_positions(), {})
- def test_single_instance(self):
+ def test_single_instance(self) -> None:
"""Test that reads and writes from a single process are handled
correctly.
"""
@@ -130,7 +134,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
# Try allocating a new ID gen and check that we only see position
# advanced after we leave the context manager.
- async def _get_next_async():
+ async def _get_next_async() -> None:
async with id_gen.get_next() as stream_id:
self.assertEqual(stream_id, 8)
@@ -142,7 +146,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.assertEqual(id_gen.get_positions(), {"master": 8})
self.assertEqual(id_gen.get_current_token_for_writer("master"), 8)
- def test_out_of_order_finish(self):
+ def test_out_of_order_finish(self) -> None:
"""Test that IDs persisted out of order are correctly handled"""
# Prefill table with 7 rows written by 'master'
@@ -191,7 +195,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.assertEqual(id_gen.get_positions(), {"master": 11})
self.assertEqual(id_gen.get_current_token_for_writer("master"), 11)
- def test_multi_instance(self):
+ def test_multi_instance(self) -> None:
"""Test that reads and writes from multiple processes are handled
correctly.
"""
@@ -215,7 +219,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
# Try allocating a new ID gen and check that we only see position
# advanced after we leave the context manager.
- async def _get_next_async():
+ async def _get_next_async() -> None:
async with first_id_gen.get_next() as stream_id:
self.assertEqual(stream_id, 8)
@@ -233,7 +237,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
# ... but calling `get_next` on the second instance should give a unique
# stream ID
- async def _get_next_async():
+ async def _get_next_async2() -> None:
async with second_id_gen.get_next() as stream_id:
self.assertEqual(stream_id, 9)
@@ -241,7 +245,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
second_id_gen.get_positions(), {"first": 3, "second": 7}
)
- self.get_success(_get_next_async())
+ self.get_success(_get_next_async2())
self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 9})
@@ -249,7 +253,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
second_id_gen.advance("first", 8)
self.assertEqual(second_id_gen.get_positions(), {"first": 8, "second": 9})
- def test_get_next_txn(self):
+ def test_get_next_txn(self) -> None:
"""Test that the `get_next_txn` function works correctly."""
# Prefill table with 7 rows written by 'master'
@@ -263,7 +267,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
# Try allocating a new ID gen and check that we only see position
# advanced after we leave the context manager.
- def _get_next_txn(txn):
+ def _get_next_txn(txn: LoggingTransaction) -> None:
stream_id = id_gen.get_next_txn(txn)
self.assertEqual(stream_id, 8)
@@ -275,7 +279,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.assertEqual(id_gen.get_positions(), {"master": 8})
self.assertEqual(id_gen.get_current_token_for_writer("master"), 8)
- def test_get_persisted_upto_position(self):
+ def test_get_persisted_upto_position(self) -> None:
"""Test that `get_persisted_upto_position` correctly tracks updates to
positions.
"""
@@ -317,7 +321,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
id_gen.advance("second", 15)
self.assertEqual(id_gen.get_persisted_upto_position(), 11)
- def test_get_persisted_upto_position_get_next(self):
+ def test_get_persisted_upto_position_get_next(self) -> None:
"""Test that `get_persisted_upto_position` correctly tracks updates to
positions when `get_next` is called.
"""
@@ -331,7 +335,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.assertEqual(id_gen.get_persisted_upto_position(), 5)
- async def _get_next_async():
+ async def _get_next_async() -> None:
async with id_gen.get_next() as stream_id:
self.assertEqual(stream_id, 6)
self.assertEqual(id_gen.get_persisted_upto_position(), 5)
@@ -344,7 +348,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
# `persisted_upto_position` in this case, then it will be correct in the
# other cases that are tested above (since they'll hit the same code).
- def test_restart_during_out_of_order_persistence(self):
+ def test_restart_during_out_of_order_persistence(self) -> None:
"""Test that restarting a process while another process is writing out
of order updates are handled correctly.
"""
@@ -388,7 +392,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
id_gen_worker.advance("master", 9)
self.assertEqual(id_gen_worker.get_positions(), {"master": 9})
- def test_writer_config_change(self):
+ def test_writer_config_change(self) -> None:
"""Test that changing the writer config correctly works."""
self._insert_row_with_id("first", 3)
@@ -421,7 +425,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
# Check that we get a sane next stream ID with this new config.
- async def _get_next_async():
+ async def _get_next_async() -> None:
async with id_gen_3.get_next() as stream_id:
self.assertEqual(stream_id, 6)
@@ -435,7 +439,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.assertEqual(id_gen_5.get_current_token_for_writer("first"), 6)
self.assertEqual(id_gen_5.get_current_token_for_writer("third"), 6)
- def test_sequence_consistency(self):
+ def test_sequence_consistency(self) -> None:
"""Test that we error out if the table and sequence diverges."""
# Prefill with some rows
@@ -458,13 +462,13 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
if not USE_POSTGRES_FOR_TESTS:
skip = "Requires Postgres"
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.db_pool: DatabasePool = self.store.db_pool
self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
- def _setup_db(self, txn):
+ def _setup_db(self, txn: LoggingTransaction) -> None:
txn.execute("CREATE SEQUENCE foobar_seq")
txn.execute(
"""
@@ -493,10 +497,10 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
return self.get_success(self.db_pool.runWithConnection(_create))
- def _insert_row(self, instance_name: str, stream_id: int):
+ def _insert_row(self, instance_name: str, stream_id: int) -> None:
"""Insert one row as the given instance with given stream_id."""
- def _insert(txn):
+ def _insert(txn: LoggingTransaction) -> None:
txn.execute(
"INSERT INTO foobar VALUES (?, ?)",
(
@@ -514,13 +518,13 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.get_success(self.db_pool.runInteraction("_insert_row", _insert))
- def test_single_instance(self):
+ def test_single_instance(self) -> None:
"""Test that reads and writes from a single process are handled
correctly.
"""
id_gen = self._create_id_generator()
- async def _get_next_async():
+ async def _get_next_async() -> None:
async with id_gen.get_next() as stream_id:
self._insert_row("master", stream_id)
@@ -530,7 +534,7 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.assertEqual(id_gen.get_current_token_for_writer("master"), -1)
self.assertEqual(id_gen.get_persisted_upto_position(), -1)
- async def _get_next_async2():
+ async def _get_next_async2() -> None:
async with id_gen.get_next_mult(3) as stream_ids:
for stream_id in stream_ids:
self._insert_row("master", stream_id)
@@ -548,14 +552,14 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.assertEqual(second_id_gen.get_current_token_for_writer("master"), -4)
self.assertEqual(second_id_gen.get_persisted_upto_position(), -4)
- def test_multiple_instance(self):
+ def test_multiple_instance(self) -> None:
"""Tests that having multiple instances that get advanced over
         federation works correctly.
"""
id_gen_1 = self._create_id_generator("first", writers=["first", "second"])
id_gen_2 = self._create_id_generator("second", writers=["first", "second"])
- async def _get_next_async():
+ async def _get_next_async() -> None:
async with id_gen_1.get_next() as stream_id:
self._insert_row("first", stream_id)
id_gen_2.advance("first", stream_id)
@@ -567,7 +571,7 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.assertEqual(id_gen_1.get_persisted_upto_position(), -1)
self.assertEqual(id_gen_2.get_persisted_upto_position(), -1)
- async def _get_next_async2():
+ async def _get_next_async2() -> None:
async with id_gen_2.get_next() as stream_id:
self._insert_row("second", stream_id)
id_gen_1.advance("second", stream_id)
@@ -584,13 +588,13 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
if not USE_POSTGRES_FOR_TESTS:
skip = "Requires Postgres"
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.db_pool: DatabasePool = self.store.db_pool
self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
- def _setup_db(self, txn):
+ def _setup_db(self, txn: LoggingTransaction) -> None:
txn.execute("CREATE SEQUENCE foobar_seq")
txn.execute(
"""
@@ -642,7 +646,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
from the postgres sequence.
"""
- def _insert(txn):
+ def _insert(txn: LoggingTransaction) -> None:
for _ in range(number):
txn.execute(
"INSERT INTO %s VALUES (nextval('foobar_seq'), ?)" % (table,),
@@ -659,7 +663,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
self.get_success(self.db_pool.runInteraction("_insert_rows", _insert))
- def test_load_existing_stream(self):
+ def test_load_existing_stream(self) -> None:
"""Test creating ID gens with multiple tables that have rows from after
the position in `stream_positions` table.
"""
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index 6a1cf33054..eaa0d7d749 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -129,21 +129,19 @@ class PaginationTestCase(HomeserverTestCase):
def test_filter_relation_senders(self):
# Messages which second user reacted to.
- filter = {"io.element.relation_senders": [self.second_user_id]}
+ filter = {"related_by_senders": [self.second_user_id]}
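+        # `related_by_senders` is the stable name for the previously unstable
+        # `io.element.relation_senders` filter key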
chunk = self._filter_messages(filter)
self.assertEqual(len(chunk), 1, chunk)
self.assertEqual(chunk[0].event_id, self.event_id_1)
# Messages which third user reacted to.
- filter = {"io.element.relation_senders": [self.third_user_id]}
+ filter = {"related_by_senders": [self.third_user_id]}
chunk = self._filter_messages(filter)
self.assertEqual(len(chunk), 1, chunk)
self.assertEqual(chunk[0].event_id, self.event_id_2)
# Messages which either user reacted to.
- filter = {
- "io.element.relation_senders": [self.second_user_id, self.third_user_id]
- }
+ filter = {"related_by_senders": [self.second_user_id, self.third_user_id]}
chunk = self._filter_messages(filter)
self.assertEqual(len(chunk), 2, chunk)
self.assertCountEqual(
@@ -152,20 +150,20 @@ class PaginationTestCase(HomeserverTestCase):
def test_filter_relation_type(self):
# Messages which have annotations.
- filter = {"io.element.relation_types": [RelationTypes.ANNOTATION]}
+ filter = {"related_by_rel_types": [RelationTypes.ANNOTATION]}
chunk = self._filter_messages(filter)
self.assertEqual(len(chunk), 1, chunk)
self.assertEqual(chunk[0].event_id, self.event_id_1)
# Messages which have references.
- filter = {"io.element.relation_types": [RelationTypes.REFERENCE]}
+ filter = {"related_by_rel_types": [RelationTypes.REFERENCE]}
chunk = self._filter_messages(filter)
self.assertEqual(len(chunk), 1, chunk)
self.assertEqual(chunk[0].event_id, self.event_id_2)
# Messages which have either annotations or references.
filter = {
- "io.element.relation_types": [
+ "related_by_rel_types": [
RelationTypes.ANNOTATION,
RelationTypes.REFERENCE,
]
@@ -179,8 +177,8 @@ class PaginationTestCase(HomeserverTestCase):
def test_filter_relation_senders_and_type(self):
# Messages which second user reacted to.
filter = {
- "io.element.relation_senders": [self.second_user_id],
- "io.element.relation_types": [RelationTypes.ANNOTATION],
+ "related_by_senders": [self.second_user_id],
+ "related_by_rel_types": [RelationTypes.ANNOTATION],
}
chunk = self._filter_messages(filter)
self.assertEqual(len(chunk), 1, chunk)
@@ -201,7 +199,7 @@ class PaginationTestCase(HomeserverTestCase):
tok=self.second_tok,
)
- filter = {"io.element.relation_senders": [self.second_user_id]}
+ filter = {"related_by_senders": [self.second_user_id]}
chunk = self._filter_messages(filter)
self.assertEqual(len(chunk), 1, chunk)
self.assertEqual(chunk[0].event_id, self.event_id_1)
diff --git a/tests/storage/test_unsafe_locale.py b/tests/storage/test_unsafe_locale.py
new file mode 100644
index 0000000000..ba53c22818
--- /dev/null
+++ b/tests/storage/test_unsafe_locale.py
@@ -0,0 +1,46 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from unittest.mock import MagicMock, patch
+
+from synapse.storage.database import make_conn
+from synapse.storage.engines._base import IncorrectDatabaseSetup
+
+from tests.unittest import HomeserverTestCase
+from tests.utils import USE_POSTGRES_FOR_TESTS
+
+
+class UnsafeLocaleTest(HomeserverTestCase):
+ if not USE_POSTGRES_FOR_TESTS:
+ skip = "Requires Postgres"
+
+ @patch("synapse.storage.engines.postgres.PostgresEngine.get_db_locale")
+ def test_unsafe_locale(self, mock_db_locale: MagicMock) -> None:
+ mock_db_locale.return_value = ("B", "B")
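+        # report a (collation, ctype) pair other than ("C", "C") so that the
+        # locale checks below fail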
+ database = self.hs.get_datastores().databases[0]
+
+ db_conn = make_conn(database._database_config, database.engine, "test_unsafe")
+ with self.assertRaises(IncorrectDatabaseSetup):
+ database.engine.check_database(db_conn)
+ with self.assertRaises(IncorrectDatabaseSetup):
+ database.engine.check_new_database(db_conn)
+ db_conn.close()
+
+ def test_safe_locale(self) -> None:
+ database = self.hs.get_datastores().databases[0]
+
+ db_conn = make_conn(database._database_config, database.engine, "test_unsafe")
+ with db_conn.cursor() as txn:
+ res = database.engine.get_db_locale(txn)
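+            # the test database is created with the C locale, which is what
+            # Synapse expects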
+ self.assertEqual(res, ("C", "C"))
+ db_conn.close()
|