From 90b2327066d2343faa86c464a182b6f3c4422ecd Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 17:52:15 +0000 Subject: Add `delay_cancellation` utility function (#12180) `delay_cancellation` behaves like `stop_cancellation`, except it delays `CancelledError`s until the original `Deferred` resolves. This is handy for unifying cleanup paths and ensuring that uncancelled coroutines don't use finished logcontexts. Signed-off-by: Sean Quah --- changelog.d/12180.misc | 1 + synapse/util/async_helpers.py | 48 +++++++++++++-- tests/util/test_async_helpers.py | 124 +++++++++++++++++++++++++++++++++++++-- 3 files changed, 161 insertions(+), 12 deletions(-) create mode 100644 changelog.d/12180.misc diff --git a/changelog.d/12180.misc b/changelog.d/12180.misc new file mode 100644 index 0000000000..7a347352fd --- /dev/null +++ b/changelog.d/12180.misc @@ -0,0 +1 @@ +Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index a9f67dcbac..69c8c1baa9 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -686,12 +686,48 @@ def stop_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]": Synapse logcontext rules. Returns: - A new `Deferred`, which will contain the result of the original `Deferred`, - but will not propagate cancellation through to the original. When cancelled, - the new `Deferred` will fail with a `CancelledError` and will not follow the - Synapse logcontext rules. `make_deferred_yieldable` should be used to wrap - the new `Deferred`. + A new `Deferred`, which will contain the result of the original `Deferred`. + The new `Deferred` will not propagate cancellation through to the original. + When cancelled, the new `Deferred` will fail with a `CancelledError`. + + The new `Deferred` will not follow the Synapse logcontext rules and should be + wrapped with `make_deferred_yieldable`. + """ + new_deferred: "defer.Deferred[T]" = defer.Deferred() + deferred.chainDeferred(new_deferred) + return new_deferred + + +def delay_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]": + """Delay cancellation of a `Deferred` until it resolves. + + Has the same effect as `stop_cancellation`, but the returned `Deferred` will not + resolve with a `CancelledError` until the original `Deferred` resolves. + + Args: + deferred: The `Deferred` to protect against cancellation. May optionally follow + the Synapse logcontext rules. + + Returns: + A new `Deferred`, which will contain the result of the original `Deferred`. + The new `Deferred` will not propagate cancellation through to the original. + When cancelled, the new `Deferred` will wait until the original `Deferred` + resolves before failing with a `CancelledError`. + + The new `Deferred` will follow the Synapse logcontext rules if `deferred` + follows the Synapse logcontext rules. Otherwise the new `Deferred` should be + wrapped with `make_deferred_yieldable`. """ - new_deferred: defer.Deferred[T] = defer.Deferred() + + def handle_cancel(new_deferred: "defer.Deferred[T]") -> None: + # before the new deferred is cancelled, we `pause` it to stop the cancellation + # propagating. we then `unpause` it once the wrapped deferred completes, to + # propagate the exception. 
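+        # (firing a paused `Deferred` only records the result; its callbacks do not run until `unpause`, so the errback below is held back as intended.)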
+ new_deferred.pause() + new_deferred.errback(Failure(CancelledError())) + + deferred.addBoth(lambda _: new_deferred.unpause()) + + new_deferred: "defer.Deferred[T]" = defer.Deferred(handle_cancel) deferred.chainDeferred(new_deferred) return new_deferred diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index ff53ce114b..e5bc416de1 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -13,6 +13,8 @@ # limitations under the License. import traceback +from parameterized import parameterized_class + from twisted.internet import defer from twisted.internet.defer import CancelledError, Deferred, ensureDeferred from twisted.internet.task import Clock @@ -23,10 +25,12 @@ from synapse.logging.context import ( LoggingContext, PreserveLoggingContext, current_context, + make_deferred_yieldable, ) from synapse.util.async_helpers import ( ObservableDeferred, concurrently_execute, + delay_cancellation, stop_cancellation, timeout_deferred, ) @@ -313,13 +317,27 @@ class ConcurrentlyExecuteTest(TestCase): self.successResultOf(d2) -class StopCancellationTests(TestCase): - """Tests for the `stop_cancellation` function.""" +@parameterized_class( + ("wrapper",), + [("stop_cancellation",), ("delay_cancellation",)], +) +class CancellationWrapperTests(TestCase): + """Common tests for the `stop_cancellation` and `delay_cancellation` functions.""" + + wrapper: str + + def wrap_deferred(self, deferred: "Deferred[str]") -> "Deferred[str]": + if self.wrapper == "stop_cancellation": + return stop_cancellation(deferred) + elif self.wrapper == "delay_cancellation": + return delay_cancellation(deferred) + else: + raise ValueError(f"Unsupported wrapper type: {self.wrapper}") def test_succeed(self): """Test that the new `Deferred` receives the result.""" deferred: "Deferred[str]" = Deferred() - wrapper_deferred = stop_cancellation(deferred) + wrapper_deferred = self.wrap_deferred(deferred) # Success should propagate through. deferred.callback("success") @@ -329,7 +347,7 @@ class StopCancellationTests(TestCase): def test_failure(self): """Test that the new `Deferred` receives the `Failure`.""" deferred: "Deferred[str]" = Deferred() - wrapper_deferred = stop_cancellation(deferred) + wrapper_deferred = self.wrap_deferred(deferred) # Failure should propagate through. deferred.errback(ValueError("abc")) @@ -337,6 +355,10 @@ class StopCancellationTests(TestCase): self.failureResultOf(wrapper_deferred, ValueError) self.assertIsNone(deferred.result, "`Failure` was not consumed") + +class StopCancellationTests(TestCase): + """Tests for the `stop_cancellation` function.""" + def test_cancellation(self): """Test that cancellation of the new `Deferred` leaves the original running.""" deferred: "Deferred[str]" = Deferred() @@ -347,11 +369,101 @@ class StopCancellationTests(TestCase): self.assertTrue(wrapper_deferred.called) self.failureResultOf(wrapper_deferred, CancelledError) self.assertFalse( - deferred.called, "Original `Deferred` was unexpectedly cancelled." + deferred.called, "Original `Deferred` was unexpectedly cancelled" + ) + + # Now make the original `Deferred` fail. + # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed + # in logs. 
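+        # (The errback is chained through to the already-cancelled wrapper, which silently drops the late result, leaving `deferred.result` as `None`.)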
+ deferred.errback(ValueError("abc")) + self.assertIsNone(deferred.result, "`Failure` was not consumed") + + +class DelayCancellationTests(TestCase): + """Tests for the `delay_cancellation` function.""" + + def test_cancellation(self): + """Test that cancellation of the new `Deferred` waits for the original.""" + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = delay_cancellation(deferred) + + # Cancel the new `Deferred`. + wrapper_deferred.cancel() + self.assertNoResult(wrapper_deferred) + self.assertFalse( + deferred.called, "Original `Deferred` was unexpectedly cancelled" + ) + + # Now make the original `Deferred` fail. + # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed + # in logs. + deferred.errback(ValueError("abc")) + self.assertIsNone(deferred.result, "`Failure` was not consumed") + + # Now that the original `Deferred` has failed, we should get a `CancelledError`. + self.failureResultOf(wrapper_deferred, CancelledError) + + def test_suppresses_second_cancellation(self): + """Test that a second cancellation is suppressed. + + Identical to `test_cancellation` except the new `Deferred` is cancelled twice. + """ + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = delay_cancellation(deferred) + + # Cancel the new `Deferred`, twice. + wrapper_deferred.cancel() + wrapper_deferred.cancel() + self.assertNoResult(wrapper_deferred) + self.assertFalse( + deferred.called, "Original `Deferred` was unexpectedly cancelled" ) - # Now make the inner `Deferred` fail. + # Now make the original `Deferred` fail. # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed # in logs. deferred.errback(ValueError("abc")) self.assertIsNone(deferred.result, "`Failure` was not consumed") + + # Now that the original `Deferred` has failed, we should get a `CancelledError`. + self.failureResultOf(wrapper_deferred, CancelledError) + + def test_propagates_cancelled_error(self): + """Test that a `CancelledError` from the original `Deferred` gets propagated.""" + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = delay_cancellation(deferred) + + # Fail the original `Deferred` with a `CancelledError`. + cancelled_error = CancelledError() + deferred.errback(cancelled_error) + + # The new `Deferred` should fail with exactly the same `CancelledError`. + self.assertTrue(wrapper_deferred.called) + self.assertIs(cancelled_error, self.failureResultOf(wrapper_deferred).value) + + def test_preserves_logcontext(self): + """Test that logging contexts are preserved.""" + blocking_d: "Deferred[None]" = Deferred() + + async def inner(): + await make_deferred_yieldable(blocking_d) + + async def outer(): + with LoggingContext("c") as c: + try: + await delay_cancellation(defer.ensureDeferred(inner())) + self.fail("`CancelledError` was not raised") + except CancelledError: + self.assertEqual(c, current_context()) + # Succeed with no error, unless the logging context is wrong. + + # Run and block inside `inner()`. + d = defer.ensureDeferred(outer()) + self.assertEqual(SENTINEL_CONTEXT, current_context()) + + d.cancel() + + # Now unblock. `outer()` will consume the `CancelledError` and check the + # logging context. 
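+        # (`delay_cancellation` only delivers the delayed `CancelledError` once `blocking_d`, and hence `inner()`, has resolved.)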
+ blocking_d.callback(None) + self.successResultOf(d) -- cgit 1.4.1 From 8e5706d14448c0fe8d1c55eaca38a672c701d7a9 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 17:52:58 +0000 Subject: Fix broken background updates when using sqlite with `enable_search` off (#12215) Signed-off-by: Sean Quah --- changelog.d/12215.bugfix | 1 + synapse/storage/databases/main/search.py | 13 +++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12215.bugfix diff --git a/changelog.d/12215.bugfix b/changelog.d/12215.bugfix new file mode 100644 index 0000000000..593b12556b --- /dev/null +++ b/changelog.d/12215.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index e23b119072..c5e9010c83 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -125,9 +125,6 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): ): super().__init__(database, db_conn, hs) - if not hs.config.server.enable_search: - return - self.db_pool.updates.register_background_update_handler( self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search ) @@ -243,9 +240,13 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): return len(event_search_rows) - result = await self.db_pool.runInteraction( - self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn - ) + if self.hs.config.server.enable_search: + result = await self.db_pool.runInteraction( + self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn + ) + else: + # Don't index anything if search is not enabled. + result = 0 if not result: await self.db_pool.updates._end_background_update( -- cgit 1.4.1 From 605d161d7d585847fd1bb98d14d5281daeac8e86 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 18:49:07 +0000 Subject: Add cancellation support to `ReadWriteLock` (#12120) Also convert `ReadWriteLock` to use async context managers. Signed-off-by: Sean Quah --- changelog.d/12120.misc | 1 + synapse/handlers/pagination.py | 8 +- synapse/util/async_helpers.py | 71 ++++---- tests/util/test_rwlock.py | 395 +++++++++++++++++++++++++++++++++++------ 4 files changed, 382 insertions(+), 93 deletions(-) create mode 100644 changelog.d/12120.misc diff --git a/changelog.d/12120.misc b/changelog.d/12120.misc new file mode 100644 index 0000000000..3603096500 --- /dev/null +++ b/changelog.d/12120.misc @@ -0,0 +1 @@ +Add support for cancellation to `ReadWriteLock`. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 183fabcfc0..60059fec3e 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -350,7 +350,7 @@ class PaginationHandler: """ self._purges_in_progress_by_room.add(room_id) try: - with await self.pagination_lock.write(room_id): + async with self.pagination_lock.write(room_id): await self.storage.purge_events.purge_history( room_id, token, delete_local_events ) @@ -406,7 +406,7 @@ class PaginationHandler: room_id: room to be purged force: set true to skip checking for joined users. 
""" - with await self.pagination_lock.write(room_id): + async with self.pagination_lock.write(room_id): # first check that we have no users in this room if not force: joined = await self.store.is_host_joined(room_id, self._server_name) @@ -448,7 +448,7 @@ class PaginationHandler: room_token = from_token.room_key - with await self.pagination_lock.read(room_id): + async with self.pagination_lock.read(room_id): ( membership, member_event_id, @@ -615,7 +615,7 @@ class PaginationHandler: self._purges_in_progress_by_room.add(room_id) try: - with await self.pagination_lock.write(room_id): + async with self.pagination_lock.write(room_id): self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN self._delete_by_id[ delete_id diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 69c8c1baa9..6a8e844d63 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -18,9 +18,10 @@ import collections import inspect import itertools import logging -from contextlib import contextmanager +from contextlib import asynccontextmanager, contextmanager from typing import ( Any, + AsyncIterator, Awaitable, Callable, Collection, @@ -40,7 +41,7 @@ from typing import ( ) import attr -from typing_extensions import ContextManager, Literal +from typing_extensions import AsyncContextManager, Literal from twisted.internet import defer from twisted.internet.defer import CancelledError @@ -491,7 +492,7 @@ class ReadWriteLock: Example: - with await read_write_lock.read("test_key"): + async with read_write_lock.read("test_key"): # do some work """ @@ -514,22 +515,24 @@ class ReadWriteLock: # Latest writer queued self.key_to_current_writer: Dict[str, defer.Deferred] = {} - async def read(self, key: str) -> ContextManager: - new_defer: "defer.Deferred[None]" = defer.Deferred() + def read(self, key: str) -> AsyncContextManager: + @asynccontextmanager + async def _ctx_manager() -> AsyncIterator[None]: + new_defer: "defer.Deferred[None]" = defer.Deferred() - curr_readers = self.key_to_current_readers.setdefault(key, set()) - curr_writer = self.key_to_current_writer.get(key, None) + curr_readers = self.key_to_current_readers.setdefault(key, set()) + curr_writer = self.key_to_current_writer.get(key, None) - curr_readers.add(new_defer) + curr_readers.add(new_defer) - # We wait for the latest writer to finish writing. We can safely ignore - # any existing readers... as they're readers. - if curr_writer: - await make_deferred_yieldable(curr_writer) - - @contextmanager - def _ctx_manager() -> Iterator[None]: try: + # We wait for the latest writer to finish writing. We can safely ignore + # any existing readers... as they're readers. + # May raise a `CancelledError` if the `Deferred` wrapping us is + # cancelled. The `Deferred` we are waiting on must not be cancelled, + # since we do not own it. 
+ if curr_writer: + await make_deferred_yieldable(stop_cancellation(curr_writer)) yield finally: with PreserveLoggingContext(): @@ -538,29 +541,35 @@ class ReadWriteLock: return _ctx_manager() - async def write(self, key: str) -> ContextManager: - new_defer: "defer.Deferred[None]" = defer.Deferred() + def write(self, key: str) -> AsyncContextManager: + @asynccontextmanager + async def _ctx_manager() -> AsyncIterator[None]: + new_defer: "defer.Deferred[None]" = defer.Deferred() - curr_readers = self.key_to_current_readers.get(key, set()) - curr_writer = self.key_to_current_writer.get(key, None) + curr_readers = self.key_to_current_readers.get(key, set()) + curr_writer = self.key_to_current_writer.get(key, None) - # We wait on all latest readers and writer. - to_wait_on = list(curr_readers) - if curr_writer: - to_wait_on.append(curr_writer) + # We wait on all latest readers and writer. + to_wait_on = list(curr_readers) + if curr_writer: + to_wait_on.append(curr_writer) - # We can clear the list of current readers since the new writer waits - # for them to finish. - curr_readers.clear() - self.key_to_current_writer[key] = new_defer + # We can clear the list of current readers since `new_defer` waits + # for them to finish. + curr_readers.clear() + self.key_to_current_writer[key] = new_defer - await make_deferred_yieldable(defer.gatherResults(to_wait_on)) - - @contextmanager - def _ctx_manager() -> Iterator[None]: + to_wait_on_defer = defer.gatherResults(to_wait_on) try: + # Wait for all current readers and the latest writer to finish. + # May raise a `CancelledError` immediately after the wait if the + # `Deferred` wrapping us is cancelled. We must only release the lock + # once we have acquired it, hence the use of `delay_cancellation` + # rather than `stop_cancellation`. + await make_deferred_yieldable(delay_cancellation(to_wait_on_defer)) yield finally: + # Release the lock. with PreserveLoggingContext(): new_defer.callback(None) # `self.key_to_current_writer[key]` may be missing if there was another diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py index 0774625b85..0c84226197 100644 --- a/tests/util/test_rwlock.py +++ b/tests/util/test_rwlock.py @@ -12,8 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import AsyncContextManager, Callable, Sequence, Tuple + from twisted.internet import defer -from twisted.internet.defer import Deferred +from twisted.internet.defer import CancelledError, Deferred from synapse.util.async_helpers import ReadWriteLock @@ -21,87 +23,187 @@ from tests import unittest class ReadWriteLockTestCase(unittest.TestCase): - def _assert_called_before_not_after(self, lst, first_false): - for i, d in enumerate(lst[:first_false]): - self.assertTrue(d.called, msg="%d was unexpectedly false" % i) + def _start_reader_or_writer( + self, + read_or_write: Callable[[str], AsyncContextManager], + key: str, + return_value: str, + ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + """Starts a reader or writer which acquires the lock, blocks, then completes. + + Args: + read_or_write: A function returning a context manager for a lock. + Either a bound `ReadWriteLock.read` or `ReadWriteLock.write`. + key: The key to read or write. + return_value: A string that the reader or writer will resolve with when + done. + + Returns: + A tuple of three `Deferred`s: + * A `Deferred` that resolves with `return_value` once the reader or writer + completes successfully. 
+ * A `Deferred` that resolves once the reader or writer acquires the lock. + * A `Deferred` that blocks the reader or writer. Must be resolved by the + caller to allow the reader or writer to release the lock and complete. + """ + acquired_d: "Deferred[None]" = Deferred() + unblock_d: "Deferred[None]" = Deferred() + + async def reader_or_writer(): + async with read_or_write(key): + acquired_d.callback(None) + await unblock_d + return return_value + + d = defer.ensureDeferred(reader_or_writer()) + return d, acquired_d, unblock_d + + def _start_blocking_reader( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + """Starts a reader which acquires the lock, blocks, then releases the lock. + + See the docstring for `_start_reader_or_writer` for details about the arguments + and return values. + """ + return self._start_reader_or_writer(rwlock.read, key, return_value) + + def _start_blocking_writer( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + """Starts a writer which acquires the lock, blocks, then releases the lock. + + See the docstring for `_start_reader_or_writer` for details about the arguments + and return values. + """ + return self._start_reader_or_writer(rwlock.write, key, return_value) + + def _start_nonblocking_reader( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]"]: + """Starts a reader which acquires the lock, then releases it immediately. + + See the docstring for `_start_reader_or_writer` for details about the arguments. + + Returns: + A tuple of two `Deferred`s: + * A `Deferred` that resolves with `return_value` once the reader completes + successfully. + * A `Deferred` that resolves once the reader acquires the lock. + """ + d, acquired_d, unblock_d = self._start_reader_or_writer( + rwlock.read, key, return_value + ) + unblock_d.callback(None) + return d, acquired_d + + def _start_nonblocking_writer( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]"]: + """Starts a writer which acquires the lock, then releases it immediately. + + See the docstring for `_start_reader_or_writer` for details about the arguments. + + Returns: + A tuple of two `Deferred`s: + * A `Deferred` that resolves with `return_value` once the writer completes + successfully. + * A `Deferred` that resolves once the writer acquires the lock. + """ + d, acquired_d, unblock_d = self._start_reader_or_writer( + rwlock.write, key, return_value + ) + unblock_d.callback(None) + return d, acquired_d + + def _assert_first_n_resolved( + self, deferreds: Sequence["defer.Deferred[None]"], n: int + ) -> None: + """Assert that exactly the first n `Deferred`s in the given list are resolved. - for i, d in enumerate(lst[first_false:]): + Args: + deferreds: The list of `Deferred`s to be checked. + n: The number of `Deferred`s at the start of `deferreds` that should be + resolved. 
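+                For example, `n = 2` asserts that the first two `Deferred`s are resolved and that all later ones are not.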
+ """ + for i, d in enumerate(deferreds[:n]): + self.assertTrue(d.called, msg="deferred %d was unexpectedly unresolved" % i) + + for i, d in enumerate(deferreds[n:]): self.assertFalse( - d.called, msg="%d was unexpectedly true" % (i + first_false) + d.called, msg="deferred %d was unexpectedly resolved" % (i + n) ) def test_rwlock(self): rwlock = ReadWriteLock() - - key = object() + key = "key" ds = [ - rwlock.read(key), # 0 - rwlock.read(key), # 1 - rwlock.write(key), # 2 - rwlock.write(key), # 3 - rwlock.read(key), # 4 - rwlock.read(key), # 5 - rwlock.write(key), # 6 + self._start_blocking_reader(rwlock, key, "0"), + self._start_blocking_reader(rwlock, key, "1"), + self._start_blocking_writer(rwlock, key, "2"), + self._start_blocking_writer(rwlock, key, "3"), + self._start_blocking_reader(rwlock, key, "4"), + self._start_blocking_reader(rwlock, key, "5"), + self._start_blocking_writer(rwlock, key, "6"), ] - ds = [defer.ensureDeferred(d) for d in ds] + # `Deferred`s that resolve when each reader or writer acquires the lock. + acquired_ds = [acquired_d for _, acquired_d, _ in ds] + # `Deferred`s that will trigger the release of locks when resolved. + release_ds = [release_d for _, _, release_d in ds] - self._assert_called_before_not_after(ds, 2) + # The first two readers should acquire their locks. + self._assert_first_n_resolved(acquired_ds, 2) - with ds[0].result: - self._assert_called_before_not_after(ds, 2) - self._assert_called_before_not_after(ds, 2) + # Release one of the read locks. The next writer should not acquire the lock, + # because there is another reader holding the lock. + self._assert_first_n_resolved(acquired_ds, 2) + release_ds[0].callback(None) + self._assert_first_n_resolved(acquired_ds, 2) - with ds[1].result: - self._assert_called_before_not_after(ds, 2) - self._assert_called_before_not_after(ds, 3) + # Release the other read lock. The next writer should acquire the lock. + self._assert_first_n_resolved(acquired_ds, 2) + release_ds[1].callback(None) + self._assert_first_n_resolved(acquired_ds, 3) - with ds[2].result: - self._assert_called_before_not_after(ds, 3) - self._assert_called_before_not_after(ds, 4) + # Release the write lock. The next writer should acquire the lock. + self._assert_first_n_resolved(acquired_ds, 3) + release_ds[2].callback(None) + self._assert_first_n_resolved(acquired_ds, 4) - with ds[3].result: - self._assert_called_before_not_after(ds, 4) - self._assert_called_before_not_after(ds, 6) + # Release the write lock. The next two readers should acquire locks. + self._assert_first_n_resolved(acquired_ds, 4) + release_ds[3].callback(None) + self._assert_first_n_resolved(acquired_ds, 6) - with ds[5].result: - self._assert_called_before_not_after(ds, 6) - self._assert_called_before_not_after(ds, 6) + # Release one of the read locks. The next writer should not acquire the lock, + # because there is another reader holding the lock. + self._assert_first_n_resolved(acquired_ds, 6) + release_ds[5].callback(None) + self._assert_first_n_resolved(acquired_ds, 6) - with ds[4].result: - self._assert_called_before_not_after(ds, 6) - self._assert_called_before_not_after(ds, 7) + # Release the other read lock. The next writer should acquire the lock. + self._assert_first_n_resolved(acquired_ds, 6) + release_ds[4].callback(None) + self._assert_first_n_resolved(acquired_ds, 7) - with ds[6].result: - pass + # Release the write lock. 
+ release_ds[6].callback(None) - d = defer.ensureDeferred(rwlock.write(key)) - self.assertTrue(d.called) - with d.result: - pass + # Acquire and release the write and read locks one last time for good measure. + _, acquired_d = self._start_nonblocking_writer(rwlock, key, "last writer") + self.assertTrue(acquired_d.called) - d = defer.ensureDeferred(rwlock.read(key)) - self.assertTrue(d.called) - with d.result: - pass + _, acquired_d = self._start_nonblocking_reader(rwlock, key, "last reader") + self.assertTrue(acquired_d.called) def test_lock_handoff_to_nonblocking_writer(self): """Test a writer handing the lock to another writer that completes instantly.""" rwlock = ReadWriteLock() key = "key" - unblock: "Deferred[None]" = Deferred() - - async def blocking_write(): - with await rwlock.write(key): - await unblock - - async def nonblocking_write(): - with await rwlock.write(key): - pass - - d1 = defer.ensureDeferred(blocking_write()) - d2 = defer.ensureDeferred(nonblocking_write()) + d1, _, unblock = self._start_blocking_writer(rwlock, key, "write 1 completed") + d2, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed") self.assertFalse(d1.called) self.assertFalse(d2.called) @@ -111,5 +213,182 @@ class ReadWriteLockTestCase(unittest.TestCase): self.assertTrue(d2.called) # The `ReadWriteLock` should operate as normal. - d3 = defer.ensureDeferred(nonblocking_write()) + d3, _ = self._start_nonblocking_writer(rwlock, key, "write 3 completed") self.assertTrue(d3.called) + + def test_cancellation_while_holding_read_lock(self): + """Test cancellation while holding a read lock. + + A waiting writer should be given the lock when the reader holding the lock is + cancelled. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A reader takes the lock and blocks. + reader_d, _, _ = self._start_blocking_reader(rwlock, key, "read completed") + + # 2. A writer waits for the reader to complete. + writer_d, _ = self._start_nonblocking_writer(rwlock, key, "write completed") + self.assertFalse(writer_d.called) + + # 3. The reader is cancelled. + reader_d.cancel() + self.failureResultOf(reader_d, CancelledError) + + # 4. The writer should take the lock and complete. + self.assertTrue( + writer_d.called, "Writer is stuck waiting for a cancelled reader" + ) + self.assertEqual("write completed", self.successResultOf(writer_d)) + + def test_cancellation_while_holding_write_lock(self): + """Test cancellation while holding a write lock. + + A waiting reader should be given the lock when the writer holding the lock is + cancelled. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A writer takes the lock and blocks. + writer_d, _, _ = self._start_blocking_writer(rwlock, key, "write completed") + + # 2. A reader waits for the writer to complete. + reader_d, _ = self._start_nonblocking_reader(rwlock, key, "read completed") + self.assertFalse(reader_d.called) + + # 3. The writer is cancelled. + writer_d.cancel() + self.failureResultOf(writer_d, CancelledError) + + # 4. The reader should take the lock and complete. + self.assertTrue( + reader_d.called, "Reader is stuck waiting for a cancelled writer" + ) + self.assertEqual("read completed", self.successResultOf(reader_d)) + + def test_cancellation_while_waiting_for_read_lock(self): + """Test cancellation while waiting for a read lock. 
+ + Tests that cancelling a waiting reader: + * does not cancel the writer it is waiting on + * does not cancel the next writer waiting on it + * does not allow the next writer to acquire the lock before an earlier writer + has finished + * does not keep the next writer waiting indefinitely + + These correspond to the asserts with explicit messages. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A writer takes the lock and blocks. + writer1_d, _, unblock_writer1 = self._start_blocking_writer( + rwlock, key, "write 1 completed" + ) + + # 2. A reader waits for the first writer to complete. + # This reader will be cancelled later. + reader_d, _ = self._start_nonblocking_reader(rwlock, key, "read completed") + self.assertFalse(reader_d.called) + + # 3. A second writer waits for both the first writer and the reader to complete. + writer2_d, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed") + self.assertFalse(writer2_d.called) + + # 4. The waiting reader is cancelled. + # Neither of the writers should be cancelled. + # The second writer should still be waiting, but only on the first writer. + reader_d.cancel() + self.failureResultOf(reader_d, CancelledError) + self.assertFalse(writer1_d.called, "First writer was unexpectedly cancelled") + self.assertFalse( + writer2_d.called, + "Second writer was unexpectedly cancelled or given the lock before the " + "first writer finished", + ) + + # 5. Unblock the first writer, which should complete. + unblock_writer1.callback(None) + self.assertEqual("write 1 completed", self.successResultOf(writer1_d)) + + # 6. The second writer should take the lock and complete. + self.assertTrue( + writer2_d.called, "Second writer is stuck waiting for a cancelled reader" + ) + self.assertEqual("write 2 completed", self.successResultOf(writer2_d)) + + def test_cancellation_while_waiting_for_write_lock(self): + """Test cancellation while waiting for a write lock. + + Tests that cancelling a waiting writer: + * does not cancel the reader or writer it is waiting on + * does not cancel the next writer waiting on it + * does not allow the next writer to acquire the lock before an earlier reader + and writer have finished + * does not keep the next writer waiting indefinitely + + These correspond to the asserts with explicit messages. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A reader takes the lock and blocks. + reader_d, _, unblock_reader = self._start_blocking_reader( + rwlock, key, "read completed" + ) + + # 2. A writer waits for the reader to complete. + writer1_d, _, unblock_writer1 = self._start_blocking_writer( + rwlock, key, "write 1 completed" + ) + + # 3. A second writer waits for both the reader and first writer to complete. + # This writer will be cancelled later. + writer2_d, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed") + self.assertFalse(writer2_d.called) + + # 4. A third writer waits for the second writer to complete. + writer3_d, _ = self._start_nonblocking_writer(rwlock, key, "write 3 completed") + self.assertFalse(writer3_d.called) + + # 5. The second writer is cancelled, but continues waiting for the lock. + # The reader, first writer and third writer should not be cancelled. + # The first writer should still be waiting on the reader. + # The third writer should still be waiting on the second writer. 
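+        # (`ReadWriteLock.write` wraps its wait in `delay_cancellation`, so the cancelled writer still acquires the lock in turn and releases it; see steps 8 and 9 below.)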
+ writer2_d.cancel() + self.assertNoResult(writer2_d) + self.assertFalse(reader_d.called, "Reader was unexpectedly cancelled") + self.assertFalse(writer1_d.called, "First writer was unexpectedly cancelled") + self.assertFalse( + writer3_d.called, + "Third writer was unexpectedly cancelled or given the lock before the first " + "writer finished", + ) + + # 6. Unblock the reader, which should complete. + # The first writer should be given the lock and block. + # The third writer should still be waiting on the second writer. + unblock_reader.callback(None) + self.assertEqual("read completed", self.successResultOf(reader_d)) + self.assertNoResult(writer2_d) + self.assertFalse( + writer3_d.called, + "Third writer was unexpectedly given the lock before the first writer " + "finished", + ) + + # 7. Unblock the first writer, which should complete. + unblock_writer1.callback(None) + self.assertEqual("write 1 completed", self.successResultOf(writer1_d)) + + # 8. The second writer should take the lock and release it immediately, since it + # has been cancelled. + self.failureResultOf(writer2_d, CancelledError) + + # 9. The third writer should take the lock and complete. + self.assertTrue( + writer3_d.called, "Third writer is stuck waiting for a cancelled writer" + ) + self.assertEqual("write 3 completed", self.successResultOf(writer3_d)) -- cgit 1.4.1 From 2fcf4b3f6cd2a0be6597622664636d2219957c2a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 19:04:29 +0000 Subject: Add cancellation support to `@cached` and `@cachedList` decorators (#12183) These decorators mostly support cancellation already. Add cancellation tests and fix use of finished logging contexts by delaying cancellation, as suggested by @erikjohnston. Signed-off-by: Sean Quah --- changelog.d/12183.misc | 1 + synapse/util/caches/descriptors.py | 11 +++ tests/util/caches/test_descriptors.py | 147 +++++++++++++++++++++++++++++++++- 3 files changed, 157 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12183.misc diff --git a/changelog.d/12183.misc b/changelog.d/12183.misc new file mode 100644 index 0000000000..dd441bb64f --- /dev/null +++ b/changelog.d/12183.misc @@ -0,0 +1 @@ +Add cancellation support to `@cached` and `@cachedList` decorators. diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index c3c5c16db9..eda92d864d 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -41,6 +41,7 @@ from twisted.python.failure import Failure from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.util import unwrapFirstError +from synapse.util.async_helpers import delay_cancellation from synapse.util.caches.deferred_cache import DeferredCache from synapse.util.caches.lrucache import LruCache @@ -350,6 +351,11 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): ret = defer.maybeDeferred(preserve_fn(self.orig), obj, *args, **kwargs) ret = cache.set(cache_key, ret, callback=invalidate_callback) + # We started a new call to `self.orig`, so we must always wait for it to + # complete. Otherwise we might mark our current logging context as + # finished while `self.orig` is still using it in the background. 
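+        # (`delay_cancellation` holds back any `CancelledError` until `ret` resolves, so the lookup and its logcontext always run to completion.)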
+ ret = delay_cancellation(ret) + return make_deferred_yieldable(ret) wrapped = cast(_CachedFunction, _wrapped) @@ -510,6 +516,11 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): d = defer.gatherResults(cached_defers, consumeErrors=True).addCallbacks( lambda _: results, unwrapFirstError ) + if missing: + # We started a new call to `self.orig`, so we must always wait for it to + # complete. Otherwise we might mark our current logging context as + # finished while `self.orig` is still using it in the background. + d = delay_cancellation(d) return make_deferred_yieldable(d) else: return defer.succeed(results) diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 6a4b17527a..48e616ac74 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -17,7 +17,7 @@ from typing import Set from unittest import mock from twisted.internet import defer, reactor -from twisted.internet.defer import Deferred +from twisted.internet.defer import CancelledError, Deferred from synapse.api.errors import SynapseError from synapse.logging.context import ( @@ -28,7 +28,7 @@ from synapse.logging.context import ( make_deferred_yieldable, ) from synapse.util.caches import descriptors -from synapse.util.caches.descriptors import cached, lru_cache +from synapse.util.caches.descriptors import cached, cachedList, lru_cache from tests import unittest from tests.test_utils import get_awaitable_result @@ -493,6 +493,74 @@ class DescriptorTestCase(unittest.TestCase): obj.invalidate() top_invalidate.assert_called_once() + def test_cancel(self): + """Test that cancelling a lookup does not cancel other lookups""" + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + @cached() + async def fn(self, arg1): + await complete_lookup + return str(arg1) + + obj = Cls() + + d1 = obj.fn(123) + d2 = obj.fn(123) + self.assertFalse(d1.called) + self.assertFalse(d2.called) + + # Cancel `d1`, which is the lookup that caused `fn` to run. + d1.cancel() + + # `d2` should complete normally. + complete_lookup.callback(None) + self.failureResultOf(d1, CancelledError) + self.assertEqual(d2.result, "123") + + def test_cancel_logcontexts(self): + """Test that cancellation does not break logcontexts. + + * The `CancelledError` must be raised with the correct logcontext. + * The inner lookup must not resume with a finished logcontext. + * The inner lookup must not restore a finished logcontext when done. 
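+
+        (The decorator achieves this via the `delay_cancellation` call added in `synapse/util/caches/descriptors.py` above.)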
+ """ + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + inner_context_was_finished = False + + @cached() + async def fn(self, arg1): + await make_deferred_yieldable(complete_lookup) + self.inner_context_was_finished = current_context().finished + return str(arg1) + + obj = Cls() + + async def do_lookup(): + with LoggingContext("c1") as c1: + try: + await obj.fn(123) + self.fail("No CancelledError thrown") + except CancelledError: + self.assertEqual( + current_context(), + c1, + "CancelledError was not raised with the correct logcontext", + ) + # suppress the error and succeed + + d = defer.ensureDeferred(do_lookup()) + d.cancel() + + complete_lookup.callback(None) + self.successResultOf(d) + self.assertFalse( + obj.inner_context_was_finished, "Tried to restart a finished logcontext" + ) + self.assertEqual(current_context(), SENTINEL_CONTEXT) + class CacheDecoratorTestCase(unittest.HomeserverTestCase): """More tests for @cached @@ -865,3 +933,78 @@ class CachedListDescriptorTestCase(unittest.TestCase): obj.fn.invalidate((10, 2)) invalidate0.assert_called_once() invalidate1.assert_called_once() + + def test_cancel(self): + """Test that cancelling a lookup does not cancel other lookups""" + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + @cached() + def fn(self, arg1): + pass + + @cachedList(cached_method_name="fn", list_name="args") + async def list_fn(self, args): + await complete_lookup + return {arg: str(arg) for arg in args} + + obj = Cls() + + d1 = obj.list_fn([123, 456]) + d2 = obj.list_fn([123, 456, 789]) + self.assertFalse(d1.called) + self.assertFalse(d2.called) + + d1.cancel() + + # `d2` should complete normally. + complete_lookup.callback(None) + self.failureResultOf(d1, CancelledError) + self.assertEqual(d2.result, {123: "123", 456: "456", 789: "789"}) + + def test_cancel_logcontexts(self): + """Test that cancellation does not break logcontexts. + + * The `CancelledError` must be raised with the correct logcontext. + * The inner lookup must not resume with a finished logcontext. + * The inner lookup must not restore a finished logcontext when done. 
+ """ + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + inner_context_was_finished = False + + @cached() + def fn(self, arg1): + pass + + @cachedList(cached_method_name="fn", list_name="args") + async def list_fn(self, args): + await make_deferred_yieldable(complete_lookup) + self.inner_context_was_finished = current_context().finished + return {arg: str(arg) for arg in args} + + obj = Cls() + + async def do_lookup(): + with LoggingContext("c1") as c1: + try: + await obj.list_fn([123]) + self.fail("No CancelledError thrown") + except CancelledError: + self.assertEqual( + current_context(), + c1, + "CancelledError was not raised with the correct logcontext", + ) + # suppress the error and succeed + + d = defer.ensureDeferred(do_lookup()) + d.cancel() + + complete_lookup.callback(None) + self.successResultOf(d) + self.assertFalse( + obj.inner_context_was_finished, "Tried to restart a finished logcontext" + ) + self.assertEqual(current_context(), SENTINEL_CONTEXT) -- cgit 1.4.1 From d1130a249b4f462a3e457b783b483d5a6c7486f0 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 15 Mar 2022 11:00:01 +0000 Subject: 1.55.0rc1 --- CHANGES.md | 82 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/11700.removal | 1 - changelog.d/11915.misc | 1 - changelog.d/11980.misc | 1 - changelog.d/11998.doc | 1 - changelog.d/12028.feature | 1 - changelog.d/12042.misc | 1 - changelog.d/12090.bugfix | 1 - changelog.d/12101.misc | 1 - changelog.d/12108.misc | 1 - changelog.d/12113.bugfix | 1 - changelog.d/12118.misc | 1 - changelog.d/12120.misc | 1 - changelog.d/12121.bugfix | 1 - changelog.d/12128.misc | 1 - changelog.d/12130.bugfix | 1 - changelog.d/12131.misc | 1 - changelog.d/12132.feature | 1 - changelog.d/12135.feature | 1 - changelog.d/12136.misc | 1 - changelog.d/12137.misc | 1 - changelog.d/12138.removal | 1 - changelog.d/12140.misc | 1 - changelog.d/12142.misc | 1 - changelog.d/12143.doc | 1 - changelog.d/12144.misc | 1 - changelog.d/12145.misc | 1 - changelog.d/12146.misc | 1 - changelog.d/12149.misc | 1 - changelog.d/12150.misc | 1 - changelog.d/12151.feature | 1 - changelog.d/12152.misc | 1 - changelog.d/12153.misc | 1 - changelog.d/12154.misc | 1 - changelog.d/12155.misc | 1 - changelog.d/12156.misc | 1 - changelog.d/12157.bugfix | 1 - changelog.d/12159.misc | 1 - changelog.d/12161.misc | 1 - changelog.d/12163.misc | 1 - changelog.d/12173.misc | 1 - changelog.d/12175.bugfix | 1 - changelog.d/12179.doc | 1 - changelog.d/12180.misc | 1 - changelog.d/12182.misc | 1 - changelog.d/12183.misc | 1 - changelog.d/12187.misc | 1 - changelog.d/12188.misc | 1 - changelog.d/12189.bugfix | 1 - changelog.d/12192.misc | 1 - changelog.d/12196.doc | 1 - changelog.d/12197.misc | 1 - changelog.d/12200.removal | 1 - changelog.d/12202.misc | 1 - changelog.d/12203.misc | 1 - changelog.d/12204.doc | 1 - changelog.d/12206.misc | 1 - changelog.d/12207.misc | 1 - changelog.d/12208.misc | 1 - changelog.d/12210.misc | 1 - changelog.d/12211.misc | 1 - changelog.d/12212.feature | 1 - changelog.d/12215.bugfix | 1 - debian/changelog | 6 ++++ synapse/__init__.py | 2 +- 65 files changed, 89 insertions(+), 63 deletions(-) delete mode 100644 changelog.d/11700.removal delete mode 100644 changelog.d/11915.misc delete mode 100644 changelog.d/11980.misc delete mode 100644 changelog.d/11998.doc delete mode 100644 changelog.d/12028.feature delete mode 100644 changelog.d/12042.misc delete mode 100644 changelog.d/12090.bugfix delete mode 100644 changelog.d/12101.misc delete mode 100644 changelog.d/12108.misc delete 
mode 100644 changelog.d/12113.bugfix delete mode 100644 changelog.d/12118.misc delete mode 100644 changelog.d/12120.misc delete mode 100644 changelog.d/12121.bugfix delete mode 100644 changelog.d/12128.misc delete mode 100644 changelog.d/12130.bugfix delete mode 100644 changelog.d/12131.misc delete mode 100644 changelog.d/12132.feature delete mode 100644 changelog.d/12135.feature delete mode 100644 changelog.d/12136.misc delete mode 100644 changelog.d/12137.misc delete mode 100644 changelog.d/12138.removal delete mode 100644 changelog.d/12140.misc delete mode 100644 changelog.d/12142.misc delete mode 100644 changelog.d/12143.doc delete mode 100644 changelog.d/12144.misc delete mode 100644 changelog.d/12145.misc delete mode 100644 changelog.d/12146.misc delete mode 100644 changelog.d/12149.misc delete mode 100644 changelog.d/12150.misc delete mode 100644 changelog.d/12151.feature delete mode 100644 changelog.d/12152.misc delete mode 100644 changelog.d/12153.misc delete mode 100644 changelog.d/12154.misc delete mode 100644 changelog.d/12155.misc delete mode 100644 changelog.d/12156.misc delete mode 100644 changelog.d/12157.bugfix delete mode 100644 changelog.d/12159.misc delete mode 100644 changelog.d/12161.misc delete mode 100644 changelog.d/12163.misc delete mode 100644 changelog.d/12173.misc delete mode 100644 changelog.d/12175.bugfix delete mode 100644 changelog.d/12179.doc delete mode 100644 changelog.d/12180.misc delete mode 100644 changelog.d/12182.misc delete mode 100644 changelog.d/12183.misc delete mode 100644 changelog.d/12187.misc delete mode 100644 changelog.d/12188.misc delete mode 100644 changelog.d/12189.bugfix delete mode 100644 changelog.d/12192.misc delete mode 100644 changelog.d/12196.doc delete mode 100644 changelog.d/12197.misc delete mode 100644 changelog.d/12200.removal delete mode 100644 changelog.d/12202.misc delete mode 100644 changelog.d/12203.misc delete mode 100644 changelog.d/12204.doc delete mode 100644 changelog.d/12206.misc delete mode 100644 changelog.d/12207.misc delete mode 100644 changelog.d/12208.misc delete mode 100644 changelog.d/12210.misc delete mode 100644 changelog.d/12211.misc delete mode 100644 changelog.d/12212.feature delete mode 100644 changelog.d/12215.bugfix diff --git a/CHANGES.md b/CHANGES.md index ef671e73f1..b0311f73bf 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,85 @@ +Synapse 1.55.0rc1 (2022-03-15) +============================== + +Features +-------- + +- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028)) +- Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132)) +- Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135)) +- Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151)) +- Add a new Jinja2 template filter to extract the local part of an email address. ([\#12212](https://github.com/matrix-org/synapse/issues/12212)) + + +Bugfixes +-------- + +- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090)) +- Fix a long-standing bug when redacting events with relations.
([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189)) +- Fix a bug introduced in #4864 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) +- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175)) +- Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) + + +Improved Documentation +---------------------- + +- Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. ([\#11998](https://github.com/matrix-org/synapse/issues/11998)) +- Improve documentation for demo scripts. ([\#12143](https://github.com/matrix-org/synapse/issues/12143)) +- Updates to the Room DAG concepts development document. ([\#12179](https://github.com/matrix-org/synapse/issues/12179)) +- Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. ([\#12196](https://github.com/matrix-org/synapse/issues/12196)) +- Document that contributors can sign off privately by email. ([\#12204](https://github.com/matrix-org/synapse/issues/12204)) + + +Deprecations and Removals +------------------------- + +- Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700)) +- Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) +- The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200)) + + +Internal Changes +---------------- + +- Simplify the `ApplicationService` class' set of public methods related to interest checking. ([\#11915](https://github.com/matrix-org/synapse/issues/11915)) +- Add config settings for background update parameters. ([\#11980](https://github.com/matrix-org/synapse/issues/11980)) +- Correct type hints for txredis. ([\#12042](https://github.com/matrix-org/synapse/issues/12042)) +- Limit the size of `aggregation_key` on annotations. ([\#12101](https://github.com/matrix-org/synapse/issues/12101)) +- Add type hints to tests files. ([\#12108](https://github.com/matrix-org/synapse/issues/12108), [\#12146](https://github.com/matrix-org/synapse/issues/12146), [\#12207](https://github.com/matrix-org/synapse/issues/12207), [\#12208](https://github.com/matrix-org/synapse/issues/12208)) +- Move scripts to Synapse package and expose as setuptools entry points. ([\#12118](https://github.com/matrix-org/synapse/issues/12118)) +- Add support for cancellation to `ReadWriteLock`. ([\#12120](https://github.com/matrix-org/synapse/issues/12120)) +- Fix data validation to compare to lists, not sequences. ([\#12128](https://github.com/matrix-org/synapse/issues/12128)) +- Fix CI not attaching source distributions and wheels to the GitHub releases.
([\#12131](https://github.com/matrix-org/synapse/issues/12131)) +- Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136)) +- Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137)) +- Move `synctl` into `synapse._scripts` and expose as an entry point. ([\#12140](https://github.com/matrix-org/synapse/issues/12140)) +- Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142)) +- Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144)) +- Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145)) +- Add test for `ObservableDeferred`'s cancellation behaviour. ([\#12149](https://github.com/matrix-org/synapse/issues/12149)) +- Use `ParamSpec` in type hints for `synapse.logging.context`. ([\#12150](https://github.com/matrix-org/synapse/issues/12150)) +- Prune unused jobs from `tox` config. ([\#12152](https://github.com/matrix-org/synapse/issues/12152)) +- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12153](https://github.com/matrix-org/synapse/issues/12153)) +- Avoid generating state groups for local out-of-band leaves. ([\#12154](https://github.com/matrix-org/synapse/issues/12154)) +- Avoid trying to calculate the state at outlier events. ([\#12155](https://github.com/matrix-org/synapse/issues/12155), [\#12173](https://github.com/matrix-org/synapse/issues/12173), [\#12202](https://github.com/matrix-org/synapse/issues/12202)) +- Fix some type annotations. ([\#12156](https://github.com/matrix-org/synapse/issues/12156)) +- Add type hints for `ObservableDeferred` attributes. ([\#12159](https://github.com/matrix-org/synapse/issues/12159)) +- Use a prebuilt Action for the `tests-done` CI job. ([\#12161](https://github.com/matrix-org/synapse/issues/12161)) +- Reduce number of DB queries made during processing of `/sync`. ([\#12163](https://github.com/matrix-org/synapse/issues/12163)) +- Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. ([\#12180](https://github.com/matrix-org/synapse/issues/12180)) +- Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. ([\#12182](https://github.com/matrix-org/synapse/issues/12182)) +- Add cancellation support to `@cached` and `@cachedList` decorators. ([\#12183](https://github.com/matrix-org/synapse/issues/12183)) +- Remove unused variables. ([\#12187](https://github.com/matrix-org/synapse/issues/12187)) +- Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. ([\#12188](https://github.com/matrix-org/synapse/issues/12188)) +- Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. ([\#12192](https://github.com/matrix-org/synapse/issues/12192)) +- Remove some dead code. ([\#12197](https://github.com/matrix-org/synapse/issues/12197)) +- Fix a misleading comment in the function `check_event_for_spam`. ([\#12203](https://github.com/matrix-org/synapse/issues/12203)) +- Remove unnecessary `pass` statements. 
([\#12206](https://github.com/matrix-org/synapse/issues/12206)) +- Update the SSO username picker template to comply with SIWA guidelines. ([\#12210](https://github.com/matrix-org/synapse/issues/12210)) +- Improve code documentation for the typing stream over replication. ([\#12211](https://github.com/matrix-org/synapse/issues/12211)) + + Synapse 1.54.0 (2022-03-08) =========================== diff --git a/changelog.d/11700.removal b/changelog.d/11700.removal deleted file mode 100644 index d3d3c48f0f..0000000000 --- a/changelog.d/11700.removal +++ /dev/null @@ -1 +0,0 @@ -Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. diff --git a/changelog.d/11915.misc b/changelog.d/11915.misc deleted file mode 100644 index e3cef1511e..0000000000 --- a/changelog.d/11915.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify the `ApplicationService` class' set of public methods related to interest checking. \ No newline at end of file diff --git a/changelog.d/11980.misc b/changelog.d/11980.misc deleted file mode 100644 index 36e992e645..0000000000 --- a/changelog.d/11980.misc +++ /dev/null @@ -1 +0,0 @@ -Add config settings for background update parameters. \ No newline at end of file diff --git a/changelog.d/11998.doc b/changelog.d/11998.doc deleted file mode 100644 index 33ab7b7880..0000000000 --- a/changelog.d/11998.doc +++ /dev/null @@ -1 +0,0 @@ -Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. \ No newline at end of file diff --git a/changelog.d/12028.feature b/changelog.d/12028.feature deleted file mode 100644 index 5549c8f6fc..0000000000 --- a/changelog.d/12028.feature +++ /dev/null @@ -1 +0,0 @@ -Add third-party rules rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. diff --git a/changelog.d/12042.misc b/changelog.d/12042.misc deleted file mode 100644 index 6ecdc96021..0000000000 --- a/changelog.d/12042.misc +++ /dev/null @@ -1 +0,0 @@ -Correct type hints for txredis. diff --git a/changelog.d/12090.bugfix b/changelog.d/12090.bugfix deleted file mode 100644 index 087065dcb1..0000000000 --- a/changelog.d/12090.bugfix +++ /dev/null @@ -1 +0,0 @@ -Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. diff --git a/changelog.d/12101.misc b/changelog.d/12101.misc deleted file mode 100644 index d165f73d13..0000000000 --- a/changelog.d/12101.misc +++ /dev/null @@ -1 +0,0 @@ -Limit the size of `aggregation_key` on annotations. diff --git a/changelog.d/12108.misc b/changelog.d/12108.misc deleted file mode 100644 index b67a701dbb..0000000000 --- a/changelog.d/12108.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. diff --git a/changelog.d/12113.bugfix b/changelog.d/12113.bugfix deleted file mode 100644 index df9b0dc413..0000000000 --- a/changelog.d/12113.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12118.misc b/changelog.d/12118.misc deleted file mode 100644 index a2c397d907..0000000000 --- a/changelog.d/12118.misc +++ /dev/null @@ -1 +0,0 @@ -Move scripts to Synapse package and expose as setuptools entry points. diff --git a/changelog.d/12120.misc b/changelog.d/12120.misc deleted file mode 100644 index 3603096500..0000000000 --- a/changelog.d/12120.misc +++ /dev/null @@ -1 +0,0 @@ -Add support for cancellation to `ReadWriteLock`. 
diff --git a/changelog.d/12121.bugfix b/changelog.d/12121.bugfix deleted file mode 100644 index df9b0dc413..0000000000 --- a/changelog.d/12121.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12128.misc b/changelog.d/12128.misc deleted file mode 100644 index 0570a8e327..0000000000 --- a/changelog.d/12128.misc +++ /dev/null @@ -1 +0,0 @@ -Fix data validation to compare to lists, not sequences. diff --git a/changelog.d/12130.bugfix b/changelog.d/12130.bugfix deleted file mode 100644 index df9b0dc413..0000000000 --- a/changelog.d/12130.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12131.misc b/changelog.d/12131.misc deleted file mode 100644 index 8ef23c22d5..0000000000 --- a/changelog.d/12131.misc +++ /dev/null @@ -1 +0,0 @@ -Fix CI not attaching source distributions and wheels to the GitHub releases. \ No newline at end of file diff --git a/changelog.d/12132.feature b/changelog.d/12132.feature deleted file mode 100644 index 3b8362ad35..0000000000 --- a/changelog.d/12132.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of logging in for large accounts. diff --git a/changelog.d/12135.feature b/changelog.d/12135.feature deleted file mode 100644 index b337f51730..0000000000 --- a/changelog.d/12135.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. diff --git a/changelog.d/12136.misc b/changelog.d/12136.misc deleted file mode 100644 index 98b1c1c9d8..0000000000 --- a/changelog.d/12136.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused mocks from `test_typing`. \ No newline at end of file diff --git a/changelog.d/12137.misc b/changelog.d/12137.misc deleted file mode 100644 index 118ff77a91..0000000000 --- a/changelog.d/12137.misc +++ /dev/null @@ -1 +0,0 @@ -Give `scripts-dev` scripts suffixes for neater CI config. \ No newline at end of file diff --git a/changelog.d/12138.removal b/changelog.d/12138.removal deleted file mode 100644 index 6ed84d476c..0000000000 --- a/changelog.d/12138.removal +++ /dev/null @@ -1 +0,0 @@ -Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. diff --git a/changelog.d/12140.misc b/changelog.d/12140.misc deleted file mode 100644 index 33a21a29f0..0000000000 --- a/changelog.d/12140.misc +++ /dev/null @@ -1 +0,0 @@ -Move `synctl` into `synapse._scripts` and expose as an entry point. \ No newline at end of file diff --git a/changelog.d/12142.misc b/changelog.d/12142.misc deleted file mode 100644 index 5d09f90b52..0000000000 --- a/changelog.d/12142.misc +++ /dev/null @@ -1 +0,0 @@ -Move the snapcraft configuration file to `contrib`. \ No newline at end of file diff --git a/changelog.d/12143.doc b/changelog.d/12143.doc deleted file mode 100644 index 4b9db74b1f..0000000000 --- a/changelog.d/12143.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation for demo scripts. diff --git a/changelog.d/12144.misc b/changelog.d/12144.misc deleted file mode 100644 index d8f71bb203..0000000000 --- a/changelog.d/12144.misc +++ /dev/null @@ -1 +0,0 @@ -Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI.
diff --git a/changelog.d/12145.misc b/changelog.d/12145.misc deleted file mode 100644 index 4092a2d66e..0000000000 --- a/changelog.d/12145.misc +++ /dev/null @@ -1 +0,0 @@ -Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. diff --git a/changelog.d/12146.misc b/changelog.d/12146.misc deleted file mode 100644 index b67a701dbb..0000000000 --- a/changelog.d/12146.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. diff --git a/changelog.d/12149.misc b/changelog.d/12149.misc deleted file mode 100644 index d39af96723..0000000000 --- a/changelog.d/12149.misc +++ /dev/null @@ -1 +0,0 @@ -Add test for `ObservableDeferred`'s cancellation behaviour. diff --git a/changelog.d/12150.misc b/changelog.d/12150.misc deleted file mode 100644 index 2d2706dac7..0000000000 --- a/changelog.d/12150.misc +++ /dev/null @@ -1 +0,0 @@ -Use `ParamSpec` in type hints for `synapse.logging.context`. diff --git a/changelog.d/12151.feature b/changelog.d/12151.feature deleted file mode 100644 index 18432b2da9..0000000000 --- a/changelog.d/12151.feature +++ /dev/null @@ -1 +0,0 @@ -Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. diff --git a/changelog.d/12152.misc b/changelog.d/12152.misc deleted file mode 100644 index b9877eaccb..0000000000 --- a/changelog.d/12152.misc +++ /dev/null @@ -1 +0,0 @@ -Prune unused jobs from `tox` config. \ No newline at end of file diff --git a/changelog.d/12153.misc b/changelog.d/12153.misc deleted file mode 100644 index f02d140f38..0000000000 --- a/changelog.d/12153.misc +++ /dev/null @@ -1 +0,0 @@ -Move CI checks out of tox, to facilitate a move to using poetry. \ No newline at end of file diff --git a/changelog.d/12154.misc b/changelog.d/12154.misc deleted file mode 100644 index 18d2a4728b..0000000000 --- a/changelog.d/12154.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid generating state groups for local out-of-band leaves. diff --git a/changelog.d/12155.misc b/changelog.d/12155.misc deleted file mode 100644 index 9f333e718a..0000000000 --- a/changelog.d/12155.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid trying to calculate the state at outlier events. diff --git a/changelog.d/12156.misc b/changelog.d/12156.misc deleted file mode 100644 index 4818d988d7..0000000000 --- a/changelog.d/12156.misc +++ /dev/null @@ -1 +0,0 @@ -Fix some type annotations. diff --git a/changelog.d/12157.bugfix b/changelog.d/12157.bugfix deleted file mode 100644 index c3d2e700bb..0000000000 --- a/changelog.d/12157.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in #4864 whereby background updates are never run with the default background batch size. diff --git a/changelog.d/12159.misc b/changelog.d/12159.misc deleted file mode 100644 index 30500f2fd9..0000000000 --- a/changelog.d/12159.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints for `ObservableDeferred` attributes. diff --git a/changelog.d/12161.misc b/changelog.d/12161.misc deleted file mode 100644 index 43eff08d46..0000000000 --- a/changelog.d/12161.misc +++ /dev/null @@ -1 +0,0 @@ -Use a prebuilt Action for the `tests-done` CI job. diff --git a/changelog.d/12163.misc b/changelog.d/12163.misc deleted file mode 100644 index 13de0895f5..0000000000 --- a/changelog.d/12163.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce number of DB queries made during processing of `/sync`. 
diff --git a/changelog.d/12173.misc b/changelog.d/12173.misc deleted file mode 100644 index 9f333e718a..0000000000 --- a/changelog.d/12173.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid trying to calculate the state at outlier events. diff --git a/changelog.d/12175.bugfix b/changelog.d/12175.bugfix deleted file mode 100644 index 881cb9b76c..0000000000 --- a/changelog.d/12175.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. diff --git a/changelog.d/12179.doc b/changelog.d/12179.doc deleted file mode 100644 index 55d8caa45a..0000000000 --- a/changelog.d/12179.doc +++ /dev/null @@ -1 +0,0 @@ -Updates to the Room DAG concepts development document. diff --git a/changelog.d/12180.misc b/changelog.d/12180.misc deleted file mode 100644 index 7a347352fd..0000000000 --- a/changelog.d/12180.misc +++ /dev/null @@ -1 +0,0 @@ -Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. diff --git a/changelog.d/12182.misc b/changelog.d/12182.misc deleted file mode 100644 index 7e9ad2c752..0000000000 --- a/changelog.d/12182.misc +++ /dev/null @@ -1 +0,0 @@ -Retry HTTP replication failures; this should prevent 502s when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. diff --git a/changelog.d/12183.misc b/changelog.d/12183.misc deleted file mode 100644 index dd441bb64f..0000000000 --- a/changelog.d/12183.misc +++ /dev/null @@ -1 +0,0 @@ -Add cancellation support to `@cached` and `@cachedList` decorators. diff --git a/changelog.d/12187.misc b/changelog.d/12187.misc deleted file mode 100644 index c53e68faa5..0000000000 --- a/changelog.d/12187.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused variables. diff --git a/changelog.d/12188.misc b/changelog.d/12188.misc deleted file mode 100644 index 403158481c..0000000000 --- a/changelog.d/12188.misc +++ /dev/null @@ -1 +0,0 @@ -Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. diff --git a/changelog.d/12189.bugfix b/changelog.d/12189.bugfix deleted file mode 100644 index df9b0dc413..0000000000 --- a/changelog.d/12189.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12192.misc b/changelog.d/12192.misc deleted file mode 100644 index bdfe8dad98..0000000000 --- a/changelog.d/12192.misc +++ /dev/null @@ -1 +0,0 @@ -Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. diff --git a/changelog.d/12196.doc b/changelog.d/12196.doc deleted file mode 100644 index 269f06aa33..0000000000 --- a/changelog.d/12196.doc +++ /dev/null @@ -1 +0,0 @@ -Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writers can only be used on a single worker. \ No newline at end of file diff --git a/changelog.d/12197.misc b/changelog.d/12197.misc deleted file mode 100644 index 7d0e9b6bbf..0000000000 --- a/changelog.d/12197.misc +++ /dev/null @@ -1 +0,0 @@ -Remove some dead code. diff --git a/changelog.d/12200.removal b/changelog.d/12200.removal deleted file mode 100644 index 312c7ae325..0000000000 --- a/changelog.d/12200.removal +++ /dev/null @@ -1 +0,0 @@ -The groups/communities feature in Synapse has been deprecated.
diff --git a/changelog.d/12202.misc b/changelog.d/12202.misc deleted file mode 100644 index 9f333e718a..0000000000 --- a/changelog.d/12202.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid trying to calculate the state at outlier events. diff --git a/changelog.d/12203.misc b/changelog.d/12203.misc deleted file mode 100644 index 892dc5bfb7..0000000000 --- a/changelog.d/12203.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a misleading comment in the function `check_event_for_spam`. diff --git a/changelog.d/12204.doc b/changelog.d/12204.doc deleted file mode 100644 index c4b2805bb1..0000000000 --- a/changelog.d/12204.doc +++ /dev/null @@ -1 +0,0 @@ -Document that contributors can sign off privately by email. diff --git a/changelog.d/12206.misc b/changelog.d/12206.misc deleted file mode 100644 index df59bb56cd..0000000000 --- a/changelog.d/12206.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary `pass` statements. diff --git a/changelog.d/12207.misc b/changelog.d/12207.misc deleted file mode 100644 index b67a701dbb..0000000000 --- a/changelog.d/12207.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. diff --git a/changelog.d/12208.misc b/changelog.d/12208.misc deleted file mode 100644 index c5b6356799..0000000000 --- a/changelog.d/12208.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. \ No newline at end of file diff --git a/changelog.d/12210.misc b/changelog.d/12210.misc deleted file mode 100644 index 3f6a8747c2..0000000000 --- a/changelog.d/12210.misc +++ /dev/null @@ -1 +0,0 @@ -Update the SSO username picker template to comply with SIWA guidelines. diff --git a/changelog.d/12211.misc b/changelog.d/12211.misc deleted file mode 100644 index d11634a1ee..0000000000 --- a/changelog.d/12211.misc +++ /dev/null @@ -1 +0,0 @@ -Improve code documentation for the typing stream over replication. \ No newline at end of file diff --git a/changelog.d/12212.feature b/changelog.d/12212.feature deleted file mode 100644 index fe337ff990..0000000000 --- a/changelog.d/12212.feature +++ /dev/null @@ -1 +0,0 @@ -Add a new Jinja2 template filter to extract the local part of an email address. diff --git a/changelog.d/12215.bugfix b/changelog.d/12215.bugfix deleted file mode 100644 index 593b12556b..0000000000 --- a/changelog.d/12215.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. diff --git a/debian/changelog b/debian/changelog index 02136a0d60..09ef24ebb0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.55.0~rc1) stable; urgency=medium + + * New synapse release 1.55.0~rc1. + + -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Mar 2022 10:59:31 +0000 + matrix-synapse-py3 (1.54.0) stable; urgency=medium * New synapse release 1.54.0.
diff --git a/synapse/__init__.py b/synapse/__init__.py index 4b00565976..870707f476 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -68,7 +68,7 @@ try: except ImportError: pass -__version__ = "1.54.0" +__version__ = "1.55.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when -- cgit 1.4.1 From 9e90d643e6947c2fa4286c21a6351061cf510026 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 15 Mar 2022 11:16:36 +0000 Subject: Changelog tweaks --- CHANGES.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index b0311f73bf..60e7ecb1b9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,12 @@ Synapse 1.55.0rc1 (2022-03-15) ============================== +This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version. + Features -------- -- Add third-party rules rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028)) +- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028)) - Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132)) - Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135)) - Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151)) @@ -16,9 +18,9 @@ Bugfixes - Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090)) - Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189)) -- Fix a bug introduced in #4864 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) +- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) - Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175)) -- Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) +- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) Improved Documentation @@ -34,7 +36,7 @@ Improved Documentation Deprecations and Removals ------------------------- -- Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. 
Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700)) +- **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))** - Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) - The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200)) -- cgit 1.4.1 From 5dd949bee6158a8b651db9f2ae417a62c8184bfd Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 15 Mar 2022 14:16:37 +0100 Subject: Add type hints to some tests/handlers files. (#12224) --- changelog.d/12224.misc | 1 + mypy.ini | 5 --- tests/handlers/test_directory.py | 84 +++++++++++++++++++---------------- tests/handlers/test_e2e_keys.py | 36 ++++++++------- tests/handlers/test_oidc.py | 94 +++++++++++++++++++++------------------- tests/handlers/test_profile.py | 43 ++++++++++-------- tests/handlers/test_saml.py | 24 +++++----- 7 files changed, 156 insertions(+), 131 deletions(-) create mode 100644 changelog.d/12224.misc diff --git a/changelog.d/12224.misc b/changelog.d/12224.misc new file mode 100644 index 0000000000..b67a701dbb --- /dev/null +++ b/changelog.d/12224.misc @@ -0,0 +1 @@ +Add type hints to tests files. diff --git a/mypy.ini b/mypy.ini index f9c39fcaae..fe31bfb8bb 100644 --- a/mypy.ini +++ b/mypy.ini @@ -67,13 +67,8 @@ exclude = (?x) |tests/federation/transport/test_knocking.py |tests/federation/transport/test_server.py |tests/handlers/test_cas.py - |tests/handlers/test_directory.py - |tests/handlers/test_e2e_keys.py |tests/handlers/test_federation.py - |tests/handlers/test_oidc.py |tests/handlers/test_presence.py - |tests/handlers/test_profile.py - |tests/handlers/test_saml.py |tests/handlers/test_typing.py |tests/http/federation/test_matrix_federation_agent.py |tests/http/federation/test_srv_resolver.py diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 6e403a87c5..11ad44223d 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -12,14 +12,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
- +from typing import Any, Awaitable, Callable, Dict from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + import synapse.api.errors import synapse.rest.admin from synapse.api.constants import EventTypes from synapse.rest.client import directory, login, room -from synapse.types import RoomAlias, create_requester +from synapse.server import HomeServer +from synapse.types import JsonDict, RoomAlias, create_requester +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable @@ -28,13 +32,15 @@ from tests.test_utils import make_awaitable class DirectoryTestCase(unittest.HomeserverTestCase): """Tests the directory service.""" - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.mock_federation = Mock() self.mock_registry = Mock() - self.query_handlers = {} + self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} - def register_query_handler(query_type, handler): + def register_query_handler( + query_type: str, handler: Callable[[dict], Awaitable[JsonDict]] + ) -> None: self.query_handlers[query_type] = handler self.mock_registry.register_query_handler = register_query_handler @@ -54,7 +60,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): return hs - def test_get_local_association(self): + def test_get_local_association(self) -> None: self.get_success( self.store.create_room_alias_association( self.my_room, "!8765qwer:test", ["test"] @@ -65,7 +71,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.assertEqual({"room_id": "!8765qwer:test", "servers": ["test"]}, result) - def test_get_remote_association(self): + def test_get_remote_association(self) -> None: self.mock_federation.make_query.return_value = make_awaitable( {"room_id": "!8765qwer:test", "servers": ["test", "remote"]} ) @@ -83,7 +89,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): ignore_backoff=True, ) - def test_incoming_fed_query(self): + def test_incoming_fed_query(self) -> None: self.get_success( self.store.create_room_alias_association( self.your_room, "!8765asdf:test", ["test"] @@ -105,7 +111,7 @@ class TestCreateAlias(unittest.HomeserverTestCase): directory.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = hs.get_directory_handler() # Create user @@ -125,7 +131,7 @@ class TestCreateAlias(unittest.HomeserverTestCase): self.test_user_tok = self.login("user", "pass") self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok) - def test_create_alias_joined_room(self): + def test_create_alias_joined_room(self) -> None: """A user can create an alias for a room they're in.""" self.get_success( self.handler.create_association( @@ -135,7 +141,7 @@ class TestCreateAlias(unittest.HomeserverTestCase): ) ) - def test_create_alias_other_room(self): + def test_create_alias_other_room(self) -> None: """A user cannot create an alias for a room they're NOT in.""" other_room_id = self.helper.create_room_as( self.admin_user, tok=self.admin_user_tok @@ -150,7 +156,7 @@ class TestCreateAlias(unittest.HomeserverTestCase): synapse.api.errors.SynapseError, ) - def test_create_alias_admin(self): + def test_create_alias_admin(self) -> None: """An admin can create an alias for a room they're NOT in.""" other_room_id = self.helper.create_room_as( self.test_user, tok=self.test_user_tok @@ -173,7 +179,7 @@ class 
TestDeleteAlias(unittest.HomeserverTestCase): directory.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.handler = hs.get_directory_handler() self.state_handler = hs.get_state_handler() @@ -195,7 +201,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): self.test_user_tok = self.login("user", "pass") self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok) - def _create_alias(self, user): + def _create_alias(self, user) -> None: # Create a new alias to this room. self.get_success( self.store.create_room_alias_association( @@ -203,7 +209,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): ) ) - def test_delete_alias_not_allowed(self): + def test_delete_alias_not_allowed(self) -> None: """A user that doesn't meet the expected guidelines cannot delete an alias.""" self._create_alias(self.admin_user) self.get_failure( @@ -213,7 +219,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): synapse.api.errors.AuthError, ) - def test_delete_alias_creator(self): + def test_delete_alias_creator(self) -> None: """An alias creator can delete their own alias.""" # Create an alias from a different user. self._create_alias(self.test_user) @@ -232,7 +238,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): synapse.api.errors.SynapseError, ) - def test_delete_alias_admin(self): + def test_delete_alias_admin(self) -> None: """A server admin can delete an alias created by another user.""" # Create an alias from a different user. self._create_alias(self.test_user) @@ -251,7 +257,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): synapse.api.errors.SynapseError, ) - def test_delete_alias_sufficient_power(self): + def test_delete_alias_sufficient_power(self) -> None: """A user with a sufficient power level should be able to delete an alias.""" self._create_alias(self.admin_user) @@ -288,7 +294,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): directory.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.handler = hs.get_directory_handler() self.state_handler = hs.get_state_handler() @@ -317,7 +323,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): ) return room_alias - def _set_canonical_alias(self, content): + def _set_canonical_alias(self, content) -> None: """Configure the canonical alias state on the room.""" self.helper.send_state( self.room_id, @@ -334,7 +340,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): ) ) - def test_remove_alias(self): + def test_remove_alias(self) -> None: """Removing an alias that is the canonical alias should remove it there too.""" # Set this new alias as the canonical alias for this room self._set_canonical_alias( @@ -356,7 +362,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): self.assertNotIn("alias", data["content"]) self.assertNotIn("alt_aliases", data["content"]) - def test_remove_other_alias(self): + def test_remove_other_alias(self) -> None: """Removing an alias listed as in alt_aliases should remove it there too.""" # Create a second alias. 
other_test_alias = "#test2:test" @@ -393,7 +399,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): servlets = [directory.register_servlets, room.register_servlets] - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() # Add custom alias creation rules to the config. @@ -403,7 +409,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): return config - def test_denied(self): + def test_denied(self) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request( @@ -413,7 +419,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): ) self.assertEqual(403, channel.code, channel.result) - def test_allowed(self): + def test_allowed(self) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request( @@ -423,7 +429,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): ) self.assertEqual(200, channel.code, channel.result) - def test_denied_during_creation(self): + def test_denied_during_creation(self) -> None: """A room alias that is not allowed should be rejected during creation.""" # Invalid room alias. self.helper.create_room_as( @@ -432,7 +438,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): extra_content={"room_alias_name": "foo"}, ) - def test_allowed_during_creation(self): + def test_allowed_during_creation(self) -> None: """A valid room alias should be allowed during creation.""" room_id = self.helper.create_room_as( self.user_id, @@ -459,7 +465,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): data = {"room_alias_name": "unofficial_test"} allowed_localpart = "allowed" - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() # Add custom room list publication rules to the config. @@ -474,7 +480,9 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): return config - def prepare(self, reactor, clock, hs): + def prepare( + self, reactor: MemoryReactor, clock: Clock, hs: HomeServer + ) -> HomeServer: self.allowed_user_id = self.register_user(self.allowed_localpart, "pass") self.allowed_access_token = self.login(self.allowed_localpart, "pass") @@ -483,7 +491,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): return hs - def test_denied_without_publication_permission(self): + def test_denied_without_publication_permission(self) -> None: """ Try to create a room, register an alias for it, and publish it, as a user without permission to publish rooms. @@ -497,7 +505,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): expect_code=403, ) - def test_allowed_when_creating_private_room(self): + def test_allowed_when_creating_private_room(self) -> None: """ Try to create a room, register an alias for it, and NOT publish it, as a user without permission to publish rooms. @@ -511,7 +519,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): expect_code=200, ) - def test_allowed_with_publication_permission(self): + def test_allowed_with_publication_permission(self) -> None: """ Try to create a room, register an alias for it, and publish it, as a user WITH permission to publish rooms. @@ -525,7 +533,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): expect_code=200, ) - def test_denied_publication_with_invalid_alias(self): + def test_denied_publication_with_invalid_alias(self) -> None: """ Try to create a room, register an alias for it, and publish it, as a user WITH permission to publish rooms. 
@@ -538,7 +546,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): expect_code=403, ) - def test_can_create_as_private_room_after_rejection(self): + def test_can_create_as_private_room_after_rejection(self) -> None: """ After failing to publish a room with an alias as a user without publish permission, retry as the same user, but without publishing the room. @@ -549,7 +557,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): self.test_denied_without_publication_permission() self.test_allowed_when_creating_private_room() - def test_can_create_with_permission_after_rejection(self): + def test_can_create_with_permission_after_rejection(self) -> None: """ After failing to publish a room with an alias as a user without publish permission, retry as someone with permission, using the same alias. @@ -566,7 +574,9 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): servlets = [directory.register_servlets, room.register_servlets] - def prepare(self, reactor, clock, hs): + def prepare( + self, reactor: MemoryReactor, clock: Clock, hs: HomeServer + ) -> HomeServer: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request( @@ -579,7 +589,7 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): return hs - def test_disabling_room_list(self): + def test_disabling_room_list(self) -> None: self.room_list_handler.enable_room_list_search = True self.directory_handler.enable_room_list_search = True diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 9338ab92e9..ac21a28c43 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -20,33 +20,37 @@ from parameterized import parameterized from signedjson import key as key, sign as sign from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import RoomEncryptionAlgorithms from synapse.api.errors import Codes, SynapseError +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return self.setup_test_homeserver(federation_client=mock.Mock()) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = hs.get_e2e_keys_handler() self.store = self.hs.get_datastores().main - def test_query_local_devices_no_devices(self): + def test_query_local_devices_no_devices(self) -> None: """If the user has no devices, we expect an empty list.""" local_user = "@boris:" + self.hs.hostname res = self.get_success(self.handler.query_local_devices({local_user: None})) self.assertDictEqual(res, {local_user: {}}) - def test_reupload_one_time_keys(self): + def test_reupload_one_time_keys(self) -> None: """we should be able to re-upload the same keys""" local_user = "@boris:" + self.hs.hostname device_id = "xyz" - keys = { + keys: JsonDict = { "alg1:k1": "key1", "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}}, "alg2:k3": {"key": "key3"}, @@ -74,7 +78,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): res, {"one_time_key_counts": {"alg1": 1, "alg2": 2, "signed_curve25519": 0}} ) - def test_change_one_time_keys(self): + def test_change_one_time_keys(self) -> None: """attempts to change 
one-time-keys should be rejected""" local_user = "@boris:" + self.hs.hostname @@ -134,7 +138,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): SynapseError, ) - def test_claim_one_time_key(self): + def test_claim_one_time_key(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" keys = {"alg1:k1": "key1"} @@ -161,7 +165,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): }, ) - def test_fallback_key(self): + def test_fallback_key(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" fallback_key = {"alg1:k1": "fallback_key1"} @@ -294,7 +298,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}}, ) - def test_replace_master_key(self): + def test_replace_master_key(self) -> None: """uploading a new signing key should make the old signing key unavailable""" local_user = "@boris:" + self.hs.hostname keys1 = { @@ -328,7 +332,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): ) self.assertDictEqual(devices["master_keys"], {local_user: keys2["master_key"]}) - def test_reupload_signatures(self): + def test_reupload_signatures(self) -> None: """re-uploading a signature should not fail""" local_user = "@boris:" + self.hs.hostname keys1 = { @@ -433,7 +437,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): self.assertDictEqual(devices["device_keys"][local_user]["abc"], device_key_1) self.assertDictEqual(devices["device_keys"][local_user]["def"], device_key_2) - def test_self_signing_key_doesnt_show_up_as_device(self): + def test_self_signing_key_doesnt_show_up_as_device(self) -> None: """signing keys should be hidden when fetching a user's devices""" local_user = "@boris:" + self.hs.hostname keys1 = { @@ -462,7 +466,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): res = self.get_success(self.handler.query_local_devices({local_user: None})) self.assertDictEqual(res, {local_user: {}}) - def test_upload_signatures(self): + def test_upload_signatures(self) -> None: """should check signatures that are uploaded""" # set up a user with cross-signing keys and a device. This user will # try uploading signatures @@ -686,7 +690,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): other_master_key["signatures"][local_user]["ed25519:" + usersigning_pubkey], ) - def test_query_devices_remote_no_sync(self): + def test_query_devices_remote_no_sync(self) -> None: """Tests that querying keys for a remote user that we don't share a room with returns the cross signing keys correctly. """ @@ -759,7 +763,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): }, ) - def test_query_devices_remote_sync(self): + def test_query_devices_remote_sync(self) -> None: """Tests that querying keys for a remote user that we share a room with, but haven't yet fetched the keys for, returns the cross signing keys correctly. @@ -845,7 +849,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): (["device_1", "device_2"],), ] ) - def test_query_all_devices_caches_result(self, device_ids: Iterable[str]): + def test_query_all_devices_caches_result(self, device_ids: Iterable[str]) -> None: """Test that requests for all of a remote user's devices are cached. 
We do this by asserting that only one call over federation was made, and that @@ -853,7 +857,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): """ local_user_id = "@test:test" remote_user_id = "@test:other" - request_body = {"device_keys": {remote_user_id: []}} + request_body: JsonDict = {"device_keys": {remote_user_id: []}} response_devices = [ { diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index e8418b6638..014815db6e 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -13,14 +13,18 @@ # limitations under the License. import json import os +from typing import Any, Dict from unittest.mock import ANY, Mock, patch from urllib.parse import parse_qs, urlparse import pymacaroons +from twisted.test.proto_helpers import MemoryReactor + from synapse.handlers.sso import MappingException from synapse.server import HomeServer -from synapse.types import UserID +from synapse.types import JsonDict, UserID +from synapse.util import Clock from synapse.util.macaroons import get_value_from_macaroon from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock @@ -98,7 +102,7 @@ class TestMappingProviderFailures(TestMappingProvider): } -async def get_json(url): +async def get_json(url: str) -> JsonDict: # Mock get_json calls to handle jwks & oidc discovery endpoints if url == WELL_KNOWN: # Minimal discovery document, as defined in OpenID.Discovery @@ -116,6 +120,8 @@ async def get_json(url): elif url == JWKS_URI: return {"keys": []} + return {} + def _key_file_path() -> str: """path to a file containing the private half of a test key""" @@ -147,12 +153,12 @@ class OidcHandlerTestCase(HomeserverTestCase): if not HAS_OIDC: skip = "requires OIDC" - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL return config - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.http_client = Mock(spec=["get_json"]) self.http_client.get_json.side_effect = get_json self.http_client.user_agent = b"Synapse Test" @@ -164,7 +170,7 @@ class OidcHandlerTestCase(HomeserverTestCase): sso_handler = hs.get_sso_handler() # Mock the render error method. self.render_error = Mock(return_value=None) - sso_handler.render_error = self.render_error + sso_handler.render_error = self.render_error # type: ignore[assignment] # Reduce the number of attempts when generating MXIDs. 
sso_handler._MAP_USERNAME_RETRIES = 3 @@ -193,14 +199,14 @@ class OidcHandlerTestCase(HomeserverTestCase): return args @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_config(self): + def test_config(self) -> None: """Basic config correctly sets up the callback URL and client auth correctly.""" self.assertEqual(self.provider._callback_url, CALLBACK_URL) self.assertEqual(self.provider._client_auth.client_id, CLIENT_ID) self.assertEqual(self.provider._client_auth.client_secret, CLIENT_SECRET) @override_config({"oidc_config": {**DEFAULT_CONFIG, "discover": True}}) - def test_discovery(self): + def test_discovery(self) -> None: """The handler should discover the endpoints from OIDC discovery document.""" # This would throw if some metadata were invalid metadata = self.get_success(self.provider.load_metadata()) @@ -219,13 +225,13 @@ class OidcHandlerTestCase(HomeserverTestCase): self.http_client.get_json.assert_not_called() @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG}) - def test_no_discovery(self): + def test_no_discovery(self) -> None: """When discovery is disabled, it should not try to load from discovery document.""" self.get_success(self.provider.load_metadata()) self.http_client.get_json.assert_not_called() @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG}) - def test_load_jwks(self): + def test_load_jwks(self) -> None: """JWKS loading is done once (then cached) if used.""" jwks = self.get_success(self.provider.load_jwks()) self.http_client.get_json.assert_called_once_with(JWKS_URI) @@ -253,7 +259,7 @@ class OidcHandlerTestCase(HomeserverTestCase): self.get_failure(self.provider.load_jwks(force=True), RuntimeError) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_validate_config(self): + def test_validate_config(self) -> None: """Provider metadatas are extensively validated.""" h = self.provider @@ -336,14 +342,14 @@ class OidcHandlerTestCase(HomeserverTestCase): force_load_metadata() @override_config({"oidc_config": {**DEFAULT_CONFIG, "skip_verification": True}}) - def test_skip_verification(self): + def test_skip_verification(self) -> None: """Provider metadata validation can be disabled by config.""" with self.metadata_edit({"issuer": "http://insecure"}): # This should not throw get_awaitable_result(self.provider.load_metadata()) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_redirect_request(self): + def test_redirect_request(self) -> None: """The redirect request has the right arguments & generates a valid session cookie.""" req = Mock(spec=["cookies"]) req.cookies = [] @@ -387,7 +393,7 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertEqual(redirect, "http://client/redirect") @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_callback_error(self): + def test_callback_error(self) -> None: """Errors from the provider returned in the callback are displayed.""" request = Mock(args={}) request.args[b"error"] = [b"invalid_client"] @@ -399,7 +405,7 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertRenderedError("invalid_client", "some description") @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_callback(self): + def test_callback(self) -> None: """Code callback works and display errors if something went wrong. 
A lot of scenarios are tested here: @@ -428,9 +434,9 @@ class OidcHandlerTestCase(HomeserverTestCase): "username": username, } expected_user_id = "@%s:%s" % (username, self.hs.hostname) - self.provider._exchange_code = simple_async_mock(return_value=token) - self.provider._parse_id_token = simple_async_mock(return_value=userinfo) - self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo) + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] + self.provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] + self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment] auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -468,7 +474,7 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertRenderedError("mapping_error") # Handle ID token errors - self.provider._parse_id_token = simple_async_mock(raises=Exception()) + self.provider._parse_id_token = simple_async_mock(raises=Exception()) # type: ignore[assignment] self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_token") @@ -483,7 +489,7 @@ class OidcHandlerTestCase(HomeserverTestCase): "type": "bearer", "access_token": "access_token", } - self.provider._exchange_code = simple_async_mock(return_value=token) + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] self.get_success(self.handler.handle_oidc_callback(request)) auth_handler.complete_sso_login.assert_called_once_with( @@ -510,8 +516,8 @@ class OidcHandlerTestCase(HomeserverTestCase): id_token = { "sid": "abcdefgh", } - self.provider._parse_id_token = simple_async_mock(return_value=id_token) - self.provider._exchange_code = simple_async_mock(return_value=token) + self.provider._parse_id_token = simple_async_mock(return_value=id_token) # type: ignore[assignment] + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] auth_handler.complete_sso_login.reset_mock() self.provider._fetch_userinfo.reset_mock() self.get_success(self.handler.handle_oidc_callback(request)) @@ -531,21 +537,21 @@ class OidcHandlerTestCase(HomeserverTestCase): self.render_error.assert_not_called() # Handle userinfo fetching error - self.provider._fetch_userinfo = simple_async_mock(raises=Exception()) + self.provider._fetch_userinfo = simple_async_mock(raises=Exception()) # type: ignore[assignment] self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("fetch_error") # Handle code exchange failure from synapse.handlers.oidc import OidcError - self.provider._exchange_code = simple_async_mock( + self.provider._exchange_code = simple_async_mock( # type: ignore[assignment] raises=OidcError("invalid_request") ) self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request") @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_callback_session(self): + def test_callback_session(self) -> None: """The callback verifies the session presence and validity""" request = Mock(spec=["args", "getCookie", "cookies"]) @@ -590,7 +596,7 @@ class OidcHandlerTestCase(HomeserverTestCase): @override_config( {"oidc_config": {**DEFAULT_CONFIG, "client_auth_method": "client_secret_post"}} ) - def test_exchange_code(self): + def test_exchange_code(self) -> None: """Code exchange behaves correctly and handles various error scenarios.""" token = {"type": 
"bearer"} token_json = json.dumps(token).encode("utf-8") @@ -686,7 +692,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_exchange_code_jwt_key(self): + def test_exchange_code_jwt_key(self) -> None: """Test that code exchange works with a JWK client secret.""" from authlib.jose import jwt @@ -741,7 +747,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_exchange_code_no_auth(self): + def test_exchange_code_no_auth(self) -> None: """Test that code exchange works with no client secret.""" token = {"type": "bearer"} self.http_client.request = simple_async_mock( @@ -776,7 +782,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_extra_attributes(self): + def test_extra_attributes(self) -> None: """ Login while using a mapping provider that implements get_extra_attributes. """ @@ -790,8 +796,8 @@ class OidcHandlerTestCase(HomeserverTestCase): "username": "foo", "phone": "1234567", } - self.provider._exchange_code = simple_async_mock(return_value=token) - self.provider._parse_id_token = simple_async_mock(return_value=userinfo) + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] + self.provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -817,12 +823,12 @@ class OidcHandlerTestCase(HomeserverTestCase): ) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_map_userinfo_to_user(self): + def test_map_userinfo_to_user(self) -> None: """Ensure that mapping the userinfo returned from a provider to an MXID works properly.""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() - userinfo = { + userinfo: dict = { "sub": "test_user", "username": "test_user", } @@ -870,7 +876,7 @@ class OidcHandlerTestCase(HomeserverTestCase): ) @override_config({"oidc_config": {**DEFAULT_CONFIG, "allow_existing_users": True}}) - def test_map_userinfo_to_existing_user(self): + def test_map_userinfo_to_existing_user(self) -> None: """Existing users can log in with OpenID Connect when allow_existing_users is True.""" store = self.hs.get_datastores().main user = UserID.from_string("@test_user:test") @@ -974,7 +980,7 @@ class OidcHandlerTestCase(HomeserverTestCase): ) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_map_userinfo_to_invalid_localpart(self): + def test_map_userinfo_to_invalid_localpart(self) -> None: """If the mapping provider generates an invalid localpart it should be rejected.""" self.get_success( _make_callback_with_userinfo(self.hs, {"sub": "test2", "username": "föö"}) @@ -991,7 +997,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_map_userinfo_to_user_retries(self): + def test_map_userinfo_to_user_retries(self) -> None: """The mapping provider can retry generating an MXID if the MXID is already in use.""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -1039,7 +1045,7 @@ class OidcHandlerTestCase(HomeserverTestCase): ) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_empty_localpart(self): + def test_empty_localpart(self) -> None: """Attempts to map onto an empty localpart should be rejected.""" userinfo = { "sub": "tester", @@ -1058,7 +1064,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_null_localpart(self): + def test_null_localpart(self) -> None: """Mapping onto a null localpart via an empty OIDC 
attribute should be rejected""" userinfo = { "sub": "tester", @@ -1075,7 +1081,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_attribute_requirements(self): + def test_attribute_requirements(self) -> None: """The required attributes must be met from the OIDC userinfo response.""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -1115,7 +1121,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_attribute_requirements_contains(self): + def test_attribute_requirements_contains(self) -> None: """Test that auth succeeds if userinfo attribute CONTAINS required value""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -1146,7 +1152,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_attribute_requirements_mismatch(self): + def test_attribute_requirements_mismatch(self) -> None: """ Test that auth fails if attributes exist but don't match, or are non-string values. @@ -1154,7 +1160,7 @@ class OidcHandlerTestCase(HomeserverTestCase): auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() # userinfo with "test": "not_foobar" attribute should fail - userinfo = { + userinfo: dict = { "sub": "tester", "username": "tester", "test": "not_foobar", @@ -1248,9 +1254,9 @@ async def _make_callback_with_userinfo( handler = hs.get_oidc_handler() provider = handler._providers["oidc"] - provider._exchange_code = simple_async_mock(return_value={"id_token": ""}) - provider._parse_id_token = simple_async_mock(return_value=userinfo) - provider._fetch_userinfo = simple_async_mock(return_value=userinfo) + provider._exchange_code = simple_async_mock(return_value={"id_token": ""}) # type: ignore[assignment] + provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] + provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment] state = "state" session = handler._token_generator.generate_oidc_session_token( diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 972cbac6e4..08733a9f2d 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -11,14 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict +from typing import Any, Awaitable, Callable, Dict from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + import synapse.types from synapse.api.errors import AuthError, SynapseError from synapse.rest import admin from synapse.server import HomeServer -from synapse.types import UserID +from synapse.types import JsonDict, UserID +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable @@ -29,13 +32,15 @@ class ProfileTestCase(unittest.HomeserverTestCase): servlets = [admin.register_servlets] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.mock_federation = Mock() self.mock_registry = Mock() - self.query_handlers = {} + self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} - def register_query_handler(query_type, handler): + def register_query_handler( + query_type: str, handler: Callable[[dict], Awaitable[JsonDict]] + ) -> None: self.query_handlers[query_type] = handler self.mock_registry.register_query_handler = register_query_handler @@ -47,7 +52,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) return hs - def prepare(self, reactor, clock, hs: HomeServer): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.frank = UserID.from_string("@1234abcd:test") @@ -58,7 +63,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.handler = hs.get_profile_handler() - def test_get_my_name(self): + def test_get_my_name(self) -> None: self.get_success( self.store.set_profile_displayname(self.frank.localpart, "Frank") ) @@ -67,7 +72,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertEqual("Frank", displayname) - def test_set_my_name(self): + def test_set_my_name(self) -> None: self.get_success( self.handler.set_displayname( self.frank, synapse.types.create_requester(self.frank), "Frank Jr." @@ -110,7 +115,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.get_success(self.store.get_profile_displayname(self.frank.localpart)) ) - def test_set_my_name_if_disabled(self): + def test_set_my_name_if_disabled(self) -> None: self.hs.config.registration.enable_set_displayname = False # Setting displayname for the first time is allowed @@ -135,7 +140,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): SynapseError, ) - def test_set_my_name_noauth(self): + def test_set_my_name_noauth(self) -> None: self.get_failure( self.handler.set_displayname( self.frank, synapse.types.create_requester(self.bob), "Frank Jr." 
@@ -143,7 +148,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): AuthError, ) - def test_get_other_name(self): + def test_get_other_name(self) -> None: self.mock_federation.make_query.return_value = make_awaitable( {"displayname": "Alice"} ) @@ -158,7 +163,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ignore_backoff=True, ) - def test_incoming_fed_query(self): + def test_incoming_fed_query(self) -> None: self.get_success(self.store.create_profile("caroline")) self.get_success(self.store.set_profile_displayname("caroline", "Caroline")) @@ -174,7 +179,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertEqual({"displayname": "Caroline"}, response) - def test_get_my_avatar(self): + def test_get_my_avatar(self) -> None: self.get_success( self.store.set_profile_avatar_url( self.frank.localpart, "http://my.server/me.png" @@ -184,7 +189,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertEqual("http://my.server/me.png", avatar_url) - def test_set_my_avatar(self): + def test_set_my_avatar(self) -> None: self.get_success( self.handler.set_avatar_url( self.frank, @@ -225,7 +230,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), ) - def test_set_my_avatar_if_disabled(self): + def test_set_my_avatar_if_disabled(self) -> None: self.hs.config.registration.enable_set_avatar_url = False # Setting displayname for the first time is allowed @@ -250,7 +255,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): SynapseError, ) - def test_avatar_constraints_no_config(self): + def test_avatar_constraints_no_config(self) -> None: """Tests that the method to check an avatar against configured constraints skips all of its check if no constraint is configured. """ @@ -263,7 +268,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertTrue(res) @unittest.override_config({"max_avatar_size": 50}) - def test_avatar_constraints_missing(self): + def test_avatar_constraints_missing(self) -> None: """Tests that an avatar isn't allowed if the file at the given MXC URI couldn't be found. """ @@ -273,7 +278,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertFalse(res) @unittest.override_config({"max_avatar_size": 50}) - def test_avatar_constraints_file_size(self): + def test_avatar_constraints_file_size(self) -> None: """Tests that a file that's above the allowed file size is forbidden but one that's below it is allowed. """ @@ -295,7 +300,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertFalse(res) @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]}) - def test_avatar_constraint_mime_type(self): + def test_avatar_constraint_mime_type(self) -> None: """Tests that a file with an unauthorised MIME type is forbidden but one with an authorised content type is allowed. """ diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index 23941abed8..8d4404eda1 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -12,12 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Optional +from typing import Any, Dict, Optional from unittest.mock import Mock import attr +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.errors import RedirectException +from synapse.server import HomeServer +from synapse.util import Clock from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase, override_config @@ -81,10 +85,10 @@ class TestRedirectMappingProvider(TestMappingProvider): class SamlHandlerTestCase(HomeserverTestCase): - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL - saml_config = { + saml_config: Dict[str, Any] = { "sp_config": {"metadata": {}}, # Disable grandfathering. "grandfathered_mxid_source_attribute": None, @@ -98,7 +102,7 @@ class SamlHandlerTestCase(HomeserverTestCase): return config - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver() self.handler = hs.get_saml_handler() @@ -114,7 +118,7 @@ class SamlHandlerTestCase(HomeserverTestCase): elif not has_xmlsec1: skip = "Requires xmlsec1" - def test_map_saml_response_to_user(self): + def test_map_saml_response_to_user(self) -> None: """Ensure that mapping the SAML response returned from a provider to an MXID works properly.""" # stub out the auth handler @@ -140,7 +144,7 @@ class SamlHandlerTestCase(HomeserverTestCase): ) @override_config({"saml2_config": {"grandfathered_mxid_source_attribute": "mxid"}}) - def test_map_saml_response_to_existing_user(self): + def test_map_saml_response_to_existing_user(self) -> None: """Existing users can log in with a SAML account.""" store = self.hs.get_datastores().main self.get_success( @@ -186,7 +190,7 @@ class SamlHandlerTestCase(HomeserverTestCase): auth_provider_session_id=None, ) - def test_map_saml_response_to_invalid_localpart(self): + def test_map_saml_response_to_invalid_localpart(self) -> None: """If the mapping provider generates an invalid localpart it should be rejected.""" # stub out the auth handler @@ -207,7 +211,7 @@ class SamlHandlerTestCase(HomeserverTestCase): ) auth_handler.complete_sso_login.assert_not_called() - def test_map_saml_response_to_user_retries(self): + def test_map_saml_response_to_user_retries(self) -> None: """The mapping provider can retry generating an MXID if the MXID is already in use.""" # stub out the auth handler and error renderer @@ -271,7 +275,7 @@ class SamlHandlerTestCase(HomeserverTestCase): } } ) - def test_map_saml_response_redirect(self): + def test_map_saml_response_redirect(self) -> None: """Test a mapping provider that raises a RedirectException""" saml_response = FakeAuthnResponse({"uid": "test", "username": "test_user"}) @@ -292,7 +296,7 @@ class SamlHandlerTestCase(HomeserverTestCase): }, } ) - def test_attribute_requirements(self): + def test_attribute_requirements(self) -> None: """The required attributes must be satisfied by the SAML response.""" # stub out the auth handler -- cgit 1.4.1 From dea577998f221297d3ff30bdf904f7147f3c3d8a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 15 Mar 2022 15:40:34 +0000 Subject: Add tests for database transaction callbacks (#12198) Signed-off-by: Sean Quah --- changelog.d/12198.misc | 1 + tests/storage/test_database.py | 104 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 104 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12198.misc diff
--git a/changelog.d/12198.misc b/changelog.d/12198.misc new file mode 100644 index 0000000000..6b184a9053 --- /dev/null +++ b/changelog.d/12198.misc @@ -0,0 +1 @@ +Add tests for database transaction callbacks. diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py index 8597867563..ae13bed086 100644 --- a/tests/storage/test_database.py +++ b/tests/storage/test_database.py @@ -12,7 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.storage.database import make_tuple_comparison_clause +from typing import Callable, Tuple +from unittest.mock import Mock, call + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.storage.database import ( + DatabasePool, + LoggingTransaction, + make_tuple_comparison_clause, +) +from synapse.util import Clock from tests import unittest @@ -22,3 +33,94 @@ class TupleComparisonClauseTestCase(unittest.TestCase): clause, args = make_tuple_comparison_clause([("a", 1), ("b", 2)]) self.assertEqual(clause, "(a,b) > (?,?)") self.assertEqual(args, [1, 2]) + + +class CallbacksTestCase(unittest.HomeserverTestCase): + """Tests for transaction callbacks.""" + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.db_pool: DatabasePool = self.store.db_pool + + def _run_interaction( + self, func: Callable[[LoggingTransaction], object] + ) -> Tuple[Mock, Mock]: + """Run the given function in a database transaction, with callbacks registered. + + Args: + func: The function to be run in a transaction. The transaction will be + retried if `func` raises an `OperationalError`. + + Returns: + Two mocks, which were registered as an `after_callback` and an + `exception_callback` respectively, on every transaction attempt. + """ + after_callback = Mock() + exception_callback = Mock() + + def _test_txn(txn: LoggingTransaction) -> None: + txn.call_after(after_callback, 123, 456, extra=789) + txn.call_on_exception(exception_callback, 987, 654, extra=321) + func(txn) + + try: + self.get_success_or_raise( + self.db_pool.runInteraction("test_transaction", _test_txn) + ) + except Exception: + pass + + return after_callback, exception_callback + + def test_after_callback(self) -> None: + """Test that the after callback is called when a transaction succeeds.""" + after_callback, exception_callback = self._run_interaction(lambda txn: None) + + after_callback.assert_called_once_with(123, 456, extra=789) + exception_callback.assert_not_called() + + def test_exception_callback(self) -> None: + """Test that the exception callback is called when a transaction fails.""" + _test_txn = Mock(side_effect=ZeroDivisionError) + after_callback, exception_callback = self._run_interaction(_test_txn) + + after_callback.assert_not_called() + exception_callback.assert_called_once_with(987, 654, extra=321) + + def test_failed_retry(self) -> None: + """Test that the exception callback is called for every failed attempt.""" + # Always raise an `OperationalError`. 
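+ # `runInteraction` retries a transaction that fails with `OperationalError`,
+ # so the exception callback should fire once per attempt: the six calls
+ # asserted below are the initial attempt plus the retries.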
+ _test_txn = Mock(side_effect=self.db_pool.engine.module.OperationalError) + after_callback, exception_callback = self._run_interaction(_test_txn) + + after_callback.assert_not_called() + exception_callback.assert_has_calls( + [ + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + ] + ) + self.assertEqual(exception_callback.call_count, 6) # no additional calls + + def test_successful_retry(self) -> None: + """Test callbacks for a failed transaction followed by a successful attempt.""" + # Raise an `OperationalError` on the first attempt only. + _test_txn = Mock( + side_effect=[self.db_pool.engine.module.OperationalError, None] + ) + after_callback, exception_callback = self._run_interaction(_test_txn) + + # Calling both `after_callback`s when the first attempt failed is rather + # surprising (#12184). Let's document the behaviour in a test. + after_callback.assert_has_calls( + [ + call(123, 456, extra=789), + call(123, 456, extra=789), + ] + ) + self.assertEqual(after_callback.call_count, 2) # no additional calls + exception_callback.assert_not_called() -- cgit 1.4.1 From dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 15 Mar 2022 14:06:05 -0400 Subject: Use the ignored_users table to test event visibility & sync. (#12225) Instead of fetching the raw account data and re-parsing it. The ignored_users table is a denormalised version of the account data for quick searching. --- changelog.d/12225.misc | 1 + synapse/handlers/sync.py | 30 ++----------------- synapse/push/bulk_push_rule_evaluator.py | 2 +- synapse/storage/databases/main/account_data.py | 41 ++++++++++++++++++++++++-- synapse/visibility.py | 18 ++--------- tests/storage/test_account_data.py | 17 +++++++++++ 6 files changed, 62 insertions(+), 47 deletions(-) create mode 100644 changelog.d/12225.misc diff --git a/changelog.d/12225.misc b/changelog.d/12225.misc new file mode 100644 index 0000000000..23105c727c --- /dev/null +++ b/changelog.d/12225.misc @@ -0,0 +1 @@ +Use the `ignored_users` table in additional places instead of re-parsing the account data. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 0aa3052fd6..c9d6a18bd7 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -28,7 +28,7 @@ from typing import ( import attr from prometheus_client import Counter -from synapse.api.constants import AccountDataTypes, EventTypes, Membership, ReceiptTypes +from synapse.api.constants import EventTypes, Membership, ReceiptTypes from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -1601,7 +1601,7 @@ class SyncHandler: return set(), set(), set(), set() # 3. Work out which rooms need reporting in the sync response. 
- ignored_users = await self._get_ignored_users(user_id) + ignored_users = await self.store.ignored_users(user_id) if since_token: room_changes = await self._get_rooms_changed( sync_result_builder, ignored_users @@ -1627,7 +1627,6 @@ class SyncHandler: logger.debug("Generating room entry for %s", room_entry.room_id) await self._generate_room_entry( sync_result_builder, - ignored_users, room_entry, ephemeral=ephemeral_by_room.get(room_entry.room_id, []), tags=tags_by_room.get(room_entry.room_id), @@ -1657,29 +1656,6 @@ class SyncHandler: newly_left_users, ) - async def _get_ignored_users(self, user_id: str) -> FrozenSet[str]: - """Retrieve the users ignored by the given user from their global account_data. - - Returns an empty set if - - there is no global account_data entry for ignored_users - - there is such an entry, but it's not a JSON object. - """ - # TODO: Can we `SELECT ignored_user_id FROM ignored_users WHERE ignorer_user_id=?;` instead? - ignored_account_data = ( - await self.store.get_global_account_data_by_type_for_user( - user_id=user_id, data_type=AccountDataTypes.IGNORED_USER_LIST - ) - ) - - # If there is ignored users account data and it matches the proper type, - # then use it. - ignored_users: FrozenSet[str] = frozenset() - if ignored_account_data: - ignored_users_data = ignored_account_data.get("ignored_users", {}) - if isinstance(ignored_users_data, dict): - ignored_users = frozenset(ignored_users_data.keys()) - return ignored_users - async def _have_rooms_changed( self, sync_result_builder: "SyncResultBuilder" ) -> bool: @@ -2022,7 +1998,6 @@ class SyncHandler: async def _generate_room_entry( self, sync_result_builder: "SyncResultBuilder", - ignored_users: FrozenSet[str], room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], tags: Optional[Dict[str, Dict[str, Any]]], @@ -2051,7 +2026,6 @@ class SyncHandler: Args: sync_result_builder - ignored_users: Set of users ignored by user. room_builder ephemeral: List of new ephemeral events for room tags: List of *all* tags for room, or None if there has been diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 8140afcb6b..030898e4d0 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -213,7 +213,7 @@ class BulkPushRuleEvaluator: if not event.is_state(): ignorers = await self.store.ignored_by(event.sender) else: - ignorers = set() + ignorers = frozenset() for uid, rules in rules_by_user.items(): if event.sender == uid: diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 52146aacc8..9af9f4f18e 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -14,7 +14,17 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, cast +from typing import ( + TYPE_CHECKING, + Any, + Dict, + FrozenSet, + Iterable, + List, + Optional, + Tuple, + cast, +) from synapse.api.constants import AccountDataTypes from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker @@ -365,7 +375,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) ) @cached(max_entries=5000, iterable=True) - async def ignored_by(self, user_id: str) -> Set[str]: + async def ignored_by(self, user_id: str) -> FrozenSet[str]: """ Get users which ignore the given user. 
@@ -375,7 +385,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) Return: The user IDs which ignore the given user. """ - return set( + return frozenset( await self.db_pool.simple_select_onecol( table="ignored_users", keyvalues={"ignored_user_id": user_id}, @@ -384,6 +394,26 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) ) ) + @cached(max_entries=5000, iterable=True) + async def ignored_users(self, user_id: str) -> FrozenSet[str]: + """ + Get users which the given user ignores. + + Params: + user_id: The user ID which is making the request. + + Return: + The user IDs which are ignored by the given user. + """ + return frozenset( + await self.db_pool.simple_select_onecol( + table="ignored_users", + keyvalues={"ignorer_user_id": user_id}, + retcol="ignored_user_id", + desc="ignored_users", + ) + ) + def process_replication_rows( self, stream_name: str, @@ -529,6 +559,10 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) else: currently_ignored_users = set() + # If the data has not changed, nothing to do. + if previously_ignored_users == currently_ignored_users: + return + # Delete entries which are no longer ignored. self.db_pool.simple_delete_many_txn( txn, @@ -551,6 +585,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) # Invalidate the cache for any ignored users which were added or removed. for ignored_user_id in previously_ignored_users ^ currently_ignored_users: self._invalidate_cache_and_stream(txn, self.ignored_by, (ignored_user_id,)) + self._invalidate_cache_and_stream(txn, self.ignored_users, (user_id,)) async def purge_account_data_for_user(self, user_id: str) -> None: """ diff --git a/synapse/visibility.py b/synapse/visibility.py index 281cbe4d88..49519eb8f5 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -14,12 +14,7 @@ import logging from typing import Dict, FrozenSet, List, Optional -from synapse.api.constants import ( - AccountDataTypes, - EventTypes, - HistoryVisibility, - Membership, -) +from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.events import EventBase from synapse.events.utils import prune_event from synapse.storage import Storage @@ -87,15 +82,8 @@ async def filter_events_for_client( state_filter=StateFilter.from_types(types), ) - ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user( - user_id, AccountDataTypes.IGNORED_USER_LIST - ) - - ignore_list: FrozenSet[str] = frozenset() - if ignore_dict_content: - ignored_users_dict = ignore_dict_content.get("ignored_users", {}) - if isinstance(ignored_users_dict, dict): - ignore_list = frozenset(ignored_users_dict.keys()) + # Get the users who are ignored by the requesting user. 
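+ # `ignored_users` is the new cached store method backed by the denormalised
+ # `ignored_users` table, replacing the fetch-and-reparse of raw account data.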
+ ignore_list = await storage.main.ignored_users(user_id) erased_senders = await storage.main.are_users_erased(e.sender for e in events) diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py index 272cd35402..72bf5b3d31 100644 --- a/tests/storage/test_account_data.py +++ b/tests/storage/test_account_data.py @@ -47,9 +47,18 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): expected_ignorer_user_ids, ) + def assert_ignored( + self, ignorer_user_id: str, expected_ignored_user_ids: Set[str] + ) -> None: + self.assertEqual( + self.get_success(self.store.ignored_users(ignorer_user_id)), + expected_ignored_user_ids, + ) + def test_ignoring_users(self): """Basic adding/removing of users from the ignore list.""" self._update_ignore_list("@other:test", "@another:remote") + self.assert_ignored(self.user, {"@other:test", "@another:remote"}) # Check a user which no one ignores. self.assert_ignorers("@user:test", set()) @@ -62,6 +71,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): # Add one user, remove one user, and leave one user. self._update_ignore_list("@foo:test", "@another:remote") + self.assert_ignored(self.user, {"@foo:test", "@another:remote"}) # Check the removed user. self.assert_ignorers("@other:test", set()) @@ -76,20 +86,24 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): """Ensure that caching works properly between different users.""" # The first user ignores a user. self._update_ignore_list("@other:test") + self.assert_ignored(self.user, {"@other:test"}) self.assert_ignorers("@other:test", {self.user}) # The second user ignores them. self._update_ignore_list("@other:test", ignorer_user_id="@second:test") + self.assert_ignored("@second:test", {"@other:test"}) self.assert_ignorers("@other:test", {self.user, "@second:test"}) # The first user un-ignores them. self._update_ignore_list() + self.assert_ignored(self.user, set()) self.assert_ignorers("@other:test", {"@second:test"}) def test_invalid_data(self): """Invalid data ends up clearing out the ignored users list.""" # Add some data and ensure it is there. self._update_ignore_list("@other:test") + self.assert_ignored(self.user, {"@other:test"}) self.assert_ignorers("@other:test", {self.user}) # No ignored_users key. @@ -102,10 +116,12 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): ) # No one ignores the user now. + self.assert_ignored(self.user, set()) self.assert_ignorers("@other:test", set()) # Add some data and ensure it is there. self._update_ignore_list("@other:test") + self.assert_ignored(self.user, {"@other:test"}) self.assert_ignorers("@other:test", {self.user}) # Invalid data. @@ -118,4 +134,5 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): ) # No one ignores the user now. + self.assert_ignored(self.user, set()) self.assert_ignorers("@other:test", set()) -- cgit 1.4.1 From 4587b35929d22731644a11120a9e7d6a9c3bc304 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 07:21:36 -0400 Subject: Clean-up logic for rebasing URLs during URL preview. (#12219) By using urljoin from the standard library and reducing the number of places URLs are rebased. 
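As a point of reference (not part of the patch), a minimal sketch of the stdlib behaviour being relied on, mirroring the cases the removed `rebase_url` docstring and tests exercised:

    from urllib.parse import urljoin, urlparse

    # Relative references resolve against the base URL, as rebase_url did:
    assert urljoin("https://example.com/foo/", "subpage") == "https://example.com/foo/subpage"
    assert urljoin("https://example.com/foo", "sibling") == "https://example.com/sibling"
    assert urljoin("https://example.com/foo/", "/bar") == "https://example.com/bar"

    # Absolute URLs pass through unchanged (urljoin also keeps the trailing
    # slash, which the old rebase_url docstring example dropped):
    assert urljoin("https://example.com/foo/", "https://alice.com/a/") == "https://alice.com/a/"

    # data: URLs must not be rebased; the new call site guards on the scheme
    # before joining:
    image_url = "data:,Hello%2C%20World%21"
    if urlparse(image_url).scheme != "data":
        image_url = urljoin("https://example.com/foo/", image_url)
    assert image_url == "data:,Hello%2C%20World%21"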
--- changelog.d/12219.misc | 1 + synapse/rest/media/v1/preview_html.py | 39 +------------------ synapse/rest/media/v1/preview_url_resource.py | 23 ++++++------ tests/rest/media/v1/test_html_preview.py | 54 ++++++--------------------- 4 files changed, 26 insertions(+), 91 deletions(-) create mode 100644 changelog.d/12219.misc diff --git a/changelog.d/12219.misc b/changelog.d/12219.misc new file mode 100644 index 0000000000..6079414092 --- /dev/null +++ b/changelog.d/12219.misc @@ -0,0 +1 @@ +Clean-up logic around rebasing URLs for URL image previews. diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py index 872a9e72e8..4cc9c66fbe 100644 --- a/synapse/rest/media/v1/preview_html.py +++ b/synapse/rest/media/v1/preview_html.py @@ -16,7 +16,6 @@ import itertools import logging import re from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Set, Union -from urllib import parse as urlparse if TYPE_CHECKING: from lxml import etree @@ -144,9 +143,7 @@ def decode_body( return etree.fromstring(body, parser) -def parse_html_to_open_graph( - tree: "etree.Element", media_uri: str -) -> Dict[str, Optional[str]]: +def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: """ Parse the HTML document into an Open Graph response. @@ -155,7 +152,6 @@ def parse_html_to_open_graph( Args: tree: The parsed HTML document. - media_url: The URI used to download the body. Returns: The Open Graph response as a dictionary. @@ -209,7 +205,7 @@ def parse_html_to_open_graph( "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image']/@content" ) if meta_image: - og["og:image"] = rebase_url(meta_image[0], media_uri) + og["og:image"] = meta_image[0] else: # TODO: consider inlined CSS styles as well as width & height attribs images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]") @@ -320,37 +316,6 @@ def _iterate_over_text( ) -def rebase_url(url: str, base: str) -> str: - """ - Resolves a potentially relative `url` against an absolute `base` URL. - - For example: - - >>> rebase_url("subpage", "https://example.com/foo/") - 'https://example.com/foo/subpage' - >>> rebase_url("sibling", "https://example.com/foo") - 'https://example.com/sibling' - >>> rebase_url("/bar", "https://example.com/foo/") - 'https://example.com/bar' - >>> rebase_url("https://alice.com/a/", "https://example.com/foo/") - 'https://alice.com/a' - """ - base_parts = urlparse.urlparse(base) - # Convert the parsed URL to a list for (potential) modification. - url_parts = list(urlparse.urlparse(url)) - # Add a scheme, if one does not exist. - if not url_parts[0]: - url_parts[0] = base_parts.scheme or "http" - # Fix up the hostname, if this is not a data URL. - if url_parts[0] != "data" and not url_parts[1]: - url_parts[1] = base_parts.netloc - # If the path does not start with a /, nest it under the base path's last - # directory. 
- if not url_parts[2].startswith("/"): - url_parts[2] = re.sub(r"/[^/]+$", "/", base_parts.path) + url_parts[2] - return urlparse.urlunparse(url_parts) - - def summarize_paragraphs( text_nodes: Iterable[str], min_size: int = 200, max_size: int = 500 ) -> Optional[str]: diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 14ea88b240..d47af8ead6 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -22,7 +22,7 @@ import shutil import sys import traceback from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple -from urllib import parse as urlparse +from urllib.parse import urljoin, urlparse, urlsplit from urllib.request import urlopen import attr @@ -44,11 +44,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.rest.media.v1._base import get_filename_from_headers from synapse.rest.media.v1.media_storage import MediaStorage from synapse.rest.media.v1.oembed import OEmbedProvider -from synapse.rest.media.v1.preview_html import ( - decode_body, - parse_html_to_open_graph, - rebase_url, -) +from synapse.rest.media.v1.preview_html import decode_body, parse_html_to_open_graph from synapse.types import JsonDict, UserID from synapse.util import json_encoder from synapse.util.async_helpers import ObservableDeferred @@ -187,7 +183,7 @@ class PreviewUrlResource(DirectServeJsonResource): ts = self.clock.time_msec() # XXX: we could move this into _do_preview if we wanted. - url_tuple = urlparse.urlsplit(url) + url_tuple = urlsplit(url) for entry in self.url_preview_url_blacklist: match = True for attrib in entry: @@ -322,7 +318,7 @@ class PreviewUrlResource(DirectServeJsonResource): # Parse Open Graph information from the HTML in case the oEmbed # response failed or is incomplete. - og_from_html = parse_html_to_open_graph(tree, media_info.uri) + og_from_html = parse_html_to_open_graph(tree) # Compile the Open Graph response by using the scraped # information from the HTML and overlaying any information @@ -588,12 +584,17 @@ class PreviewUrlResource(DirectServeJsonResource): if "og:image" not in og or not og["og:image"]: return + # The image URL from the HTML might be relative to the previewed page; + # convert it to a URL which can be requested directly. + image_url = og["og:image"] + url_parts = urlparse(image_url) + if url_parts.scheme != "data": + image_url = urljoin(media_info.uri, image_url) + # FIXME: it might be cleaner to use the same flow as the main /preview_url # request itself and benefit from the same caching etc. But for now we # just rely on the caching on the master request to speed things up.
- image_info = await self._handle_url( - rebase_url(og["og:image"], media_info.uri), user, allow_data_urls=True - ) + image_info = await self._handle_url(image_url, user, allow_data_urls=True) if _is_media(image_info.media_type): # TODO: make sure we don't choke on white-on-transparent images diff --git a/tests/rest/media/v1/test_html_preview.py b/tests/rest/media/v1/test_html_preview.py index 3fb37a2a59..62e308814d 100644 --- a/tests/rest/media/v1/test_html_preview.py +++ b/tests/rest/media/v1/test_html_preview.py @@ -16,7 +16,6 @@ from synapse.rest.media.v1.preview_html import ( _get_html_media_encodings, decode_body, parse_html_to_open_graph, - rebase_url, summarize_paragraphs, ) @@ -161,7 +160,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -177,7 +176,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -196,7 +195,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual( og, @@ -218,7 +217,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -232,7 +231,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) @@ -247,7 +246,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Title", "og:description": "Some text."}) @@ -262,7 +261,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) @@ -290,7 +289,7 @@ class CalcOgTestCase(unittest.TestCase): FooSome text. 
""".strip() tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) def test_invalid_encoding(self) -> None: @@ -304,7 +303,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html", "invalid-encoding") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) def test_invalid_encoding2(self) -> None: @@ -319,7 +318,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "ÿÿ Foo", "og:description": "Some text."}) def test_windows_1252(self) -> None: @@ -333,7 +332,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "ó", "og:description": "Some text."}) @@ -448,34 +447,3 @@ class MediaEncodingTestCase(unittest.TestCase): 'text/html; charset="invalid"', ) self.assertEqual(list(encodings), ["utf-8", "cp1252"]) - - -class RebaseUrlTestCase(unittest.TestCase): - def test_relative(self) -> None: - """Relative URLs should be resolved based on the context of the base URL.""" - self.assertEqual( - rebase_url("subpage", "https://example.com/foo/"), - "https://example.com/foo/subpage", - ) - self.assertEqual( - rebase_url("sibling", "https://example.com/foo"), - "https://example.com/sibling", - ) - self.assertEqual( - rebase_url("/bar", "https://example.com/foo/"), - "https://example.com/bar", - ) - - def test_absolute(self) -> None: - """Absolute URLs should not be modified.""" - self.assertEqual( - rebase_url("https://alice.com/a/", "https://example.com/foo/"), - "https://alice.com/a/", - ) - - def test_data(self) -> None: - """Data URLs should not be modified.""" - self.assertEqual( - rebase_url("data:,Hello%2C%20World%21", "https://example.com/foo/"), - "data:,Hello%2C%20World%21", - ) -- cgit 1.4.1 From 1da0f79d5455b594f2aa989106a672786f5b990f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 09:20:57 -0400 Subject: Refactor relations tests (#12232) * Moves the relation pagination tests to a separate class. * Move the assertion of the response code into the `_send_relation` helper. * Moves some helpers into the base-class. --- changelog.d/12232.misc | 1 + tests/rest/client/test_relations.py | 1389 +++++++++++++++++------------------ 2 files changed, 674 insertions(+), 716 deletions(-) create mode 100644 changelog.d/12232.misc diff --git a/changelog.d/12232.misc b/changelog.d/12232.misc new file mode 100644 index 0000000000..4a4132edff --- /dev/null +++ b/changelog.d/12232.misc @@ -0,0 +1 @@ +Refactor relations tests to improve code re-use. 
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 0cbe6c0cf7..3dbd1304a8 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -79,6 +79,7 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): content: Optional[dict] = None, access_token: Optional[str] = None, parent_id: Optional[str] = None, + expected_response_code: int = 200, ) -> FakeChannel: """Helper function to send a relation pointing at `self.parent_id` @@ -115,16 +116,50 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): content, access_token=access_token, ) + self.assertEqual(expected_response_code, channel.code, channel.json_body) return channel + def _get_related_events(self) -> List[str]: + """ + Requests /relations on the parent ID and returns a list of event IDs. + """ + # Request the relations of the event. + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + return [ev["event_id"] for ev in channel.json_body["chunk"]] + + def _get_bundled_aggregations(self) -> JsonDict: + """ + Requests /event on the parent ID and returns the m.relations field (from unsigned), if it exists. + """ + # Fetch the bundled aggregations of the event. + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + return channel.json_body["unsigned"].get("m.relations", {}) + + def _get_aggregations(self) -> List[JsonDict]: + """Requests /aggregations on the parent ID and returns the response's chunk.""" + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + return channel.json_body["chunk"] + class RelationsTestCase(BaseRelationsTestCase): def test_send_relation(self) -> None: """Tests that sending a relation works.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") - self.assertEqual(200, channel.code, channel.json_body) - event_id = channel.json_body["event_id"] channel = self.make_request( @@ -151,13 +186,13 @@ class RelationsTestCase(BaseRelationsTestCase): def test_deny_invalid_event(self) -> None: """Test that we deny relations on non-existent events""" - channel = self._send_relation( + self._send_relation( RelationTypes.ANNOTATION, EventTypes.Message, parent_id="foo", content={"body": "foo", "msgtype": "m.text"}, + expected_response_code=400, ) - self.assertEqual(400, channel.code, channel.json_body) # Unless that event is referenced from another event! self.get_success( @@ -171,13 +206,12 @@ class RelationsTestCase(BaseRelationsTestCase): desc="test_deny_invalid_event", ) ) - channel = self._send_relation( + self._send_relation( RelationTypes.THREAD, EventTypes.Message, parent_id="foo", content={"body": "foo", "msgtype": "m.text"}, ) - self.assertEqual(200, channel.code, channel.json_body) def test_deny_invalid_room(self) -> None: """Test that we deny relations on events in other rooms""" @@ -187,18 +221,20 @@ class RelationsTestCase(BaseRelationsTestCase): parent_id = res["event_id"] # Attempt to send an annotation to that event.
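+ # The annotated event exists, but lives in a different room, so the
+ # relation is rejected with a 400.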
- channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", parent_id=parent_id, key="A" + self._send_relation( + RelationTypes.ANNOTATION, + "m.reaction", + parent_id=parent_id, + key="A", + expected_response_code=400, ) - self.assertEqual(400, channel.code, channel.json_body) def test_deny_double_react(self) -> None: """Test that we deny a duplicate annotation (double react) on an event""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(400, channel.code, channel.json_body) + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") + self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", expected_response_code=400 + ) def test_deny_forked_thread(self) -> None: """It is invalid to start a thread off a thread.""" @@ -208,461 +244,160 @@ class RelationsTestCase(BaseRelationsTestCase): content={"msgtype": "m.text", "body": "foo"}, parent_id=self.parent_id, ) - self.assertEqual(200, channel.code, channel.json_body) parent_id = channel.json_body["event_id"] - channel = self._send_relation( + self._send_relation( RelationTypes.THREAD, "m.room.message", content={"msgtype": "m.text", "body": "foo"}, parent_id=parent_id, + expected_response_code=400, ) - self.assertEqual(400, channel.code, channel.json_body) - def test_basic_paginate_relations(self) -> None: - """Tests that calling the pagination API returns the latest relations.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) - first_annotation_id = channel.json_body["event_id"] + def test_aggregation(self) -> None: + """Test that annotations get correctly aggregated.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - self.assertEqual(200, channel.code, channel.json_body) - second_annotation_id = channel.json_body["event_id"] + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") channel = self.make_request( "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - # We expect to get back a single pagination result, which is the latest - # full relation event we sent above. - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - self.assert_dict( + self.assertEqual( + channel.json_body, { - "event_id": second_annotation_id, - "sender": self.user_id, - "type": "m.reaction", + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 2}, + {"type": "m.reaction", "key": "b", "count": 1}, + ] }, - channel.json_body["chunk"][0], - ) - - # We also expect to get the original event (the id of which is self.parent_id) - self.assertEqual( - channel.json_body["original_event"]["event_id"], self.parent_id ) - # Make sure next_batch has something in it that looks like it could be a - # valid token.
- self.assertIsInstance( - channel.json_body.get("next_batch"), str, channel.json_body - ) + def test_aggregation_must_be_annotation(self) -> None: + """Test that aggregations must be annotations.""" - # Request the relations again, but with a different direction. channel = self.make_request( "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations" - f"/{self.parent_id}?limit=1&org.matrix.msc3715.dir=f", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations" + f"/{self.parent_id}/{RelationTypes.REPLACE}?limit=1", access_token=self.user_token, ) - self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual(400, channel.code, channel.json_body) - # We expect to get back a single pagination result, which is the earliest - # full relation event we sent above. - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - self.assert_dict( - { - "event_id": first_annotation_id, - "sender": self.user_id, - "type": "m.reaction", - }, - channel.json_body["chunk"][0], - ) + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) + def test_bundled_aggregations(self) -> None: + """ + Test that annotations, references, and threads get correctly bundled. - def test_repeated_paginate_relations(self) -> None: - """Test that if we paginate using a limit and tokens then we get the - expected events. + Note that this doesn't test against /relations since only thread relations + get bundled via that API. See test_aggregation_get_event_for_thread. + + See test_edit for a similar test for edits. """ + # Setup by sending a variety of relations. + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - expected_event_ids = [] - for idx in range(10): - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", chr(ord("a") + idx) - ) - self.assertEqual(200, channel.code, channel.json_body) - expected_event_ids.append(channel.json_body["event_id"]) + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") + reply_1 = channel.json_body["event_id"] - prev_token = "" - found_event_ids: List[str] = [] - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + prev_token + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") + reply_2 = channel.json_body["event_id"] - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) + self._send_relation(RelationTypes.THREAD, "m.room.test") - found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - next_batch = channel.json_body.get("next_batch") + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + thread_2 = channel.json_body["event_id"] - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch + def assert_bundle(event_json: JsonDict) -> None: + """Assert the expected values of the bundled aggregations.""" + relations_dict = event_json["unsigned"].get("m.relations") - if not prev_token: - break + # Ensure the fields are as expected. 
+ self.assertCountEqual( + relations_dict.keys(), + ( + RelationTypes.ANNOTATION, + RelationTypes.REFERENCE, + RelationTypes.THREAD, + ), + ) - # We paginated backwards, so reverse - found_event_ids.reverse() - self.assertEqual(found_event_ids, expected_event_ids) + # Check the values of each field. + self.assertEqual( + { + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 2}, + {"type": "m.reaction", "key": "b", "count": 1}, + ] + }, + relations_dict[RelationTypes.ANNOTATION], + ) - def test_pagination_from_sync_and_messages(self) -> None: - """Pagination tokens from /sync and /messages can be used to paginate /relations.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") - self.assertEqual(200, channel.code, channel.json_body) - annotation_id = channel.json_body["event_id"] - # Send an event after the relation events. - self.helper.send(self.room, body="Latest event", tok=self.user_token) + self.assertEqual( + {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, + relations_dict[RelationTypes.REFERENCE], + ) - # Request /sync, limiting it such that only the latest event is returned - # (and not the relation). - filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 1}}}') + self.assertEqual( + 2, + relations_dict[RelationTypes.THREAD].get("count"), + ) + self.assertTrue( + relations_dict[RelationTypes.THREAD].get("current_user_participated") + ) + # The latest thread event has some fields that don't matter. + self.assert_dict( + { + "content": { + "m.relates_to": { + "event_id": self.parent_id, + "rel_type": RelationTypes.THREAD, + } + }, + "event_id": thread_2, + "sender": self.user_id, + "type": "m.room.test", + }, + relations_dict[RelationTypes.THREAD].get("latest_event"), + ) + + # Request the event directly. channel = self.make_request( - "GET", f"/sync?filter={filter}", access_token=self.user_token + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] - sync_prev_batch = room_timeline["prev_batch"] - self.assertIsNotNone(sync_prev_batch) - # Ensure the relation event is not in the batch returned from /sync. - self.assertNotIn( - annotation_id, [ev["event_id"] for ev in room_timeline["events"]] - ) + assert_bundle(channel.json_body) - # Request /messages, limiting it such that only the latest event is - # returned (and not the relation). + # Request the room messages. channel = self.make_request( "GET", - f"/rooms/{self.room}/messages?dir=b&limit=1", + f"/rooms/{self.room}/messages?dir=b", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - messages_end = channel.json_body["end"] - self.assertIsNotNone(messages_end) - # Ensure the relation event is not in the chunk returned from /messages. - self.assertNotIn( - annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] + assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) + + # Request the room context. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/context/{self.parent_id}", + access_token=self.user_token, ) - - # Request /relations with the pagination tokens received from both the - # /sync and /messages responses above, in turn. - # - # This is a tiny bit silly since the client wouldn't know the parent ID - # from the requests above; consider the parent ID to be known from a - # previous /sync. 
- for from_token in (sync_prev_batch, messages_end): - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - # The relation should be in the returned chunk. - self.assertIn( - annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] - ) - - def test_aggregation_pagination_groups(self) -> None: - """Test that we can paginate annotation groups correctly.""" - - # We need to create ten separate users to send each reaction. - access_tokens = [self.user_token, self.user2_token] - idx = 0 - while len(access_tokens) < 10: - user_id, token = self._create_user("test" + str(idx)) - idx += 1 - - self.helper.join(self.room, user=user_id, tok=token) - access_tokens.append(token) - - idx = 0 - sent_groups = {"👍": 10, "a": 7, "b": 5, "c": 3, "d": 2, "e": 1} - for key in itertools.chain.from_iterable( - itertools.repeat(key, num) for key, num in sent_groups.items() - ): - channel = self._send_relation( - RelationTypes.ANNOTATION, - "m.reaction", - key=key, - access_token=access_tokens[idx], - ) - self.assertEqual(200, channel.code, channel.json_body) - - idx += 1 - idx %= len(access_tokens) - - prev_token: Optional[str] = None - found_groups: Dict[str, int] = {} - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + prev_token - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - - for groups in channel.json_body["chunk"]: - # We only expect reactions - self.assertEqual(groups["type"], "m.reaction", channel.json_body) - - # We should only see each key once - self.assertNotIn(groups["key"], found_groups, channel.json_body) - - found_groups[groups["key"]] = groups["count"] - - next_batch = channel.json_body.get("next_batch") - - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch - - if not prev_token: - break - - self.assertEqual(sent_groups, found_groups) - - def test_aggregation_pagination_within_group(self) -> None: - """Test that we can paginate within an annotation group.""" - - # We need to create ten separate users to send each reaction. 
- access_tokens = [self.user_token, self.user2_token] - idx = 0 - while len(access_tokens) < 10: - user_id, token = self._create_user("test" + str(idx)) - idx += 1 - - self.helper.join(self.room, user=user_id, tok=token) - access_tokens.append(token) - - idx = 0 - expected_event_ids = [] - for _ in range(10): - channel = self._send_relation( - RelationTypes.ANNOTATION, - "m.reaction", - key="👍", - access_token=access_tokens[idx], - ) - self.assertEqual(200, channel.code, channel.json_body) - expected_event_ids.append(channel.json_body["event_id"]) - - idx += 1 - - # Also send a different type of reaction so that we test we don't see it - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") - self.assertEqual(200, channel.code, channel.json_body) - - prev_token = "" - found_event_ids: List[str] = [] - encoded_key = urllib.parse.quote_plus("👍".encode()) - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + prev_token - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}" - f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}" - f"/m.reaction/{encoded_key}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - - found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - - next_batch = channel.json_body.get("next_batch") - - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch - - if not prev_token: - break - - # We paginated backwards, so reverse - found_event_ids.reverse() - self.assertEqual(found_event_ids, expected_event_ids) - - def test_aggregation(self) -> None: - """Test that annotations get correctly aggregated.""" - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual( - channel.json_body, - { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - ] - }, - ) - - def test_aggregation_must_be_annotation(self) -> None: - """Test that aggregations must be annotations.""" - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations" - f"/{self.parent_id}/{RelationTypes.REPLACE}?limit=1", - access_token=self.user_token, - ) - self.assertEqual(400, channel.code, channel.json_body) - - @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) - def test_bundled_aggregations(self) -> None: - """ - Test that annotations, references, and threads get correctly bundled. - - Note that this doesn't test against /relations since only thread relations - get bundled via that API. See test_aggregation_get_event_for_thread. - - See test_edit for a similar test for edits. - """ - # Setup by sending a variety of relations. 
- channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - reply_1 = channel.json_body["event_id"] - - channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - reply_2 = channel.json_body["event_id"] - - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - thread_2 = channel.json_body["event_id"] - - def assert_bundle(event_json: JsonDict) -> None: - """Assert the expected values of the bundled aggregations.""" - relations_dict = event_json["unsigned"].get("m.relations") - - # Ensure the fields are as expected. - self.assertCountEqual( - relations_dict.keys(), - ( - RelationTypes.ANNOTATION, - RelationTypes.REFERENCE, - RelationTypes.THREAD, - ), - ) - - # Check the values of each field. - self.assertEqual( - { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - ] - }, - relations_dict[RelationTypes.ANNOTATION], - ) - - self.assertEqual( - {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, - relations_dict[RelationTypes.REFERENCE], - ) - - self.assertEqual( - 2, - relations_dict[RelationTypes.THREAD].get("count"), - ) - self.assertTrue( - relations_dict[RelationTypes.THREAD].get("current_user_participated") - ) - # The latest thread event has some fields that don't matter. - self.assert_dict( - { - "content": { - "m.relates_to": { - "event_id": self.parent_id, - "rel_type": RelationTypes.THREAD, - } - }, - "event_id": thread_2, - "sender": self.user_id, - "type": "m.room.test", - }, - relations_dict[RelationTypes.THREAD].get("latest_event"), - ) - - # Request the event directly. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(channel.json_body) - - # Request the room messages. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/messages?dir=b", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) - - # Request the room context. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/context/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(channel.json_body["event"]) + self.assertEqual(200, channel.code, channel.json_body) + assert_bundle(channel.json_body["event"]) # Request sync. channel = self.make_request("GET", "/sync", access_token=self.user_token) @@ -693,14 +428,12 @@ class RelationsTestCase(BaseRelationsTestCase): when directly requested. 
""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) annotation_id = channel.json_body["event_id"] # Annotate the annotation. - channel = self._send_relation( + self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=annotation_id ) - self.assertEqual(200, channel.code, channel.json_body) channel = self.make_request( "GET", @@ -713,14 +446,12 @@ class RelationsTestCase(BaseRelationsTestCase): def test_aggregation_get_event_for_thread(self) -> None: """Test that threads get bundled aggregations included when directly requested.""" channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) thread_id = channel.json_body["event_id"] # Annotate the annotation. - channel = self._send_relation( + self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id ) - self.assertEqual(200, channel.code, channel.json_body) channel = self.make_request( "GET", @@ -877,8 +608,6 @@ class RelationsTestCase(BaseRelationsTestCase): "m.room.message", content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, ) - self.assertEqual(200, channel.code, channel.json_body) - edit_event_id = channel.json_body["event_id"] def assert_bundle(event_json: JsonDict) -> None: @@ -954,7 +683,7 @@ class RelationsTestCase(BaseRelationsTestCase): shouldn't be allowed, are correctly handled. """ - channel = self._send_relation( + self._send_relation( RelationTypes.REPLACE, "m.room.message", content={ @@ -963,309 +692,575 @@ class RelationsTestCase(BaseRelationsTestCase): "m.new_content": {"msgtype": "m.text", "body": "First edit"}, }, ) + + new_body = {"msgtype": "m.text", "body": "I've been edited!"} + channel = self._send_relation( + RelationTypes.REPLACE, + "m.room.message", + content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, + ) + edit_event_id = channel.json_body["event_id"] + + self._send_relation( + RelationTypes.REPLACE, + "m.room.message.WRONG_TYPE", + content={ + "msgtype": "m.text", + "body": "Wibble", + "m.new_content": {"msgtype": "m.text", "body": "Edit, but wrong type"}, + }, + ) + + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + self.assertEqual(channel.json_body["content"], new_body) + + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + ) + + def test_edit_reply(self) -> None: + """Test that editing a reply works.""" + + # Create a reply to edit. 
+ channel = self._send_relation( + RelationTypes.REFERENCE, + "m.room.message", + content={"msgtype": "m.text", "body": "A reply!"}, + ) + reply = channel.json_body["event_id"] + + new_body = {"msgtype": "m.text", "body": "I've been edited!"} + channel = self._send_relation( + RelationTypes.REPLACE, + "m.room.message", + content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, + parent_id=reply, + ) + edit_event_id = channel.json_body["event_id"] + + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{reply}", + access_token=self.user_token, + ) self.assertEqual(200, channel.code, channel.json_body) + # We expect to see the new body in the dict, as well as the reference + # metadata still intact. + self.assertDictContainsSubset(new_body, channel.json_body["content"]) + self.assertDictContainsSubset( + { + "m.relates_to": { + "event_id": self.parent_id, + "rel_type": "m.reference", + } + }, + channel.json_body["content"], + ) + + # We expect that the edit relation appears in the unsigned relations + # section. + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + ) + + def test_edit_thread(self) -> None: + """Test that editing a thread works.""" + + # Create a thread and edit the last event. + channel = self._send_relation( + RelationTypes.THREAD, + "m.room.message", + content={"msgtype": "m.text", "body": "A threaded reply!"}, + ) + threaded_event_id = channel.json_body["event_id"] + new_body = {"msgtype": "m.text", "body": "I've been edited!"} channel = self._send_relation( RelationTypes.REPLACE, "m.room.message", content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, + parent_id=threaded_event_id, + ) + + # Fetch the thread root, to get the bundled aggregation for the thread. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + # We expect that the edit message appears in the thread summary in the + # unsigned relations section. + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.THREAD, relations_dict) + + thread_summary = relations_dict[RelationTypes.THREAD] + self.assertIn("latest_event", thread_summary) + latest_event_in_thread = thread_summary["latest_event"] + self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!") + + def test_edit_edit(self) -> None: + """Test that an edit cannot be edited.""" + new_body = {"msgtype": "m.text", "body": "Initial edit"} + channel = self._send_relation( + RelationTypes.REPLACE, + "m.room.message", + content={ + "msgtype": "m.text", + "body": "Wibble", + "m.new_content": new_body, + }, + ) + edit_event_id = channel.json_body["event_id"] + + # Edit the edit event. + channel = self._send_relation( + RelationTypes.REPLACE, + "m.room.message", + content={ + "msgtype": "m.text", + "body": "foo", + "m.new_content": {"msgtype": "m.text", "body": "Ignored edit"}, + }, + parent_id=edit_event_id, + ) + + # Request the original event.
+ channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + # The edit to the edit should be ignored. + self.assertEqual(channel.json_body["content"], new_body) + + # The relations information should not include the edit to the edit. + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + ) + + def test_unknown_relations(self) -> None: + """Unknown relations should be accepted.""" + channel = self._send_relation("m.relation.test", "m.room.test") + event_id = channel.json_body["event_id"] + + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + # We expect to get back a single pagination result, which is the full + # relation event we sent above. + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assert_dict( + {"event_id": event_id, "sender": self.user_id, "type": "m.room.test"}, + channel.json_body["chunk"][0], + ) + + # We also expect to get the original event (the id of which is self.parent_id) + self.assertEqual( + channel.json_body["original_event"]["event_id"], self.parent_id + ) + + # When bundling the unknown relation is not included. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assertNotIn("m.relations", channel.json_body["unsigned"]) + + # But unknown relations can be directly queried. + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual(channel.json_body["chunk"], []) + + def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: + """ + Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. + """ + for event in events: + if event["event_id"] == self.parent_id: + return event + + raise AssertionError(f"Event {self.parent_id} not found in chunk") + + def test_background_update(self) -> None: + """Test the event_arbitrary_relations background update.""" + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") + annotation_event_id_good = channel.json_body["event_id"] + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="A") + annotation_event_id_bad = channel.json_body["event_id"] + + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + thread_event_id = channel.json_body["event_id"] + + # Clean-up the table as if the inserts did not happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="event_relations", + column="event_id", + iterable=(annotation_event_id_bad, thread_event_id), + keyvalues={}, + desc="RelationsTestCase.test_background_update", + ) + ) + + # Only the "good" annotation should be found. 
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10",
+ access_token=self.user_token,
 )
 self.assertEqual(200, channel.code, channel.json_body)
+ self.assertEqual(
+ [ev["event_id"] for ev in channel.json_body["chunk"]],
+ [annotation_event_id_good],
+ )

- # Insert and run the background update.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {"update_name": "event_arbitrary_relations", "progress_json": "{}"},
+ )
 )
- self.assertEqual(200, channel.code, channel.json_body)

+ # Ugh, have to reset this flag
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # The "good" annotation and the thread should be found, but not the "bad"
+ # annotation.
 channel = self.make_request(
 "GET",
- f"/rooms/{self.room}/event/{self.parent_id}",
+ f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10",
 access_token=self.user_token,
 )
 self.assertEqual(200, channel.code, channel.json_body)
+ self.assertCountEqual(
+ [ev["event_id"] for ev in channel.json_body["chunk"]],
+ [annotation_event_id_good, thread_event_id],
+ )

- self.assertEqual(channel.json_body["content"], new_body)

- relations_dict = channel.json_body["unsigned"].get("m.relations")
- self.assertIn(RelationTypes.REPLACE, relations_dict)
+class RelationPaginationTestCase(BaseRelationsTestCase):
+ def test_basic_paginate_relations(self) -> None:
+ """Tests that calling the pagination API correctly returns the latest relations."""
+ channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+ first_annotation_id = channel.json_body["event_id"]

- m_replace_dict = relations_dict[RelationTypes.REPLACE]
- for key in ["event_id", "sender", "origin_server_ts"]:
- self.assertIn(key, m_replace_dict)
+ channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
+ second_annotation_id = channel.json_body["event_id"]

- self.assert_dict(
- {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
+ access_token=self.user_token,
 )
+ self.assertEqual(200, channel.code, channel.json_body)

- def test_edit_reply(self) -> None:
- """Test that editing a reply works."""
-
- # Create a reply to edit.
- channel = self._send_relation(
- RelationTypes.REFERENCE,
- "m.room.message",
- content={"msgtype": "m.text", "body": "A reply!"},
+ # We expect to get back a single pagination result, which is the latest
+ # full relation event we sent above.
+ self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assert_dict( + { + "event_id": second_annotation_id, + "sender": self.user_id, + "type": "m.reaction", + }, + channel.json_body["chunk"][0], ) - self.assertEqual(200, channel.code, channel.json_body) - reply = channel.json_body["event_id"] - new_body = {"msgtype": "m.text", "body": "I've been edited!"} - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, - parent_id=reply, + # We also expect to get the original event (the id of which is self.parent_id) + self.assertEqual( + channel.json_body["original_event"]["event_id"], self.parent_id ) - self.assertEqual(200, channel.code, channel.json_body) - edit_event_id = channel.json_body["event_id"] + # Make sure next_batch has something in it that looks like it could be a + # valid token. + self.assertIsInstance( + channel.json_body.get("next_batch"), str, channel.json_body + ) + # Request the relations again, but with a different direction. channel = self.make_request( "GET", - f"/rooms/{self.room}/event/{reply}", + f"/_matrix/client/unstable/rooms/{self.room}/relations" + f"/{self.parent_id}?limit=1&org.matrix.msc3715.dir=f", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - # We expect to see the new body in the dict, as well as the reference - # metadata sill intact. - self.assertDictContainsSubset(new_body, channel.json_body["content"]) - self.assertDictContainsSubset( + # We expect to get back a single pagination result, which is the earliest + # full relation event we sent above. + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assert_dict( { - "m.relates_to": { - "event_id": self.parent_id, - "rel_type": "m.reference", - } + "event_id": first_annotation_id, + "sender": self.user_id, + "type": "m.reaction", }, - channel.json_body["content"], + channel.json_body["chunk"][0], ) - # We expect that the edit relation appears in the unsigned relations - # section. - relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.REPLACE, relations_dict) + def test_repeated_paginate_relations(self) -> None: + """Test that if we paginate using a limit and tokens then we get the + expected events. + """ - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict) + expected_event_ids = [] + for idx in range(10): + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", chr(ord("a") + idx) + ) + expected_event_ids.append(channel.json_body["event_id"]) - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict - ) + prev_token = "" + found_event_ids: List[str] = [] + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token - def test_edit_thread(self) -> None: - """Test that editing a thread works.""" + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) - # Create a thread and edit the last event. 
- channel = self._send_relation( - RelationTypes.THREAD, - "m.room.message", - content={"msgtype": "m.text", "body": "A threaded reply!"}, - ) - self.assertEqual(200, channel.code, channel.json_body) - threaded_event_id = channel.json_body["event_id"] + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) + next_batch = channel.json_body.get("next_batch") - new_body = {"msgtype": "m.text", "body": "I've been edited!"} - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, - parent_id=threaded_event_id, + self.assertNotEqual(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEqual(found_event_ids, expected_event_ids) + + def test_pagination_from_sync_and_messages(self) -> None: + """Pagination tokens from /sync and /messages can be used to paginate /relations.""" + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") + annotation_id = channel.json_body["event_id"] + # Send an event after the relation events. + self.helper.send(self.room, body="Latest event", tok=self.user_token) + + # Request /sync, limiting it such that only the latest event is returned + # (and not the relation). + filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 1}}}') + channel = self.make_request( + "GET", f"/sync?filter={filter}", access_token=self.user_token ) self.assertEqual(200, channel.code, channel.json_body) + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + sync_prev_batch = room_timeline["prev_batch"] + self.assertIsNotNone(sync_prev_batch) + # Ensure the relation event is not in the batch returned from /sync. + self.assertNotIn( + annotation_id, [ev["event_id"] for ev in room_timeline["events"]] + ) - # Fetch the thread root, to get the bundled aggregation for the thread. + # Request /messages, limiting it such that only the latest event is + # returned (and not the relation). channel = self.make_request( "GET", - f"/rooms/{self.room}/event/{self.parent_id}", + f"/rooms/{self.room}/messages?dir=b&limit=1", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) + messages_end = channel.json_body["end"] + self.assertIsNotNone(messages_end) + # Ensure the relation event is not in the chunk returned from /messages. + self.assertNotIn( + annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] + ) - # We expect that the edit message appears in the thread summary in the - # unsigned relations section. - relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.THREAD, relations_dict) + # Request /relations with the pagination tokens received from both the + # /sync and /messages responses above, in turn. + # + # This is a tiny bit silly since the client wouldn't know the parent ID + # from the requests above; consider the parent ID to be known from a + # previous /sync. 
+ for from_token in (sync_prev_batch, messages_end): + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) - thread_summary = relations_dict[RelationTypes.THREAD] - self.assertIn("latest_event", thread_summary) - latest_event_in_thread = thread_summary["latest_event"] - self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!") + # The relation should be in the returned chunk. + self.assertIn( + annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] + ) + + def test_aggregation_pagination_groups(self) -> None: + """Test that we can paginate annotation groups correctly.""" + + # We need to create ten separate users to send each reaction. + access_tokens = [self.user_token, self.user2_token] + idx = 0 + while len(access_tokens) < 10: + user_id, token = self._create_user("test" + str(idx)) + idx += 1 + + self.helper.join(self.room, user=user_id, tok=token) + access_tokens.append(token) + + idx = 0 + sent_groups = {"👍": 10, "a": 7, "b": 5, "c": 3, "d": 2, "e": 1} + for key in itertools.chain.from_iterable( + itertools.repeat(key, num) for key, num in sent_groups.items() + ): + self._send_relation( + RelationTypes.ANNOTATION, + "m.reaction", + key=key, + access_token=access_tokens[idx], + ) + + idx += 1 + idx %= len(access_tokens) - def test_edit_edit(self) -> None: - """Test that an edit cannot be edited.""" - new_body = {"msgtype": "m.text", "body": "Initial edit"} - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={ - "msgtype": "m.text", - "body": "Wibble", - "m.new_content": new_body, - }, - ) - self.assertEqual(200, channel.code, channel.json_body) - edit_event_id = channel.json_body["event_id"] + prev_token: Optional[str] = None + found_groups: Dict[str, int] = {} + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token - # Edit the edit event. - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={ - "msgtype": "m.text", - "body": "foo", - "m.new_content": {"msgtype": "m.text", "body": "Ignored edit"}, - }, - parent_id=edit_event_id, - ) - self.assertEqual(200, channel.code, channel.json_body) + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1{from_token}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) - # Request the original event. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - # The edit to the edit should be ignored. - self.assertEqual(channel.json_body["content"], new_body) + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - # The relations information should not include the edit to the edit. 
- relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.REPLACE, relations_dict) + for groups in channel.json_body["chunk"]: + # We only expect reactions + self.assertEqual(groups["type"], "m.reaction", channel.json_body) - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict) + # We should only see each key once + self.assertNotIn(groups["key"], found_groups, channel.json_body) - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict - ) + found_groups[groups["key"]] = groups["count"] - def test_unknown_relations(self) -> None: - """Unknown relations should be accepted.""" - channel = self._send_relation("m.relation.test", "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - event_id = channel.json_body["event_id"] + next_batch = channel.json_body.get("next_batch") - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) + self.assertNotEqual(prev_token, next_batch) + prev_token = next_batch - # We expect to get back a single pagination result, which is the full - # relation event we sent above. - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - self.assert_dict( - {"event_id": event_id, "sender": self.user_id, "type": "m.room.test"}, - channel.json_body["chunk"][0], - ) + if not prev_token: + break - # We also expect to get the original event (the id of which is self.parent_id) - self.assertEqual( - channel.json_body["original_event"]["event_id"], self.parent_id - ) + self.assertEqual(sent_groups, found_groups) - # When bundling the unknown relation is not included. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertNotIn("m.relations", channel.json_body["unsigned"]) + def test_aggregation_pagination_within_group(self) -> None: + """Test that we can paginate within an annotation group.""" - # But unknown relations can be directly queried. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual(channel.json_body["chunk"], []) + # We need to create ten separate users to send each reaction. + access_tokens = [self.user_token, self.user2_token] + idx = 0 + while len(access_tokens) < 10: + user_id, token = self._create_user("test" + str(idx)) + idx += 1 - def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: - """ - Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. 
- """ - for event in events: - if event["event_id"] == self.parent_id: - return event + self.helper.join(self.room, user=user_id, tok=token) + access_tokens.append(token) - raise AssertionError(f"Event {self.parent_id} not found in chunk") + idx = 0 + expected_event_ids = [] + for _ in range(10): + channel = self._send_relation( + RelationTypes.ANNOTATION, + "m.reaction", + key="👍", + access_token=access_tokens[idx], + ) + expected_event_ids.append(channel.json_body["event_id"]) - def test_background_update(self) -> None: - """Test the event_arbitrary_relations background update.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") - self.assertEqual(200, channel.code, channel.json_body) - annotation_event_id_good = channel.json_body["event_id"] + idx += 1 - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="A") - self.assertEqual(200, channel.code, channel.json_body) - annotation_event_id_bad = channel.json_body["event_id"] + # Also send a different type of reaction so that we test we don't see it + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - thread_event_id = channel.json_body["event_id"] + prev_token = "" + found_event_ids: List[str] = [] + encoded_key = urllib.parse.quote_plus("👍".encode()) + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token - # Clean-up the table as if the inserts did not happen during event creation. - self.get_success( - self.store.db_pool.simple_delete_many( - table="event_relations", - column="event_id", - iterable=(annotation_event_id_bad, thread_event_id), - keyvalues={}, - desc="RelationsTestCase.test_background_update", + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}" + f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}" + f"/m.reaction/{encoded_key}?limit=1{from_token}", + access_token=self.user_token, ) - ) + self.assertEqual(200, channel.code, channel.json_body) - # Only the "good" annotation should be found. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual( - [ev["event_id"] for ev in channel.json_body["chunk"]], - [annotation_event_id_good], - ) + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - # Insert and run the background update. - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - {"update_name": "event_arbitrary_relations", "progress_json": "{}"}, - ) - ) + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - # Ugh, have to reset this flag - self.store.db_pool.updates._all_done = False - self.wait_for_background_updates() + next_batch = channel.json_body.get("next_batch") - # The "good" annotation and the thread should be found, but not the "bad" - # annotation. 
- channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertCountEqual( - [ev["event_id"] for ev in channel.json_body["chunk"]], - [annotation_event_id_good, thread_event_id], - ) + self.assertNotEqual(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEqual(found_event_ids, expected_event_ids) class RelationRedactionTestCase(BaseRelationsTestCase): @@ -1294,46 +1289,6 @@ class RelationRedactionTestCase(BaseRelationsTestCase): ) self.assertEqual(200, channel.code, channel.json_body) - def _make_relation_requests(self) -> Tuple[List[str], JsonDict]: - """ - Makes requests and ensures they result in a 200 response, returns a - tuple of results: - - 1. `/relations` -> Returns a list of event IDs. - 2. `/event` -> Returns the response's m.relations field (from unsigned), - if it exists. - """ - - # Request the relations of the event. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEquals(200, channel.code, channel.json_body) - event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]] - - # Fetch the bundled aggregations of the event. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEquals(200, channel.code, channel.json_body) - bundled_relations = channel.json_body["unsigned"].get("m.relations", {}) - - return event_ids, bundled_relations - - def _get_aggregations(self) -> List[JsonDict]: - """Request /aggregations on the parent ID and includes the returned chunk.""" - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - return channel.json_body["chunk"] - def test_redact_relation_annotation(self) -> None: """ Test that annotations of an event are properly handled after the @@ -1343,17 +1298,16 @@ class RelationRedactionTestCase(BaseRelationsTestCase): the response to relations. """ channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) to_redact_event_id = channel.json_body["event_id"] channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token ) - self.assertEqual(200, channel.code, channel.json_body) unredacted_event_id = channel.json_body["event_id"] # Both relations should exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id]) self.assertEquals( relations["m.annotation"], @@ -1368,7 +1322,8 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self._redact(to_redact_event_id) # The unredacted relation should still exist. 
- event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [unredacted_event_id]) self.assertEquals( relations["m.annotation"], @@ -1391,7 +1346,6 @@ class RelationRedactionTestCase(BaseRelationsTestCase): EventTypes.Message, content={"body": "reply 1", "msgtype": "m.text"}, ) - self.assertEqual(200, channel.code, channel.json_body) unredacted_event_id = channel.json_body["event_id"] # Note that the *last* event in the thread is redacted, as that gets @@ -1401,11 +1355,11 @@ class RelationRedactionTestCase(BaseRelationsTestCase): EventTypes.Message, content={"body": "reply 2", "msgtype": "m.text"}, ) - self.assertEqual(200, channel.code, channel.json_body) to_redact_event_id = channel.json_body["event_id"] # Both relations exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [to_redact_event_id, unredacted_event_id]) self.assertDictContainsSubset( { @@ -1424,7 +1378,8 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self._redact(to_redact_event_id) # The unredacted relation should still exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [unredacted_event_id]) self.assertDictContainsSubset( { @@ -1444,7 +1399,7 @@ class RelationRedactionTestCase(BaseRelationsTestCase): is redacted. """ # Add a relation - channel = self._send_relation( + self._send_relation( RelationTypes.REPLACE, "m.room.message", parent_id=self.parent_id, @@ -1454,10 +1409,10 @@ class RelationRedactionTestCase(BaseRelationsTestCase): "m.new_content": {"msgtype": "m.text", "body": "First edit"}, }, ) - self.assertEqual(200, channel.code, channel.json_body) # Check the relation is returned - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 1) self.assertIn(RelationTypes.REPLACE, relations) @@ -1465,7 +1420,8 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self._redact(self.parent_id) # The relations are not returned. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 0) self.assertEqual(relations, {}) @@ -1475,11 +1431,11 @@ class RelationRedactionTestCase(BaseRelationsTestCase): """ # Add a relation channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") - self.assertEqual(200, channel.code, channel.json_body) related_event_id = channel.json_body["event_id"] # The relations should exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 1) self.assertIn(RelationTypes.ANNOTATION, relations) @@ -1491,7 +1447,8 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self._redact(self.parent_id) # The relations are returned. 
- event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [related_event_id]) self.assertEquals( relations["m.annotation"], @@ -1512,14 +1469,14 @@ class RelationRedactionTestCase(BaseRelationsTestCase): EventTypes.Message, content={"body": "reply 1", "msgtype": "m.text"}, ) - self.assertEqual(200, channel.code, channel.json_body) related_event_id = channel.json_body["event_id"] # Redact one of the reactions. self._redact(self.parent_id) # The unredacted relation should still exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(len(event_ids), 1) self.assertDictContainsSubset( { -- cgit 1.4.1 From 86965605a4688d80dc0a74ed4993a52f282e970a Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 16 Mar 2022 13:52:59 +0000 Subject: Fix dead link in spam checker warning (#12231) --- changelog.d/12231.doc | 1 + synapse/config/spam_checker.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12231.doc diff --git a/changelog.d/12231.doc b/changelog.d/12231.doc new file mode 100644 index 0000000000..16593d2b92 --- /dev/null +++ b/changelog.d/12231.doc @@ -0,0 +1 @@ +Fix the link to the module documentation in the legacy spam checker warning message. diff --git a/synapse/config/spam_checker.py b/synapse/config/spam_checker.py index a233a9ce03..4c52103b1c 100644 --- a/synapse/config/spam_checker.py +++ b/synapse/config/spam_checker.py @@ -25,8 +25,8 @@ logger = logging.getLogger(__name__) LEGACY_SPAM_CHECKER_WARNING = """ This server is using a spam checker module that is implementing the deprecated spam checker interface. Please check with the module's maintainer to see if a new version -supporting Synapse's generic modules system is available. -For more information, please see https://matrix-org.github.io/synapse/latest/modules.html +supporting Synapse's generic modules system is available. For more information, please +see https://matrix-org.github.io/synapse/latest/modules/index.html ---------------------------------------------------------------------------------------""" -- cgit 1.4.1 From c486fa5fd9082643e40a55ffa59d902aa6db4c2b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 10:37:04 -0400 Subject: Add some missing type hints to cache datastore. (#12216) --- changelog.d/12216.misc | 1 + synapse/storage/databases/main/cache.py | 57 +++++++++++++++++++++------------ 2 files changed, 37 insertions(+), 21 deletions(-) create mode 100644 changelog.d/12216.misc diff --git a/changelog.d/12216.misc b/changelog.d/12216.misc new file mode 100644 index 0000000000..dc398ac1e0 --- /dev/null +++ b/changelog.d/12216.misc @@ -0,0 +1 @@ +Add missing type hints for cache storage. 
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index d6a2df1afe..2d7511d613 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -23,6 +23,7 @@ from synapse.replication.tcp.streams.events import ( EventsStream, EventsStreamCurrentStateRow, EventsStreamEventRow, + EventsStreamRow, ) from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( @@ -31,6 +32,7 @@ from synapse.storage.database import ( LoggingTransaction, ) from synapse.storage.engines import PostgresEngine +from synapse.util.caches.descriptors import _CachedFunction from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -82,7 +84,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): if last_id == current_id: return [], current_id, False - def get_all_updated_caches_txn(txn): + def get_all_updated_caches_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Tuple[int, tuple]], int, bool]: # We purposefully don't bound by the current token, as we want to # send across cache invalidations as quickly as possible. Cache # invalidations are idempotent, so duplicates are fine. @@ -107,7 +111,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "get_all_updated_caches", get_all_updated_caches_txn ) - def process_replication_rows(self, stream_name, instance_name, token, rows): + def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] + ) -> None: if stream_name == EventsStream.NAME: for row in rows: self._process_event_stream_row(token, row) @@ -142,10 +148,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore): super().process_replication_rows(stream_name, instance_name, token, rows) - def _process_event_stream_row(self, token, row): + def _process_event_stream_row(self, token: int, row: EventsStreamRow) -> None: data = row.data if row.type == EventsStreamEventRow.TypeId: + assert isinstance(data, EventsStreamEventRow) self._invalidate_caches_for_event( token, data.event_id, @@ -157,9 +164,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): backfilled=False, ) elif row.type == EventsStreamCurrentStateRow.TypeId: - self._curr_state_delta_stream_cache.entity_has_changed( - row.data.room_id, token - ) + assert isinstance(data, EventsStreamCurrentStateRow) + self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) if data.type == EventTypes.Member: self.get_rooms_for_user_with_stream_ordering.invalidate( @@ -170,15 +176,15 @@ class CacheInvalidationWorkerStore(SQLBaseStore): def _invalidate_caches_for_event( self, - stream_ordering, - event_id, - room_id, - etype, - state_key, - redacts, - relates_to, - backfilled, - ): + stream_ordering: int, + event_id: str, + room_id: str, + etype: str, + state_key: Optional[str], + redacts: Optional[str], + relates_to: Optional[str], + backfilled: bool, + ) -> None: self._invalidate_get_event_cache(event_id) self.have_seen_event.invalidate((room_id, event_id)) @@ -207,7 +213,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self.get_thread_summary.invalidate((relates_to,)) self.get_thread_participated.invalidate((relates_to,)) - async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]): + async def invalidate_cache_and_stream( + self, cache_name: str, keys: Tuple[Any, ...] + ) -> None: """Invalidates the cache and adds it to the cache stream so slaves will know to invalidate their caches. 
@@ -227,7 +235,12 @@ class CacheInvalidationWorkerStore(SQLBaseStore): keys, ) - def _invalidate_cache_and_stream(self, txn, cache_func, keys): + def _invalidate_cache_and_stream( + self, + txn: LoggingTransaction, + cache_func: _CachedFunction, + keys: Tuple[Any, ...], + ) -> None: """Invalidates the cache and adds it to the cache stream so slaves will know to invalidate their caches. @@ -238,7 +251,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): txn.call_after(cache_func.invalidate, keys) self._send_invalidation_to_replication(txn, cache_func.__name__, keys) - def _invalidate_all_cache_and_stream(self, txn, cache_func): + def _invalidate_all_cache_and_stream( + self, txn: LoggingTransaction, cache_func: _CachedFunction + ) -> None: """Invalidates the entire cache and adds it to the cache stream so slaves will know to invalidate their caches. """ @@ -279,8 +294,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): ) def _send_invalidation_to_replication( - self, txn, cache_name: str, keys: Optional[Iterable[Any]] - ): + self, txn: LoggingTransaction, cache_name: str, keys: Optional[Iterable[Any]] + ) -> None: """Notifies replication that given cache has been invalidated. Note that this does *not* invalidate the cache locally. @@ -315,7 +330,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "instance_name": self._instance_name, "cache_func": cache_name, "keys": keys, - "invalidation_ts": self.clock.time_msec(), + "invalidation_ts": self._clock.time_msec(), }, ) -- cgit 1.4.1 From fc9bd620ce94b64af46737e25a524336967782a1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 10:39:15 -0400 Subject: Add a relations handler to avoid duplication. (#12227) Adds a handler layer between the REST and datastore layers for relations. --- changelog.d/12227.misc | 1 + synapse/handlers/pagination.py | 5 +- synapse/handlers/relations.py | 117 +++++++++++++++++++++++++++++++++++++++ synapse/rest/client/relations.py | 75 +++---------------------- synapse/server.py | 5 ++ 5 files changed, 134 insertions(+), 69 deletions(-) create mode 100644 changelog.d/12227.misc create mode 100644 synapse/handlers/relations.py diff --git a/changelog.d/12227.misc b/changelog.d/12227.misc new file mode 100644 index 0000000000..41c9dcbd37 --- /dev/null +++ b/changelog.d/12227.misc @@ -0,0 +1 @@ +Refactor the relations endpoints to add a `RelationsHandler`. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 60059fec3e..41679f7f86 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Set +from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set import attr @@ -422,7 +422,7 @@ class PaginationHandler: pagin_config: PaginationConfig, as_client_event: bool = True, event_filter: Optional[Filter] = None, - ) -> Dict[str, Any]: + ) -> JsonDict: """Get messages in a room. Args: @@ -431,6 +431,7 @@ class PaginationHandler: pagin_config: The pagination config rules to apply, if any. as_client_event: True to get events in client-server format. 
event_filter: Filter to apply to results or None
+
 Returns:
 Pagination API results
 """
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
new file mode 100644
index 0000000000..8e475475ad
--- /dev/null
+++ b/synapse/handlers/relations.py
@@ -0,0 +1,117 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import TYPE_CHECKING, Optional
+
+from synapse.api.errors import SynapseError
+from synapse.types import JsonDict, Requester, StreamToken
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+
+logger = logging.getLogger(__name__)
+
+
+class RelationsHandler:
+ def __init__(self, hs: "HomeServer"):
+ self._main_store = hs.get_datastores().main
+ self._auth = hs.get_auth()
+ self._clock = hs.get_clock()
+ self._event_handler = hs.get_event_handler()
+ self._event_serializer = hs.get_event_client_serializer()
+
+ async def get_relations(
+ self,
+ requester: Requester,
+ event_id: str,
+ room_id: str,
+ relation_type: Optional[str] = None,
+ event_type: Optional[str] = None,
+ aggregation_key: Optional[str] = None,
+ limit: int = 5,
+ direction: str = "b",
+ from_token: Optional[StreamToken] = None,
+ to_token: Optional[StreamToken] = None,
+ ) -> JsonDict:
+ """Get related events of an event, ordered by topological ordering.
+
+ TODO Accept a PaginationConfig instead of individual pagination parameters.
+
+ Args:
+ requester: The user requesting the relations.
+ event_id: Fetch events that relate to this event ID.
+ room_id: The room the event belongs to.
+ relation_type: Only fetch events with this relation type, if given.
+ event_type: Only fetch events with this event type, if given.
+ aggregation_key: Only fetch events with this aggregation key, if given.
+ limit: Only fetch the most recent `limit` events.
+ direction: Whether to fetch the most recent first (`"b"`) or the
+ oldest first (`"f"`).
+ from_token: Fetch rows from the given token, or from the start if None.
+ to_token: Fetch rows up to the given token, or up to the end if None.
+
+ Returns:
+ The pagination chunk.
+ """
+
+ user_id = requester.user.to_string()
+
+ await self._auth.check_user_in_room_or_world_readable(
+ room_id, user_id, allow_departed_users=True
+ )
+
+ # This gets the original event and checks that a) the event exists and
+ # b) the user is allowed to view it.
+ event = await self._event_handler.get_event(requester.user, room_id, event_id) + if event is None: + raise SynapseError(404, "Unknown parent event.") + + pagination_chunk = await self._main_store.get_relations_for_event( + event_id=event_id, + event=event, + room_id=room_id, + relation_type=relation_type, + event_type=event_type, + aggregation_key=aggregation_key, + limit=limit, + direction=direction, + from_token=from_token, + to_token=to_token, + ) + + events = await self._main_store.get_events_as_list( + [c["event_id"] for c in pagination_chunk.chunk] + ) + + now = self._clock.time_msec() + # Do not bundle aggregations when retrieving the original event because + # we want the content before relations are applied to it. + original_event = self._event_serializer.serialize_event( + event, now, bundle_aggregations=None + ) + # The relations returned for the requested event do include their + # bundled aggregations. + aggregations = await self._main_store.get_bundled_aggregations( + events, requester.user.to_string() + ) + serialized_events = self._event_serializer.serialize_events( + events, now, bundle_aggregations=aggregations + ) + + return_value = await pagination_chunk.to_dict(self._main_store) + return_value["chunk"] = serialized_events + return_value["original_event"] = original_event + + return return_value diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index d9a6be43f7..c16078b187 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -51,9 +51,7 @@ class RelationPaginationServlet(RestServlet): super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastores().main - self.clock = hs.get_clock() - self._event_serializer = hs.get_event_client_serializer() - self.event_handler = hs.get_event_handler() + self._relations_handler = hs.get_relations_handler() async def on_GET( self, @@ -65,16 +63,6 @@ class RelationPaginationServlet(RestServlet): ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) - await self.auth.check_user_in_room_or_world_readable( - room_id, requester.user.to_string(), allow_departed_users=True - ) - - # This gets the original event and checks that a) the event exists and - # b) the user is allowed to view it. - event = await self.event_handler.get_event(requester.user, room_id, parent_id) - if event is None: - raise SynapseError(404, "Unknown parent event.") - limit = parse_integer(request, "limit", default=5) direction = parse_string( request, "org.matrix.msc3715.dir", default="b", allowed_values=["f", "b"] @@ -90,9 +78,9 @@ class RelationPaginationServlet(RestServlet): if to_token_str: to_token = await StreamToken.from_string(self.store, to_token_str) - pagination_chunk = await self.store.get_relations_for_event( + result = await self._relations_handler.get_relations( + requester=requester, event_id=parent_id, - event=event, room_id=room_id, relation_type=relation_type, event_type=event_type, @@ -102,30 +90,7 @@ class RelationPaginationServlet(RestServlet): to_token=to_token, ) - events = await self.store.get_events_as_list( - [c["event_id"] for c in pagination_chunk.chunk] - ) - - now = self.clock.time_msec() - # Do not bundle aggregations when retrieving the original event because - # we want the content before relations are applied to it. - original_event = self._event_serializer.serialize_event( - event, now, bundle_aggregations=None - ) - # The relations returned for the requested event do include their - # bundled aggregations. 
- aggregations = await self.store.get_bundled_aggregations( - events, requester.user.to_string() - ) - serialized_events = self._event_serializer.serialize_events( - events, now, bundle_aggregations=aggregations - ) - - return_value = await pagination_chunk.to_dict(self.store) - return_value["chunk"] = serialized_events - return_value["original_event"] = original_event - - return 200, return_value + return 200, result class RelationAggregationPaginationServlet(RestServlet): @@ -245,9 +210,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastores().main - self.clock = hs.get_clock() - self._event_serializer = hs.get_event_client_serializer() - self.event_handler = hs.get_event_handler() + self._relations_handler = hs.get_relations_handler() async def on_GET( self, @@ -260,18 +223,6 @@ class RelationAggregationGroupPaginationServlet(RestServlet): ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) - await self.auth.check_user_in_room_or_world_readable( - room_id, - requester.user.to_string(), - allow_departed_users=True, - ) - - # This checks that a) the event exists and b) the user is allowed to - # view it. - event = await self.event_handler.get_event(requester.user, room_id, parent_id) - if event is None: - raise SynapseError(404, "Unknown parent event.") - if relation_type != RelationTypes.ANNOTATION: raise SynapseError(400, "Relation type must be 'annotation'") @@ -286,9 +237,9 @@ class RelationAggregationGroupPaginationServlet(RestServlet): if to_token_str: to_token = await StreamToken.from_string(self.store, to_token_str) - result = await self.store.get_relations_for_event( + result = await self._relations_handler.get_relations( + requester=requester, event_id=parent_id, - event=event, room_id=room_id, relation_type=relation_type, event_type=event_type, @@ -298,17 +249,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): to_token=to_token, ) - events = await self.store.get_events_as_list( - [c["event_id"] for c in result.chunk] - ) - - now = self.clock.time_msec() - serialized_events = self._event_serializer.serialize_events(events, now) - - return_value = await result.to_dict(self.store) - return_value["chunk"] = serialized_events - - return 200, return_value + return 200, result def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: diff --git a/synapse/server.py b/synapse/server.py index 2fcf18a7a6..380369db92 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -94,6 +94,7 @@ from synapse.handlers.profile import ProfileHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.receipts import ReceiptsHandler from synapse.handlers.register import RegistrationHandler +from synapse.handlers.relations import RelationsHandler from synapse.handlers.room import ( RoomContextHandler, RoomCreationHandler, @@ -719,6 +720,10 @@ class HomeServer(metaclass=abc.ABCMeta): def get_pagination_handler(self) -> PaginationHandler: return PaginationHandler(self) + @cache_in_self + def get_relations_handler(self) -> RelationsHandler: + return RelationsHandler(self) + @cache_in_self def get_room_context_handler(self) -> RoomContextHandler: return RoomContextHandler(self) -- cgit 1.4.1 From 61210567405b1ac7efaa23d5513cc0b443da0a3a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 16 Mar 2022 15:07:41 +0000 Subject: Handle cancellation in 
`DatabasePool.runInteraction()` (#12199) To handle cancellation, we ensure that `after_callback`s and `exception_callback`s are always run, since the transaction will complete on another thread regardless of cancellation. We also wait until everything is done before releasing the `CancelledError`, so that logging contexts won't get used after they have been finished. Signed-off-by: Sean Quah --- changelog.d/12199.misc | 1 + synapse/storage/database.py | 61 +++++++++++++++++++++++++----------------- tests/storage/test_database.py | 58 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+), 24 deletions(-) create mode 100644 changelog.d/12199.misc diff --git a/changelog.d/12199.misc b/changelog.d/12199.misc new file mode 100644 index 0000000000..16dec1d26d --- /dev/null +++ b/changelog.d/12199.misc @@ -0,0 +1 @@ +Handle cancellation in `DatabasePool.runInteraction()`. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 99802228c9..9749f0c06e 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -41,6 +41,7 @@ from prometheus_client import Histogram from typing_extensions import Literal from twisted.enterprise import adbapi +from twisted.internet import defer from synapse.api.errors import StoreError from synapse.config.database import DatabaseConnectionConfig @@ -55,6 +56,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor +from synapse.util.async_helpers import delay_cancellation from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -732,34 +734,45 @@ class DatabasePool: Returns: The result of func """ - after_callbacks: List[_CallbackListEntry] = [] - exception_callbacks: List[_CallbackListEntry] = [] - if not current_context(): - logger.warning("Starting db txn '%s' from sentinel context", desc) + async def _runInteraction() -> R: + after_callbacks: List[_CallbackListEntry] = [] + exception_callbacks: List[_CallbackListEntry] = [] - try: - with opentracing.start_active_span(f"db.{desc}"): - result = await self.runWithConnection( - self.new_transaction, - desc, - after_callbacks, - exception_callbacks, - func, - *args, - db_autocommit=db_autocommit, - isolation_level=isolation_level, - **kwargs, - ) + if not current_context(): + logger.warning("Starting db txn '%s' from sentinel context", desc) - for after_callback, after_args, after_kwargs in after_callbacks: - after_callback(*after_args, **after_kwargs) - except Exception: - for after_callback, after_args, after_kwargs in exception_callbacks: - after_callback(*after_args, **after_kwargs) - raise + try: + with opentracing.start_active_span(f"db.{desc}"): + result = await self.runWithConnection( + self.new_transaction, + desc, + after_callbacks, + exception_callbacks, + func, + *args, + db_autocommit=db_autocommit, + isolation_level=isolation_level, + **kwargs, + ) - return cast(R, result) + for after_callback, after_args, after_kwargs in after_callbacks: + after_callback(*after_args, **after_kwargs) + + return cast(R, result) + except Exception: + for after_callback, after_args, after_kwargs in exception_callbacks: + after_callback(*after_args, **after_kwargs) + raise + + # To handle cancellation, we ensure that `after_callback`s and + # `exception_callback`s are always run, since the transaction will complete + # on another 
thread regardless of cancellation.
+ #
+ # We also wait until everything above is done before releasing the
+ # `CancelledError`, so that logging contexts won't get used after they have been
+ # finished.
+ return await delay_cancellation(defer.ensureDeferred(_runInteraction()))

 async def runWithConnection(
 self,
diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py
index ae13bed086..a40fc20ef9 100644
--- a/tests/storage/test_database.py
+++ b/tests/storage/test_database.py
@@ -15,6 +15,8 @@
 from typing import Callable, Tuple
 from unittest.mock import Mock, call

+from twisted.internet import defer
+from twisted.internet.defer import CancelledError, Deferred
 from twisted.test.proto_helpers import MemoryReactor

 from synapse.server import HomeServer
@@ -124,3 +126,59 @@ class CallbacksTestCase(unittest.HomeserverTestCase):
 )
 self.assertEqual(after_callback.call_count, 2) # no additional calls
 exception_callback.assert_not_called()
+
+
+class CancellationTestCase(unittest.HomeserverTestCase):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.db_pool: DatabasePool = self.store.db_pool
+
+ def test_after_callback(self) -> None:
+ """Test that the after callback is still called when the interaction is cancelled but the transaction succeeds."""
+ d: "Deferred[None]"
+ after_callback = Mock()
+ exception_callback = Mock()
+
+ def _test_txn(txn: LoggingTransaction) -> None:
+ txn.call_after(after_callback, 123, 456, extra=789)
+ txn.call_on_exception(exception_callback, 987, 654, extra=321)
+ d.cancel()
+
+ d = defer.ensureDeferred(
+ self.db_pool.runInteraction("test_transaction", _test_txn)
+ )
+ self.get_failure(d, CancelledError)
+
+ after_callback.assert_called_once_with(123, 456, extra=789)
+ exception_callback.assert_not_called()
+
+ def test_exception_callback(self) -> None:
+ """Test that the exception callback is still called when the interaction is cancelled and the transaction fails."""
+ d: "Deferred[None]"
+ after_callback = Mock()
+ exception_callback = Mock()
+
+ def _test_txn(txn: LoggingTransaction) -> None:
+ txn.call_after(after_callback, 123, 456, extra=789)
+ txn.call_on_exception(exception_callback, 987, 654, extra=321)
+ d.cancel()
+ # Simulate a retryable failure on every attempt.
+ raise self.db_pool.engine.module.OperationalError()
+
+ d = defer.ensureDeferred(
+ self.db_pool.runInteraction("test_transaction", _test_txn)
+ )
+ self.get_failure(d, CancelledError)
+
+ after_callback.assert_not_called()
+ exception_callback.assert_has_calls(
+ [
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ call(987, 654, extra=321),
+ ]
+ )
+ self.assertEqual(exception_callback.call_count, 6) # no additional calls
-- 
cgit 1.4.1

From 96274565ff0dbb7d21b02b04fcef115330426707 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 16 Mar 2022 12:17:39 -0400
Subject: Fix bundling aggregations if unsigned is not a returned event field.
 (#12234)

An error occurred if a filter was supplied with `event_fields` which did
not include `unsigned`.

In that case, bundled aggregations are still added as the spec states
it is allowed for servers to add additional fields.
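To make the failure mode concrete before the diff below, here is a minimal sketch (illustrative Python only, not taken from the patch; the event dict shape is an assumption). A sync filter's `event_fields` can strip `unsigned` from a serialized event, so indexing `serialized_event["unsigned"]` directly raises a `KeyError`; chaining `setdefault` recreates the field before attaching the bundled aggregations, which is the approach the fix takes.

    # Illustrative sketch only (hypothetical event shape, not Synapse code).
    def attach_aggregations(serialized_event: dict, serialized_aggregations: dict) -> None:
        # Recreate "unsigned" if an event_fields filter removed it, then merge
        # the bundled aggregations under "m.relations".
        serialized_event.setdefault("unsigned", {}).setdefault(
            "m.relations", {}
        ).update(serialized_aggregations)

    event = {"event_id": "$parent", "content": {"body": "hi"}}  # "unsigned" filtered away
    attach_aggregations(event, {"m.annotation": {"chunk": []}})
    assert event["unsigned"]["m.relations"] == {"m.annotation": {"chunk": []}}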
--- changelog.d/12234.bugfix | 1 + synapse/events/utils.py | 9 ++++++--- tests/rest/client/test_relations.py | 28 ++++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 3 deletions(-) create mode 100644 changelog.d/12234.bugfix diff --git a/changelog.d/12234.bugfix b/changelog.d/12234.bugfix new file mode 100644 index 0000000000..dbb77f36ff --- /dev/null +++ b/changelog.d/12234.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when a `filter` argument with `event_fields` supplied but not including the `unsigned` field could result in a 500 error on `/sync`. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index b2a237c1e0..a0520068e0 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -530,9 +530,12 @@ class EventClientSerializer: # Include the bundled aggregations in the event. if serialized_aggregations: - serialized_event["unsigned"].setdefault("m.relations", {}).update( - serialized_aggregations - ) + # There is likely already an "unsigned" field, but a filter might + # have stripped it off (via the event_fields option). The server is + # allowed to return additional fields, so add it back. + serialized_event.setdefault("unsigned", {}).setdefault( + "m.relations", {} + ).update(serialized_aggregations) def serialize_events( self, diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 0cbe6c0cf7..171f4e97c8 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1267,6 +1267,34 @@ class RelationsTestCase(BaseRelationsTestCase): [annotation_event_id_good, thread_event_id], ) + def test_bundled_aggregations_with_filter(self) -> None: + """ + If "unsigned" is an omitted field (due to filtering), adding the bundled + aggregations should not break. + + Note that the spec allows for a server to return additional fields beyond + what is specified. + """ + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + + # Note that the sync filter does not include "unsigned" as a field. + filter = urllib.parse.quote_plus( + b'{"event_fields": ["content", "event_id"], "room": {"timeline": {"limit": 3}}}' + ) + channel = self.make_request( + "GET", f"/sync?filter={filter}", access_token=self.user_token + ) + self.assertEqual(200, channel.code, channel.json_body) + + # Ensure the timeline is limited, find the parent event. + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + self.assertTrue(room_timeline["limited"]) + parent_event = self._find_event_in_chunk(room_timeline["events"]) + + # Ensure there's bundled aggregations on it. + self.assertIn("unsigned", parent_event) + self.assertIn("m.relations", parent_event["unsigned"]) + class RelationRedactionTestCase(BaseRelationsTestCase): """ -- cgit 1.4.1 From f70afbd565f34cdc093e083b92376a1154b007a7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 12:20:05 -0400 Subject: Re-generate changelog. --- CHANGES.md | 1 + changelog.d/12234.bugfix | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/12234.bugfix diff --git a/CHANGES.md b/CHANGES.md index 60e7ecb1b9..78498c10b5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -21,6 +21,7 @@ Bugfixes - Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) - Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. 
([\#12175](https://github.com/matrix-org/synapse/issues/12175))
 - Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215))
+- Fix a long-standing bug where a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234))


 Improved Documentation
diff --git a/changelog.d/12234.bugfix b/changelog.d/12234.bugfix
deleted file mode 100644
index dbb77f36ff..0000000000
--- a/changelog.d/12234.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug when a `filter` argument with `event_fields` supplied but not including the `unsigned` field could result in a 500 error on `/sync`.
-- 
cgit 1.4.1
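As a closing illustration of the cancellation handling added in #12199 above: a minimal sketch of the wrapping pattern that `DatabasePool.runInteraction()` adopts, under assumed imports. `work`, `on_success` and `on_failure` are hypothetical stand-ins for the transaction function and its after/exception callbacks; this is not code from the patches.

    from typing import Awaitable, Callable, TypeVar

    from twisted.internet import defer

    from synapse.util.async_helpers import delay_cancellation

    T = TypeVar("T")

    async def run_shielded(
        work: Callable[[], Awaitable[T]],
        on_success: Callable[[], None],
        on_failure: Callable[[], None],
    ) -> T:
        async def _run() -> T:
            try:
                result = await work()
            except Exception:
                # Mirrors the exception_callbacks: runs on failure, then re-raises.
                on_failure()
                raise
            # Mirrors the after_callbacks: runs once the work has succeeded.
            on_success()
            return result

        # The inner coroutine runs to completion even if the caller cancels;
        # delay_cancellation() holds back the CancelledError until then, so the
        # callbacks above always fire and no finished logging context is reused.
        return await delay_cancellation(defer.ensureDeferred(_run()))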