From 8e56a1b73c9819ea4bddbe6a4734966e70b3b92c Mon Sep 17 00:00:00 2001 From: lukasdenk <63459921+lukasdenk@users.noreply.github.com> Date: Wed, 2 Mar 2022 11:35:34 +0100 Subject: Make get_room_version use cached get_room_version_id. (#11808) --- changelog.d/11808.misc | 1 + synapse/storage/databases/main/state.py | 27 +++++++++++++-------------- tests/handlers/test_room_summary.py | 5 ++++- 3 files changed, 18 insertions(+), 15 deletions(-) create mode 100644 changelog.d/11808.misc diff --git a/changelog.d/11808.misc b/changelog.d/11808.misc new file mode 100644 index 0000000000..cdc5fc75b7 --- /dev/null +++ b/changelog.d/11808.misc @@ -0,0 +1 @@ +Make method `get_room_version` use cached `get_room_version_id`. diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 2fb3e65192..417aef1dbc 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -42,6 +42,16 @@ logger = logging.getLogger(__name__) MAX_STATE_DELTA_HOPS = 100 +def _retrieve_and_check_room_version(room_id: str, room_version_id: str) -> RoomVersion: + v = KNOWN_ROOM_VERSIONS.get(room_version_id) + if not v: + raise UnsupportedRoomVersionError( + "Room %s uses a room version %s which is no longer supported" + % (room_id, room_version_id) + ) + return v + + # this inherits from EventsWorkerStore because it calls self.get_events class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """The parts of StateGroupStore that can be called from workers.""" @@ -62,11 +72,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): Typically this happens if support for the room's version has been removed from Synapse. """ - return await self.db_pool.runInteraction( - "get_room_version_txn", - self.get_room_version_txn, - room_id, - ) + room_version_id = await self.get_room_version_id(room_id) + return _retrieve_and_check_room_version(room_id, room_version_id) def get_room_version_txn( self, txn: LoggingTransaction, room_id: str @@ -82,15 +89,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): removed from Synapse. """ room_version_id = self.get_room_version_id_txn(txn, room_id) - v = KNOWN_ROOM_VERSIONS.get(room_version_id) - - if not v: - raise UnsupportedRoomVersionError( - "Room %s uses a room version %s which is no longer supported" - % (room_id, room_version_id) - ) - - return v + return _retrieve_and_check_room_version(room_id, room_version_id) @cached(max_entries=10000) async def get_room_version_id(self, room_id: str) -> str: diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index b33ff94a39..cff07a8973 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -658,7 +658,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): def test_unknown_room_version(self): """ - If an room with an unknown room version is encountered it should not cause + If a room with an unknown room version is encountered it should not cause the entire summary to skip. """ # Poke the database and update the room version to an unknown one. @@ -670,6 +670,9 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): desc="updated-room-version", ) ) + # Invalidate method so that it returns the currently updated version + # instead of the cached version. + self.hs.get_datastores().main.get_room_version_id.invalidate((self.room,)) # The result should have only the space, along with a link from space -> room. 
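The patch above works because Synapse's `@cached` decorator memoises `get_room_version_id` per room ID, which is also why the accompanying test now has to invalidate that cache after rewriting the room version directly in the database. As a minimal sketch of the pattern, assuming a simplified cache rather than Synapse's real `@cached` descriptor (the class and method names below are invented for illustration):

```python
# Minimal sketch of a memoised async lookup with explicit invalidation.
# Illustration only: Synapse's real `@cached` descriptor also handles
# eviction, metrics and deferred results.
from typing import Awaitable, Callable, Dict, Tuple


class SimpleAsyncCache:
    def __init__(self, fetch: Callable[[str], Awaitable[str]]) -> None:
        self._fetch = fetch
        self._entries: Dict[Tuple[str, ...], str] = {}

    async def get(self, room_id: str) -> str:
        key = (room_id,)
        if key not in self._entries:
            # Only hit the database on a cache miss.
            self._entries[key] = await self._fetch(room_id)
        return self._entries[key]

    def invalidate(self, key: Tuple[str, ...]) -> None:
        # Counterpart of `get_room_version_id.invalidate((room_id,))` in the
        # test above: after writing to the database directly, drop the stale
        # cached value so the next read sees the update.
        self._entries.pop(key, None)
```

The design point is that `get_room_version` now shares both the cache and the validation helper, so callers on the hot path avoid a database transaction entirely.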
expected = [(self.space, [self.room])] -- cgit 1.4.1 From c7b2f1ccdc412c4f5f07f4fe630d2c2040caf93d Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 2 Mar 2022 10:37:04 +0000 Subject: Back out in-flight state caching changes. (#12126) --- changelog.d/10870.misc | 1 - changelog.d/11608.misc | 1 - changelog.d/11610.misc | 1 - changelog.d/12033.misc | 1 - changelog.d/12126.removal | 1 + synapse/storage/databases/state/store.py | 243 ++------------- tests/storage/databases/test_state_store.py | 454 ---------------------------- 7 files changed, 26 insertions(+), 676 deletions(-) delete mode 100644 changelog.d/10870.misc delete mode 100644 changelog.d/11608.misc delete mode 100644 changelog.d/11610.misc delete mode 100644 changelog.d/12033.misc create mode 100644 changelog.d/12126.removal delete mode 100644 tests/storage/databases/test_state_store.py diff --git a/changelog.d/10870.misc b/changelog.d/10870.misc deleted file mode 100644 index 3af049b969..0000000000 --- a/changelog.d/10870.misc +++ /dev/null @@ -1 +0,0 @@ -Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/changelog.d/11608.misc b/changelog.d/11608.misc deleted file mode 100644 index 3af049b969..0000000000 --- a/changelog.d/11608.misc +++ /dev/null @@ -1 +0,0 @@ -Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/changelog.d/11610.misc b/changelog.d/11610.misc deleted file mode 100644 index 3af049b969..0000000000 --- a/changelog.d/11610.misc +++ /dev/null @@ -1 +0,0 @@ -Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/changelog.d/12033.misc b/changelog.d/12033.misc deleted file mode 100644 index 3af049b969..0000000000 --- a/changelog.d/12033.misc +++ /dev/null @@ -1 +0,0 @@ -Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/changelog.d/12126.removal b/changelog.d/12126.removal new file mode 100644 index 0000000000..8c8bf6ee7e --- /dev/null +++ b/changelog.d/12126.removal @@ -0,0 +1 @@ +Back out in-flight state caching changes. \ No newline at end of file diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index dadf3d1e3a..7614d76ac6 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -13,24 +13,11 @@ # limitations under the License. 
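Before reading the reverted code below, it may help to see the idea being backed out in isolation: `_get_state_for_groups` had been taught to deduplicate in-flight requests, so concurrent callers wanting the same state group share one pending database query instead of each issuing their own. The following is a rough sketch of that technique using plain `asyncio`; the actual implementation, removed in the hunks below, keyed on `(state group, StateFilter)` and used Twisted's `ObservableDeferred`:

```python
# Generic in-flight request deduplication, assuming a lookup keyed on a
# single integer (the reverted Synapse code keyed on group and StateFilter).
import asyncio
from typing import Awaitable, Callable, Dict


class InflightDeduplicator:
    def __init__(self, fetch: Callable[[int], Awaitable[dict]]) -> None:
        self._fetch = fetch
        self._inflight: Dict[int, "asyncio.Task[dict]"] = {}

    async def get(self, group: int) -> dict:
        task = self._inflight.get(group)
        if task is None:
            # First caller: fire the real fetch and publish it for reuse.
            task = asyncio.ensure_future(self._fetch(group))
            self._inflight[group] = task
            # Once complete, the request is no longer reusable.
            task.add_done_callback(lambda _: self._inflight.pop(group, None))
        # Later callers piggyback on the same pending result; shield() stops
        # one cancelled caller from cancelling everyone else's fetch.
        return await asyncio.shield(task)
```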
import logging -from typing import ( - TYPE_CHECKING, - Collection, - Dict, - Iterable, - Optional, - Sequence, - Set, - Tuple, -) +from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple import attr -from sortedcontainers import SortedDict - -from twisted.internet import defer from synapse.api.constants import EventTypes -from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -42,12 +29,6 @@ from synapse.storage.state import StateFilter from synapse.storage.types import Cursor from synapse.storage.util.sequence import build_sequence_generator from synapse.types import MutableStateMap, StateKey, StateMap -from synapse.util import unwrapFirstError -from synapse.util.async_helpers import ( - AbstractObservableDeferred, - ObservableDeferred, - yieldable_gather_results, -) from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import DictionaryCache @@ -56,8 +37,8 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) + MAX_STATE_DELTA_HOPS = 100 -MAX_INFLIGHT_REQUESTS_PER_GROUP = 5 @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -73,24 +54,6 @@ class _GetStateGroupDelta: return len(self.delta_ids) if self.delta_ids else 0 -def state_filter_rough_priority_comparator( - state_filter: StateFilter, -) -> Tuple[int, int]: - """ - Returns a comparable value that roughly indicates the relative size of this - state filter compared to others. - 'Larger' state filters should sort first when using ascending order, so - this is essentially the opposite of 'size'. - It should be treated as a rough guide only and should not be interpreted to - have any particular meaning. The representation may also change - - The current implementation returns a tuple of the form: - * -1 for include_others, 0 otherwise - * -(number of entries in state_filter.types) - """ - return -int(state_filter.include_others), -len(state_filter.types) - - class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): """A data store for fetching/storing state groups.""" @@ -143,12 +106,6 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): 500000, ) - # Current ongoing get_state_for_groups in-flight requests - # {group ID -> {StateFilter -> ObservableDeferred}} - self._state_group_inflight_requests: Dict[ - int, SortedDict[StateFilter, AbstractObservableDeferred[StateMap[str]]] - ] = {} - def get_max_state_group_txn(txn: Cursor) -> int: txn.execute("SELECT COALESCE(max(id), 0) FROM state_groups") return txn.fetchone()[0] # type: ignore @@ -200,7 +157,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): ) async def _get_state_groups_from_groups( - self, groups: Sequence[int], state_filter: StateFilter + self, groups: List[int], state_filter: StateFilter ) -> Dict[int, StateMap[str]]: """Returns the state groups for a given set of groups from the database, filtering on types of state events. @@ -271,170 +228,6 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return state_filter.filter_state(state_dict_ids), not missing_types - def _get_state_for_group_gather_inflight_requests( - self, group: int, state_filter_left_over: StateFilter - ) -> Tuple[Sequence[AbstractObservableDeferred[StateMap[str]]], StateFilter]: - """ - Attempts to gather in-flight requests and re-use them to retrieve state - for the given state group, filtered with the given state filter. 
- - If there are more than MAX_INFLIGHT_REQUESTS_PER_GROUP in-flight requests, - and there *still* isn't enough information to complete the request by solely - reusing others, a full state filter will be requested to ensure that subsequent - requests can reuse this request. - - Used as part of _get_state_for_group_using_inflight_cache. - - Returns: - Tuple of two values: - A sequence of ObservableDeferreds to observe - A StateFilter representing what else needs to be requested to fulfill the request - """ - - inflight_requests = self._state_group_inflight_requests.get(group) - if inflight_requests is None: - # no requests for this group, need to retrieve it all ourselves - return (), state_filter_left_over - - # The list of ongoing requests which will help narrow the current request. - reusable_requests = [] - - # Iterate over existing requests in roughly biggest-first order. - for request_state_filter in inflight_requests: - request_deferred = inflight_requests[request_state_filter] - new_state_filter_left_over = state_filter_left_over.approx_difference( - request_state_filter - ) - if new_state_filter_left_over == state_filter_left_over: - # Reusing this request would not gain us anything, so don't bother. - continue - - reusable_requests.append(request_deferred) - state_filter_left_over = new_state_filter_left_over - if state_filter_left_over == StateFilter.none(): - # we have managed to collect enough of the in-flight requests - # to cover our StateFilter and give us the state we need. - break - - if ( - state_filter_left_over != StateFilter.none() - and len(inflight_requests) >= MAX_INFLIGHT_REQUESTS_PER_GROUP - ): - # There are too many requests for this group. - # To prevent even more from building up, we request the whole - # state filter to guarantee that we can be reused by any subsequent - # requests for this state group. - return (), StateFilter.all() - - return reusable_requests, state_filter_left_over - - async def _get_state_for_group_fire_request( - self, group: int, state_filter: StateFilter - ) -> StateMap[str]: - """ - Fires off a request to get the state at a state group, - potentially filtering by type and/or state key. - - This request will be tracked in the in-flight request cache and automatically - removed when it is finished. - - Used as part of _get_state_for_group_using_inflight_cache. 
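Re-expressed outside the diff, the gathering step just described boils down to a subtraction loop. The sketch below substitutes plain sets for `StateFilter` (whose `approx_difference` plays the role of set difference) and deliberately ignores wildcards, so treat it as an approximation of the removed logic, not a drop-in replacement:

```python
# Sketch of _get_state_for_group_gather_inflight_requests with sets in place
# of StateFilters. Returns the reusable pending requests plus whatever still
# has to be fetched; `None` for the leftover means "fetch everything", the
# fallback used when the per-group cap is hit.
from typing import Dict, List, Optional, Set, Tuple

MAX_INFLIGHT_REQUESTS_PER_GROUP = 5


def gather_inflight(
    inflight: Dict[frozenset, object], wanted: Set[str]
) -> Tuple[List[object], Optional[Set[str]]]:
    reusable: List[object] = []
    left_over = set(wanted)
    # Scan existing requests roughly biggest-first, as the SortedDict does.
    for req_filter in sorted(inflight, key=len, reverse=True):
        narrowed = left_over - req_filter  # cf. approx_difference()
        if narrowed == left_over:
            continue  # reusing this request would gain us nothing
        reusable.append(inflight[req_filter])
        left_over = narrowed
        if not left_over:
            break  # in-flight requests fully cover what we need
    if left_over and len(inflight) >= MAX_INFLIGHT_REQUESTS_PER_GROUP:
        # Too many outstanding requests: ask for the whole state instead so
        # any subsequent request can be answered from this one.
        return [], None
    return reusable, left_over
```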
- - Args: - group: ID of the state group for which we want to get state - state_filter: the state filter used to fetch state from the database - """ - cache_sequence_nm = self._state_group_cache.sequence - cache_sequence_m = self._state_group_members_cache.sequence - - # Help the cache hit ratio by expanding the filter a bit - db_state_filter = state_filter.return_expanded() - - async def _the_request() -> StateMap[str]: - group_to_state_dict = await self._get_state_groups_from_groups( - (group,), state_filter=db_state_filter - ) - - # Now let's update the caches - self._insert_into_cache( - group_to_state_dict, - db_state_filter, - cache_seq_num_members=cache_sequence_m, - cache_seq_num_non_members=cache_sequence_nm, - ) - - # Remove ourselves from the in-flight cache - group_request_dict = self._state_group_inflight_requests[group] - del group_request_dict[db_state_filter] - if not group_request_dict: - # If there are no more requests in-flight for this group, - # clean up the cache by removing the empty dictionary - del self._state_group_inflight_requests[group] - - return group_to_state_dict[group] - - # We don't immediately await the result, so must use run_in_background - # But we DO await the result before the current log context (request) - # finishes, so don't need to run it as a background process. - request_deferred = run_in_background(_the_request) - observable_deferred = ObservableDeferred(request_deferred, consumeErrors=True) - - # Insert the ObservableDeferred into the cache - group_request_dict = self._state_group_inflight_requests.setdefault( - group, SortedDict(state_filter_rough_priority_comparator) - ) - group_request_dict[db_state_filter] = observable_deferred - - return await make_deferred_yieldable(observable_deferred.observe()) - - async def _get_state_for_group_using_inflight_cache( - self, group: int, state_filter: StateFilter - ) -> MutableStateMap[str]: - """ - Gets the state at a state group, potentially filtering by type and/or - state key. - - 1. Calls _get_state_for_group_gather_inflight_requests to gather any - ongoing requests which might overlap with the current request. - 2. Fires a new request, using _get_state_for_group_fire_request, - for any state which cannot be gathered from ongoing requests. - - Args: - group: ID of the state group for which we want to get state - state_filter: the state filter used to fetch state from the database - Returns: - state map - """ - - # first, figure out whether we can re-use any in-flight requests - # (and if so, what would be left over) - ( - reusable_requests, - state_filter_left_over, - ) = self._get_state_for_group_gather_inflight_requests(group, state_filter) - - if state_filter_left_over != StateFilter.none(): - # Fetch remaining state - remaining = await self._get_state_for_group_fire_request( - group, state_filter_left_over - ) - assembled_state: MutableStateMap[str] = dict(remaining) - else: - assembled_state = {} - - gathered = await make_deferred_yieldable( - defer.gatherResults( - (r.observe() for r in reusable_requests), consumeErrors=True - ) - ).addErrback(unwrapFirstError) - - # assemble our result. - for result_piece in gathered: - assembled_state.update(result_piece) - - # Filter out any state that may be more than what we asked for. 
- return state_filter.filter_state(assembled_state) - async def _get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None ) -> Dict[int, MutableStateMap[str]]: @@ -476,17 +269,31 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): if not incomplete_groups: return state - async def get_from_cache(group: int, state_filter: StateFilter) -> None: - state[group] = await self._get_state_for_group_using_inflight_cache( - group, state_filter - ) + cache_sequence_nm = self._state_group_cache.sequence + cache_sequence_m = self._state_group_members_cache.sequence - await yieldable_gather_results( - get_from_cache, - incomplete_groups, - state_filter, + # Help the cache hit ratio by expanding the filter a bit + db_state_filter = state_filter.return_expanded() + + group_to_state_dict = await self._get_state_groups_from_groups( + list(incomplete_groups), state_filter=db_state_filter ) + # Now let's update the caches + self._insert_into_cache( + group_to_state_dict, + db_state_filter, + cache_seq_num_members=cache_sequence_m, + cache_seq_num_non_members=cache_sequence_nm, + ) + + # And finally update the result dict, by filtering out any extra + # stuff we pulled out of the database. + for group, group_state_dict in group_to_state_dict.items(): + # We just replace any existing entries, as we will have loaded + # everything we need from the database anyway. + state[group] = state_filter.filter_state(group_state_dict) + return state def _get_state_for_groups_using_cache( diff --git a/tests/storage/databases/test_state_store.py b/tests/storage/databases/test_state_store.py deleted file mode 100644 index 2b484c95a9..0000000000 --- a/tests/storage/databases/test_state_store.py +++ /dev/null @@ -1,454 +0,0 @@ -# Copyright 2022 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
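For contrast, the restored read path visible in the added lines above is the straightforward one: snapshot the cache sequence numbers, widen the filter for cache friendliness, make one batched query for all incomplete groups, write the results back into the caches, and narrow each group's state down again. Paraphrased as a free-standing coroutine (the store attributes are the ones named in the diff, but this outline is not the verbatim method):

```python
# Outline of the restored _get_state_for_groups tail, paraphrased.
async def fetch_incomplete_groups(store, state, incomplete_groups, state_filter):
    # Snapshot cache sequence numbers first, so a concurrent invalidation
    # between the read and the write-back can be detected.
    cache_sequence_nm = store._state_group_cache.sequence
    cache_sequence_m = store._state_group_members_cache.sequence

    # Help the cache hit ratio by expanding the filter a bit.
    db_state_filter = state_filter.return_expanded()

    group_to_state_dict = await store._get_state_groups_from_groups(
        list(incomplete_groups), state_filter=db_state_filter
    )
    store._insert_into_cache(
        group_to_state_dict,
        db_state_filter,
        cache_seq_num_members=cache_sequence_m,
        cache_seq_num_non_members=cache_sequence_nm,
    )
    # Trim the deliberately over-fetched state back to what was asked for.
    for group, group_state_dict in group_to_state_dict.items():
        state[group] = state_filter.filter_state(group_state_dict)
    return state
```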
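The deleted test module that follows also deserves a note, since its harness technique is reusable: it patches `_get_state_groups_from_groups` with a stub that returns unresolved Deferreds, so each test controls exactly when a simulated database query completes and can assert on deduplication behaviour in between. A condensed sketch of just the harness:

```python
# Condensed sketch of the deleted tests' harness: the stub records each
# pending "query" so the test can fire them in any order it likes.
from typing import Dict, List, Sequence, Tuple

from twisted.internet.defer import Deferred

pending_calls: List[Tuple[Tuple[int, ...], "Deferred"]] = []


def fake_get_state_groups_from_groups(groups: Sequence[int], state_filter) -> "Deferred":
    # state_filter is accepted only to match the patched method's signature.
    d: "Deferred" = Deferred()
    pending_calls.append((tuple(groups), d))  # hold the "query" open
    return d


def complete_call(groups: Tuple[int, ...], d: "Deferred", fake_state: Dict) -> None:
    # Resolve one pending "database query" with canned per-group state.
    d.callback({group: dict(fake_state) for group in groups})
```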
-import typing -from typing import Dict, List, Sequence, Tuple -from unittest.mock import patch - -from parameterized import parameterized - -from twisted.internet.defer import Deferred, ensureDeferred -from twisted.test.proto_helpers import MemoryReactor - -from synapse.api.constants import EventTypes -from synapse.storage.databases.state.store import ( - MAX_INFLIGHT_REQUESTS_PER_GROUP, - state_filter_rough_priority_comparator, -) -from synapse.storage.state import StateFilter -from synapse.types import StateMap -from synapse.util import Clock - -from tests.unittest import HomeserverTestCase - -if typing.TYPE_CHECKING: - from synapse.server import HomeServer - -# StateFilter for ALL non-m.room.member state events -ALL_NON_MEMBERS_STATE_FILTER = StateFilter.freeze( - types={EventTypes.Member: set()}, - include_others=True, -) - -FAKE_STATE = { - (EventTypes.Member, "@alice:test"): "join", - (EventTypes.Member, "@bob:test"): "leave", - (EventTypes.Member, "@charlie:test"): "invite", - ("test.type", "a"): "AAA", - ("test.type", "b"): "BBB", - ("other.event.type", "state.key"): "123", -} - - -class StateGroupInflightCachingTestCase(HomeserverTestCase): - def prepare( - self, reactor: MemoryReactor, clock: Clock, homeserver: "HomeServer" - ) -> None: - self.state_storage = homeserver.get_storage().state - self.state_datastore = homeserver.get_datastores().state - # Patch out the `_get_state_groups_from_groups`. - # This is useful because it lets us pretend we have a slow database. - get_state_groups_patch = patch.object( - self.state_datastore, - "_get_state_groups_from_groups", - self._fake_get_state_groups_from_groups, - ) - get_state_groups_patch.start() - - self.addCleanup(get_state_groups_patch.stop) - self.get_state_group_calls: List[ - Tuple[Tuple[int, ...], StateFilter, Deferred[Dict[int, StateMap[str]]]] - ] = [] - - def _fake_get_state_groups_from_groups( - self, groups: Sequence[int], state_filter: StateFilter - ) -> "Deferred[Dict[int, StateMap[str]]]": - d: Deferred[Dict[int, StateMap[str]]] = Deferred() - self.get_state_group_calls.append((tuple(groups), state_filter, d)) - return d - - def _complete_request_fake( - self, - groups: Tuple[int, ...], - state_filter: StateFilter, - d: "Deferred[Dict[int, StateMap[str]]]", - ) -> None: - """ - Assemble a fake database response and complete the database request. - """ - - # Return a filtered copy of the fake state - d.callback({group: state_filter.filter_state(FAKE_STATE) for group in groups}) - - def test_duplicate_requests_deduplicated(self) -> None: - """ - Tests that duplicate requests for state are deduplicated. 
- - This test: - - requests some state (state group 42, 'all' state filter) - - requests it again, before the first request finishes - - checks to see that only one database query was made - - completes the database query - - checks that both requests see the same retrieved state - """ - req1 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.all() - ) - ) - self.pump(by=0.1) - - # This should have gone to the database - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - - req2 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.all() - ) - ) - self.pump(by=0.1) - - # No more calls should have gone to the database - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - self.assertFalse(req2.called) - - groups, sf, d = self.get_state_group_calls[0] - self.assertEqual(groups, (42,)) - self.assertEqual(sf, StateFilter.all()) - - # Now we can complete the request - self._complete_request_fake(groups, sf, d) - - self.assertEqual(self.get_success(req1), FAKE_STATE) - self.assertEqual(self.get_success(req2), FAKE_STATE) - - def test_smaller_request_deduplicated(self) -> None: - """ - Tests that duplicate requests for state are deduplicated. - - This test: - - requests some state (state group 42, 'all' state filter) - - requests a subset of that state, before the first request finishes - - checks to see that only one database query was made - - completes the database query - - checks that both requests see the correct retrieved state - """ - req1 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.from_types((("test.type", None),)) - ) - ) - self.pump(by=0.1) - - # This should have gone to the database - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - - req2 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.from_types((("test.type", "b"),)) - ) - ) - self.pump(by=0.1) - - # No more calls should have gone to the database, because the second - # request was already in the in-flight cache! - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - self.assertFalse(req2.called) - - groups, sf, d = self.get_state_group_calls[0] - self.assertEqual(groups, (42,)) - # The state filter is expanded internally for increased cache hit rate, - # so the database sees a wider state filter than requested. - self.assertEqual(sf, ALL_NON_MEMBERS_STATE_FILTER) - - # Now we can complete the request - self._complete_request_fake(groups, sf, d) - - self.assertEqual( - self.get_success(req1), - {("test.type", "a"): "AAA", ("test.type", "b"): "BBB"}, - ) - self.assertEqual(self.get_success(req2), {("test.type", "b"): "BBB"}) - - def test_partially_overlapping_request_deduplicated(self) -> None: - """ - Tests that partially-overlapping requests are partially deduplicated. - - This test: - - requests a single type of wildcard state - (This is internally expanded to be all non-member state) - - requests the entire state in parallel - - checks to see that two database queries were made, but that the second - one is only for member state. - - completes the database queries - - checks that both requests have the correct result.
- """ - - req1 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.from_types((("test.type", None),)) - ) - ) - self.pump(by=0.1) - - # This should have gone to the database - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - - req2 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.all() - ) - ) - self.pump(by=0.1) - - # Because it only partially overlaps, this also went to the database - self.assertEqual(len(self.get_state_group_calls), 2) - self.assertFalse(req1.called) - self.assertFalse(req2.called) - - # First request: - groups, sf, d = self.get_state_group_calls[0] - self.assertEqual(groups, (42,)) - # The state filter is expanded internally for increased cache hit rate, - # so we the database sees a wider state filter than requested. - self.assertEqual(sf, ALL_NON_MEMBERS_STATE_FILTER) - self._complete_request_fake(groups, sf, d) - - # Second request: - groups, sf, d = self.get_state_group_calls[1] - self.assertEqual(groups, (42,)) - # The state filter is narrowed to only request membership state, because - # the remainder of the state is already being queried in the first request! - self.assertEqual( - sf, StateFilter.freeze({EventTypes.Member: None}, include_others=False) - ) - self._complete_request_fake(groups, sf, d) - - # Check the results are correct - self.assertEqual( - self.get_success(req1), - {("test.type", "a"): "AAA", ("test.type", "b"): "BBB"}, - ) - self.assertEqual(self.get_success(req2), FAKE_STATE) - - def test_in_flight_requests_stop_being_in_flight(self) -> None: - """ - Tests that in-flight request deduplication doesn't somehow 'hold on' - to completed requests: once they're done, they're taken out of the - in-flight cache. - """ - req1 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.all() - ) - ) - self.pump(by=0.1) - - # This should have gone to the database - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - - # Complete the request right away. - self._complete_request_fake(*self.get_state_group_calls[0]) - self.assertTrue(req1.called) - - # Send off another request - req2 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.all() - ) - ) - self.pump(by=0.1) - - # It should have gone to the database again, because the previous request - # isn't in-flight and therefore isn't available for deduplication. - self.assertEqual(len(self.get_state_group_calls), 2) - self.assertFalse(req2.called) - - # Complete the request right away. - self._complete_request_fake(*self.get_state_group_calls[1]) - self.assertTrue(req2.called) - groups, sf, d = self.get_state_group_calls[0] - - self.assertEqual(self.get_success(req1), FAKE_STATE) - self.assertEqual(self.get_success(req2), FAKE_STATE) - - def test_inflight_requests_capped(self) -> None: - """ - Tests that the number of in-flight requests is capped to 5. - - - requests several pieces of state separately - (5 to hit the limit, 1 to 'shunt out', another that comes after the - group has been 'shunted out') - - checks to see that the torrent of requests is shunted out by - rewriting one of the filters as the 'all' state filter - - requests after that one do not cause any additional queries - """ - # 5 at the time of writing. - CAP_COUNT = MAX_INFLIGHT_REQUESTS_PER_GROUP - - reqs = [] - - # Request 7 different keys (1 to 7) of the `some.state` type. 
- for req_id in range(CAP_COUNT + 2): - reqs.append( - ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, - StateFilter.freeze( - {"some.state": {str(req_id + 1)}}, include_others=False - ), - ) - ) - ) - self.pump(by=0.1) - - # There should only be 6 calls to the database, not 7. - self.assertEqual(len(self.get_state_group_calls), CAP_COUNT + 1) - - # Assert that the first 5 are exact requests for the individual pieces - # wanted - for req_id in range(CAP_COUNT): - groups, sf, d = self.get_state_group_calls[req_id] - self.assertEqual( - sf, - StateFilter.freeze( - {"some.state": {str(req_id + 1)}}, include_others=False - ), - ) - - # The 6th request should be the 'all' state filter - groups, sf, d = self.get_state_group_calls[CAP_COUNT] - self.assertEqual(sf, StateFilter.all()) - - # Complete the queries and check which requests complete as a result - for req_id in range(CAP_COUNT): - # This request should not have been completed yet - self.assertFalse(reqs[req_id].called) - - groups, sf, d = self.get_state_group_calls[req_id] - self._complete_request_fake(groups, sf, d) - - # This should have only completed this one request - self.assertTrue(reqs[req_id].called) - - # Now complete the final query; the last 2 requests should complete - # as a result - self.assertFalse(reqs[CAP_COUNT].called) - self.assertFalse(reqs[CAP_COUNT + 1].called) - groups, sf, d = self.get_state_group_calls[CAP_COUNT] - self._complete_request_fake(groups, sf, d) - self.assertTrue(reqs[CAP_COUNT].called) - self.assertTrue(reqs[CAP_COUNT + 1].called) - - @parameterized.expand([(False,), (True,)]) - def test_ordering_of_request_reuse(self, reverse: bool) -> None: - """ - Tests that 'larger' in-flight requests are ordered first. - - This is mostly a design decision in order to prevent a request from - hanging on to multiple queries when it would have been sufficient to - hang on to only one bigger query. - - The 'size' of a state filter is a rough heuristic. - - - requests two pieces of state, one 'larger' than the other, but each - spawning a query - - requests a third piece of state - - completes the larger of the first two queries - - checks that the third request gets completed (and doesn't needlessly - wait for the other query) - - Parameters: - reverse: whether to reverse the order of the initial requests, to ensure - that the effect doesn't depend on the order of request submission. - """ - - # We add in an extra state type to make sure that both requests spawn - # queries which are not optimised out. - state_filters = [ - StateFilter.freeze( - {"state.type": {"A"}, "other.state.type": {"a"}}, include_others=False - ), - StateFilter.freeze( - { - "state.type": None, - "other.state.type": {"b"}, - # The current rough size comparator uses the number of state types - # as an indicator of size. - # To influence it to make this state filter bigger than the previous one, - # we add another dummy state type. - "extra.state.type": {"c"}, - }, - include_others=False, - ), - ] - - if reverse: - # For fairness, we perform one test run with the list reversed. - state_filters.reverse() - smallest_state_filter_idx = 1 - biggest_state_filter_idx = 0 - else: - smallest_state_filter_idx = 0 - biggest_state_filter_idx = 1 - - # This assertion is for our own sanity more than anything else. 
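The sanity assertion that follows leans on `state_filter_rough_priority_comparator`, removed earlier in this diff, which maps a filter to `(-int(include_others), -len(types))` so that an ascending scan of the `SortedDict` visits the broadest in-flight requests first and a new request can cover itself with as few existing queries as possible. A toy demonstration, with `(include_others, type_count)` tuples standing in for real `StateFilter`s:

```python
from sortedcontainers import SortedDict


def rough_priority(filter_desc):
    include_others, n_types = filter_desc
    # Same shape as the removed comparator: 'larger' filters sort first.
    return (-int(include_others), -n_types)


inflight = SortedDict(rough_priority)
inflight[(False, 1)] = "one specific type"
inflight[(True, 0)] = "all state"
inflight[(False, 3)] = "three types"

# Ascending iteration is broadest-first:
# ['all state', 'three types', 'one specific type']
print(list(inflight.values()))
```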
- self.assertLess( - state_filter_rough_priority_comparator( - state_filters[biggest_state_filter_idx] - ), - state_filter_rough_priority_comparator( - state_filters[smallest_state_filter_idx] - ), - "Test invalid: bigger state filter is not actually bigger.", - ) - - # Spawn the initial two requests - for state_filter in state_filters: - ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, - state_filter, - ) - ) - - # Spawn a third request - req = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, - StateFilter.freeze( - { - "state.type": {"A"}, - }, - include_others=False, - ), - ) - ) - self.pump(by=0.1) - - self.assertFalse(req.called) - - # Complete the largest request's query to make sure that the final request - # only waits for that one (and doesn't needlessly wait for both queries) - self._complete_request_fake( - *self.get_state_group_calls[biggest_state_filter_idx] - ) - - # That should have been sufficient to complete the third request - self.assertTrue(req.called) -- cgit 1.4.1 From a43a5ea5bfefcb25d90209958fb014a3b5e0ead0 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 10:38:10 +0000 Subject: Remove misleading newsfile from #12126 which backs out an unreleased change. --- changelog.d/12126.removal | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/12126.removal diff --git a/changelog.d/12126.removal b/changelog.d/12126.removal deleted file mode 100644 index 8c8bf6ee7e..0000000000 --- a/changelog.d/12126.removal +++ /dev/null @@ -1 +0,0 @@ -Back out in-flight state caching changes. \ No newline at end of file -- cgit 1.4.1 From 879e4a7bd7a90cda4c8ea908aede53d8e038ca7c Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 10:45:16 +0000 Subject: 1.54.0rc1 --- CHANGES.md | 96 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/11599.doc | 1 - changelog.d/11617.feature | 1 - changelog.d/11808.misc | 1 - changelog.d/11835.feature | 1 - changelog.d/11865.removal | 1 - changelog.d/11900.misc | 1 - changelog.d/11972.misc | 1 - changelog.d/11974.misc | 1 - changelog.d/11984.misc | 1 - changelog.d/11985.feature | 1 - changelog.d/11991.misc | 1 - changelog.d/11992.bugfix | 1 - changelog.d/11994.misc | 1 - changelog.d/11996.misc | 1 - changelog.d/11997.docker | 1 - changelog.d/11999.bugfix | 1 - changelog.d/12000.feature | 1 - changelog.d/12001.feature | 1 - changelog.d/12003.doc | 1 - changelog.d/12004.doc | 1 - changelog.d/12005.misc | 1 - changelog.d/12008.removal | 1 - changelog.d/12009.feature | 1 - changelog.d/12011.misc | 1 - changelog.d/12012.misc | 1 - changelog.d/12013.misc | 1 - changelog.d/12015.misc | 1 - changelog.d/12016.misc | 1 - changelog.d/12018.removal | 1 - changelog.d/12019.misc | 1 - changelog.d/12020.feature | 1 - changelog.d/12021.feature | 1 - changelog.d/12022.feature | 1 - changelog.d/12024.bugfix | 1 - changelog.d/12025.misc | 1 - changelog.d/12030.misc | 1 - changelog.d/12031.misc | 1 - changelog.d/12034.misc | 1 - changelog.d/12037.bugfix | 1 - changelog.d/12039.misc | 1 - changelog.d/12041.misc | 1 - changelog.d/12051.misc | 1 - changelog.d/12052.misc | 1 - changelog.d/12056.bugfix | 1 - changelog.d/12058.feature | 1 - changelog.d/12059.misc | 1 - changelog.d/12060.misc | 1 - changelog.d/12062.feature | 1 - changelog.d/12063.misc | 1 - changelog.d/12066.misc | 1 - changelog.d/12067.feature | 1 - changelog.d/12068.misc | 1 - changelog.d/12069.misc | 1 - changelog.d/12070.misc | 1 - 
changelog.d/12072.misc | 1 - changelog.d/12073.removal | 1 - changelog.d/12077.bugfix | 1 - changelog.d/12084.misc | 1 - changelog.d/12088.misc | 1 - changelog.d/12089.bugfix | 1 - changelog.d/12092.misc | 1 - changelog.d/12094.misc | 1 - changelog.d/12098.bugfix | 1 - changelog.d/12099.misc | 1 - changelog.d/12100.bugfix | 1 - changelog.d/12105.bugfix | 1 - changelog.d/12106.misc | 1 - changelog.d/12109.misc | 1 - changelog.d/12111.misc | 1 - changelog.d/12112.docker | 1 - changelog.d/12119.misc | 1 - debian/changelog | 6 +++ synapse/__init__.py | 2 +- 74 files changed, 103 insertions(+), 72 deletions(-) delete mode 100644 changelog.d/11599.doc delete mode 100644 changelog.d/11617.feature delete mode 100644 changelog.d/11808.misc delete mode 100644 changelog.d/11835.feature delete mode 100644 changelog.d/11865.removal delete mode 100644 changelog.d/11900.misc delete mode 100644 changelog.d/11972.misc delete mode 100644 changelog.d/11974.misc delete mode 100644 changelog.d/11984.misc delete mode 100644 changelog.d/11985.feature delete mode 100644 changelog.d/11991.misc delete mode 100644 changelog.d/11992.bugfix delete mode 100644 changelog.d/11994.misc delete mode 100644 changelog.d/11996.misc delete mode 100644 changelog.d/11997.docker delete mode 100644 changelog.d/11999.bugfix delete mode 100644 changelog.d/12000.feature delete mode 100644 changelog.d/12001.feature delete mode 100644 changelog.d/12003.doc delete mode 100644 changelog.d/12004.doc delete mode 100644 changelog.d/12005.misc delete mode 100644 changelog.d/12008.removal delete mode 100644 changelog.d/12009.feature delete mode 100644 changelog.d/12011.misc delete mode 100644 changelog.d/12012.misc delete mode 100644 changelog.d/12013.misc delete mode 100644 changelog.d/12015.misc delete mode 100644 changelog.d/12016.misc delete mode 100644 changelog.d/12018.removal delete mode 100644 changelog.d/12019.misc delete mode 100644 changelog.d/12020.feature delete mode 100644 changelog.d/12021.feature delete mode 100644 changelog.d/12022.feature delete mode 100644 changelog.d/12024.bugfix delete mode 100644 changelog.d/12025.misc delete mode 100644 changelog.d/12030.misc delete mode 100644 changelog.d/12031.misc delete mode 100644 changelog.d/12034.misc delete mode 100644 changelog.d/12037.bugfix delete mode 100644 changelog.d/12039.misc delete mode 100644 changelog.d/12041.misc delete mode 100644 changelog.d/12051.misc delete mode 100644 changelog.d/12052.misc delete mode 100644 changelog.d/12056.bugfix delete mode 100644 changelog.d/12058.feature delete mode 100644 changelog.d/12059.misc delete mode 100644 changelog.d/12060.misc delete mode 100644 changelog.d/12062.feature delete mode 100644 changelog.d/12063.misc delete mode 100644 changelog.d/12066.misc delete mode 100644 changelog.d/12067.feature delete mode 100644 changelog.d/12068.misc delete mode 100644 changelog.d/12069.misc delete mode 100644 changelog.d/12070.misc delete mode 100644 changelog.d/12072.misc delete mode 100644 changelog.d/12073.removal delete mode 100644 changelog.d/12077.bugfix delete mode 100644 changelog.d/12084.misc delete mode 100644 changelog.d/12088.misc delete mode 100644 changelog.d/12089.bugfix delete mode 100644 changelog.d/12092.misc delete mode 100644 changelog.d/12094.misc delete mode 100644 changelog.d/12098.bugfix delete mode 100644 changelog.d/12099.misc delete mode 100644 changelog.d/12100.bugfix delete mode 100644 changelog.d/12105.bugfix delete mode 100644 changelog.d/12106.misc delete mode 100644 changelog.d/12109.misc delete mode 100644 
changelog.d/12111.misc delete mode 100644 changelog.d/12112.docker delete mode 100644 changelog.d/12119.misc diff --git a/CHANGES.md b/CHANGES.md index 81333097ae..4f0318970e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,99 @@ +Synapse 1.54.0rc1 (2022-03-02) +============================== + +Features +-------- + +- Add support for MSC3202: sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) +- Make a `POST` to `/rooms//receipt/m.read/` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. ([\#11835](https://github.com/matrix-org/synapse/issues/11835)) +- Fetch images when previewing Twitter URLs. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) +- Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. ([\#12000](https://github.com/matrix-org/synapse/issues/12000)) +- Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). ([\#12001](https://github.com/matrix-org/synapse/issues/12001), [\#12067](https://github.com/matrix-org/synapse/issues/12067)) +- Enable modules to set a custom display name when registering a user. ([\#12009](https://github.com/matrix-org/synapse/issues/12009)) +- Advertise Matrix 1.1 support on `/_matrix/client/versions`. ([\#12020](https://github.com/matrix-org/synapse/issues/12020)) +- Support only the stable identifier for [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069)'s `is_guest` on `/_matrix/client/v3/account/whoami`. ([\#12021](https://github.com/matrix-org/synapse/issues/12021)) +- Advertise Matrix 1.2 support on `/_matrix/client/versions`. ([\#12022](https://github.com/matrix-org/synapse/issues/12022)) +- Use room version 9 as the default room version (per [MSC3589](https://github.com/matrix-org/matrix-doc/pull/3589)). ([\#12058](https://github.com/matrix-org/synapse/issues/12058)) +- Add module callbacks to react to user deactivation status changes (i.e. deactivations and reactivations) and profile updates. ([\#12062](https://github.com/matrix-org/synapse/issues/12062)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse v1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992)) +- Fix long standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) +- Fix 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) +- Properly fix a long-standing bug where wrong data could be inserted in the `event_search` table when using sqlite. This could block running `synapse_port_db` with an "argument of type 'int' is not iterable" error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) +- Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens. 
([\#12056](https://github.com/matrix-org/synapse/issues/12056)) +- Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. ([\#12077](https://github.com/matrix-org/synapse/issues/12077)) +- Fix occasional 'Unhandled error in Deferred' error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089)) +- Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#12098](https://github.com/matrix-org/synapse/issues/12098)) +- Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. ([\#12100](https://github.com/matrix-org/synapse/issues/12100)) +- Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly. ([\#12105](https://github.com/matrix-org/synapse/issues/12105)) + + +Updates to the Docker image +--------------------------- + +- The docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. ([\#11997](https://github.com/matrix-org/synapse/issues/11997)) +- Use Python 3.9 in Docker images by default. ([\#12112](https://github.com/matrix-org/synapse/issues/12112)) + + +Improved Documentation +---------------------- + +- Document support for the `to_device`, `account_data`, `receipts`, and `presence` stream writers for workers. ([\#11599](https://github.com/matrix-org/synapse/issues/11599)) +- Explain the meaning of spam checker callbacks' return values. ([\#12003](https://github.com/matrix-org/synapse/issues/12003)) +- Clarify information about external Identity Provider IDs. ([\#12004](https://github.com/matrix-org/synapse/issues/12004)) + + +Deprecations and Removals +------------------------- + +- Deprecate using `synctl` with the config option `synctl_cache_factor` and print a warning if a user still uses this option. ([\#11865](https://github.com/matrix-org/synapse/issues/11865)) +- Remove support for the legacy structured logging configuration (please see the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#legacy-structured-logging-configuration-removal) if you are using `structured: true` in the Synapse configuration). ([\#12008](https://github.com/matrix-org/synapse/issues/12008)) +- Drop support for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283) unstable flags now that the stable flags are supported. ([\#12018](https://github.com/matrix-org/synapse/issues/12018)) +- Remove the unstable `/spaces` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12073](https://github.com/matrix-org/synapse/issues/12073)) + + +Internal Changes +---------------- + +- Make method `get_room_version` use cached `get_room_version_id`. ([\#11808](https://github.com/matrix-org/synapse/issues/11808)) +- Remove unnecessary condition on knock->leave auth rule check. ([\#11900](https://github.com/matrix-org/synapse/issues/11900)) +- Add tests for device list changes between local users. ([\#11972](https://github.com/matrix-org/synapse/issues/11972)) +- Optimise calculating device_list changes in `/sync`. ([\#11974](https://github.com/matrix-org/synapse/issues/11974)) +- Add missing type hints to storage classes. ([\#11984](https://github.com/matrix-org/synapse/issues/11984)) +- Refactor the search code for improved readability.
([\#11991](https://github.com/matrix-org/synapse/issues/11991)) +- Move common deduplication code down into `_auth_and_persist_outliers`. ([\#11994](https://github.com/matrix-org/synapse/issues/11994)) +- Limit concurrent joins from applications services. ([\#11996](https://github.com/matrix-org/synapse/issues/11996)) +- Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. ([\#12005](https://github.com/matrix-org/synapse/issues/12005), [\#12039](https://github.com/matrix-org/synapse/issues/12039)) +- Preparation for faster-room-join work: parse msc3706 fields in send_join response. ([\#12011](https://github.com/matrix-org/synapse/issues/12011)) +- Preparation for faster-room-join work: persist information on which events and rooms have partial state to the database. ([\#12012](https://github.com/matrix-org/synapse/issues/12012)) +- Preparation for faster-room-join work: Support for calling `/federation/v1/state` on a remote server. ([\#12013](https://github.com/matrix-org/synapse/issues/12013)) +- Configure `tox` to use `venv` rather than `virtualenv`. ([\#12015](https://github.com/matrix-org/synapse/issues/12015)) +- Fix bug in `StateFilter.return_expanded()` and add some tests. ([\#12016](https://github.com/matrix-org/synapse/issues/12016)) +- Use Matrix v1.1 endpoints (`/_matrix/client/v3/auth/...`) in fallback auth HTML forms. ([\#12019](https://github.com/matrix-org/synapse/issues/12019)) +- Update the `olddeps` CI job to use an old version of `markupsafe`. ([\#12025](https://github.com/matrix-org/synapse/issues/12025)) +- Upgrade mypy to version 0.931. ([\#12030](https://github.com/matrix-org/synapse/issues/12030)) +- Remove legacy `HomeServer.get_datastore()`. ([\#12031](https://github.com/matrix-org/synapse/issues/12031), [\#12070](https://github.com/matrix-org/synapse/issues/12070)) +- Minor typing fixes. ([\#12034](https://github.com/matrix-org/synapse/issues/12034), [\#12069](https://github.com/matrix-org/synapse/issues/12069)) +- After joining a room, create a dedicated logcontext to process the queued events. ([\#12041](https://github.com/matrix-org/synapse/issues/12041)) +- Tidy up GitHub Actions config which builds distributions for PyPI. ([\#12051](https://github.com/matrix-org/synapse/issues/12051)) +- Move configuration out of `setup.cfg`. ([\#12052](https://github.com/matrix-org/synapse/issues/12052), [\#12059](https://github.com/matrix-org/synapse/issues/12059)) +- Fix error message when a worker process fails to talk to another worker process. ([\#12060](https://github.com/matrix-org/synapse/issues/12060)) +- Fix using the complement.sh script without specifying a dir or a branch. Contributed by Nico on behalf of Famedly. ([\#12063](https://github.com/matrix-org/synapse/issues/12063)) +- Add type hints to `tests/rest/client`. ([\#12066](https://github.com/matrix-org/synapse/issues/12066), [\#12072](https://github.com/matrix-org/synapse/issues/12072), [\#12084](https://github.com/matrix-org/synapse/issues/12084), [\#12094](https://github.com/matrix-org/synapse/issues/12094)) +- Add some logging to `/sync` to try and track down #11916. ([\#12068](https://github.com/matrix-org/synapse/issues/12068)) +- Inspect application dependencies using `importlib.metadata` or its backport. ([\#12088](https://github.com/matrix-org/synapse/issues/12088)) +- Use `assertEqual` instead of the deprecated `assertEquals` in test code.
([\#12092](https://github.com/matrix-org/synapse/issues/12092)) +- Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to /versions. ([\#12099](https://github.com/matrix-org/synapse/issues/12099)) +- Add `stop_cancellation` utility function to stop `Deferred`s from being cancelled. ([\#12106](https://github.com/matrix-org/synapse/issues/12106)) +- Improve exception handling for concurrent execution. ([\#12109](https://github.com/matrix-org/synapse/issues/12109)) +- Advertise support for Python 3.10 in packaging files. ([\#12111](https://github.com/matrix-org/synapse/issues/12111)) +- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12119](https://github.com/matrix-org/synapse/issues/12119)) + + Synapse 1.53.0 (2022-02-22) =========================== diff --git a/changelog.d/11599.doc b/changelog.d/11599.doc deleted file mode 100644 index f07cfbef4e..0000000000 --- a/changelog.d/11599.doc +++ /dev/null @@ -1 +0,0 @@ -Document support for the `to_device`, `account_data`, `receipts`, and `presence` stream writers for workers. diff --git a/changelog.d/11617.feature b/changelog.d/11617.feature deleted file mode 100644 index cf03f00e7c..0000000000 --- a/changelog.d/11617.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for MSC3202: sending one-time key counts and fallback key usage states to Application Services. \ No newline at end of file diff --git a/changelog.d/11808.misc b/changelog.d/11808.misc deleted file mode 100644 index cdc5fc75b7..0000000000 --- a/changelog.d/11808.misc +++ /dev/null @@ -1 +0,0 @@ -Make method `get_room_version` use cached `get_room_version_id`. diff --git a/changelog.d/11835.feature b/changelog.d/11835.feature deleted file mode 100644 index 7cee39b08c..0000000000 --- a/changelog.d/11835.feature +++ /dev/null @@ -1 +0,0 @@ -Make a `POST` to `/rooms//receipt/m.read/` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. diff --git a/changelog.d/11865.removal b/changelog.d/11865.removal deleted file mode 100644 index 9fcabfc720..0000000000 --- a/changelog.d/11865.removal +++ /dev/null @@ -1 +0,0 @@ -Deprecate using `synctl` with the config option `synctl_cache_factor` and print a warning if a user still uses this option. diff --git a/changelog.d/11900.misc b/changelog.d/11900.misc deleted file mode 100644 index edd2852fd4..0000000000 --- a/changelog.d/11900.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary condition on knock->leave auth rule check. \ No newline at end of file diff --git a/changelog.d/11972.misc b/changelog.d/11972.misc deleted file mode 100644 index 29c38bfd82..0000000000 --- a/changelog.d/11972.misc +++ /dev/null @@ -1 +0,0 @@ -Add tests for device list changes between local users. \ No newline at end of file diff --git a/changelog.d/11974.misc b/changelog.d/11974.misc deleted file mode 100644 index 1debad2361..0000000000 --- a/changelog.d/11974.misc +++ /dev/null @@ -1 +0,0 @@ -Optimise calculating device_list changes in `/sync`. diff --git a/changelog.d/11984.misc b/changelog.d/11984.misc deleted file mode 100644 index 8e405b9226..0000000000 --- a/changelog.d/11984.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. \ No newline at end of file diff --git a/changelog.d/11985.feature b/changelog.d/11985.feature deleted file mode 100644 index 120d888a49..0000000000 --- a/changelog.d/11985.feature +++ /dev/null @@ -1 +0,0 @@ -Fetch images when previewing Twitter URLs. 
Contributed by @AndrewRyanChama. diff --git a/changelog.d/11991.misc b/changelog.d/11991.misc deleted file mode 100644 index 34a3b3a6b9..0000000000 --- a/changelog.d/11991.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor the search code for improved readability. diff --git a/changelog.d/11992.bugfix b/changelog.d/11992.bugfix deleted file mode 100644 index f73c86bb25..0000000000 --- a/changelog.d/11992.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. diff --git a/changelog.d/11994.misc b/changelog.d/11994.misc deleted file mode 100644 index d64297dd78..0000000000 --- a/changelog.d/11994.misc +++ /dev/null @@ -1 +0,0 @@ -Move common deduplication code down into `_auth_and_persist_outliers`. diff --git a/changelog.d/11996.misc b/changelog.d/11996.misc deleted file mode 100644 index 6c675fd193..0000000000 --- a/changelog.d/11996.misc +++ /dev/null @@ -1 +0,0 @@ -Limit concurrent joins from applications services. \ No newline at end of file diff --git a/changelog.d/11997.docker b/changelog.d/11997.docker deleted file mode 100644 index 1b3271457e..0000000000 --- a/changelog.d/11997.docker +++ /dev/null @@ -1 +0,0 @@ -The docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. diff --git a/changelog.d/11999.bugfix b/changelog.d/11999.bugfix deleted file mode 100644 index fd84095900..0000000000 --- a/changelog.d/11999.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. diff --git a/changelog.d/12000.feature b/changelog.d/12000.feature deleted file mode 100644 index 246cc87f0b..0000000000 --- a/changelog.d/12000.feature +++ /dev/null @@ -1 +0,0 @@ -Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. diff --git a/changelog.d/12001.feature b/changelog.d/12001.feature deleted file mode 100644 index dc1153c49e..0000000000 --- a/changelog.d/12001.feature +++ /dev/null @@ -1 +0,0 @@ -Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). diff --git a/changelog.d/12003.doc b/changelog.d/12003.doc deleted file mode 100644 index 1ac8163559..0000000000 --- a/changelog.d/12003.doc +++ /dev/null @@ -1 +0,0 @@ -Explain the meaning of spam checker callbacks' return values. diff --git a/changelog.d/12004.doc b/changelog.d/12004.doc deleted file mode 100644 index 0b4baef210..0000000000 --- a/changelog.d/12004.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify information about external Identity Provider IDs. diff --git a/changelog.d/12005.misc b/changelog.d/12005.misc deleted file mode 100644 index 45e21dbe59..0000000000 --- a/changelog.d/12005.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. diff --git a/changelog.d/12008.removal b/changelog.d/12008.removal deleted file mode 100644 index 57599d9ee9..0000000000 --- a/changelog.d/12008.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for the legacy structured logging configuration (please see the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#legacy-structured-logging-configuration-removal) if you are using `structured: true` in the Synapse configuration).
diff --git a/changelog.d/12009.feature b/changelog.d/12009.feature deleted file mode 100644 index c8a531481e..0000000000 --- a/changelog.d/12009.feature +++ /dev/null @@ -1 +0,0 @@ -Enable modules to set a custom display name when registering a user. diff --git a/changelog.d/12011.misc b/changelog.d/12011.misc deleted file mode 100644 index 258b0e389f..0000000000 --- a/changelog.d/12011.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for faster-room-join work: parse msc3706 fields in send_join response. diff --git a/changelog.d/12012.misc b/changelog.d/12012.misc deleted file mode 100644 index a473f41e78..0000000000 --- a/changelog.d/12012.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for faster-room-join work: persist information on which events and rooms have partial state to the database. diff --git a/changelog.d/12013.misc b/changelog.d/12013.misc deleted file mode 100644 index c0fca8dccb..0000000000 --- a/changelog.d/12013.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for faster-room-join work: Support for calling `/federation/v1/state` on a remote server. diff --git a/changelog.d/12015.misc b/changelog.d/12015.misc deleted file mode 100644 index 3aa32ab4cf..0000000000 --- a/changelog.d/12015.misc +++ /dev/null @@ -1 +0,0 @@ -Configure `tox` to use `venv` rather than `virtualenv`. diff --git a/changelog.d/12016.misc b/changelog.d/12016.misc deleted file mode 100644 index 8856ef46a9..0000000000 --- a/changelog.d/12016.misc +++ /dev/null @@ -1 +0,0 @@ -Fix bug in `StateFilter.return_expanded()` and add some tests. \ No newline at end of file diff --git a/changelog.d/12018.removal b/changelog.d/12018.removal deleted file mode 100644 index e940b62228..0000000000 --- a/changelog.d/12018.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283) unstable flags now that the stable flags are supported. diff --git a/changelog.d/12019.misc b/changelog.d/12019.misc deleted file mode 100644 index b2186320ea..0000000000 --- a/changelog.d/12019.misc +++ /dev/null @@ -1 +0,0 @@ -Use Matrix v1.1 endpoints (`/_matrix/client/v3/auth/...`) in fallback auth HTML forms. \ No newline at end of file diff --git a/changelog.d/12020.feature b/changelog.d/12020.feature deleted file mode 100644 index 1ac9d2060e..0000000000 --- a/changelog.d/12020.feature +++ /dev/null @@ -1 +0,0 @@ -Advertise Matrix 1.1 support on `/_matrix/client/versions`. \ No newline at end of file diff --git a/changelog.d/12021.feature b/changelog.d/12021.feature deleted file mode 100644 index 01378df8ca..0000000000 --- a/changelog.d/12021.feature +++ /dev/null @@ -1 +0,0 @@ -Support only the stable identifier for [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069)'s `is_guest` on `/_matrix/client/v3/account/whoami`. \ No newline at end of file diff --git a/changelog.d/12022.feature b/changelog.d/12022.feature deleted file mode 100644 index 188fb12570..0000000000 --- a/changelog.d/12022.feature +++ /dev/null @@ -1 +0,0 @@ -Advertise Matrix 1.2 support on `/_matrix/client/versions`. \ No newline at end of file diff --git a/changelog.d/12024.bugfix b/changelog.d/12024.bugfix deleted file mode 100644 index 59bcdb93a5..0000000000 --- a/changelog.d/12024.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. 
diff --git a/changelog.d/12025.misc b/changelog.d/12025.misc deleted file mode 100644 index d9475a7718..0000000000 --- a/changelog.d/12025.misc +++ /dev/null @@ -1 +0,0 @@ -Update the `olddeps` CI job to use an old version of `markupsafe`. diff --git a/changelog.d/12030.misc b/changelog.d/12030.misc deleted file mode 100644 index 607ee97ce6..0000000000 --- a/changelog.d/12030.misc +++ /dev/null @@ -1 +0,0 @@ -Upgrade mypy to version 0.931. diff --git a/changelog.d/12031.misc b/changelog.d/12031.misc deleted file mode 100644 index d4bedc6b97..0000000000 --- a/changelog.d/12031.misc +++ /dev/null @@ -1 +0,0 @@ -Remove legacy `HomeServer.get_datastore()`. diff --git a/changelog.d/12034.misc b/changelog.d/12034.misc deleted file mode 100644 index 8374a63220..0000000000 --- a/changelog.d/12034.misc +++ /dev/null @@ -1 +0,0 @@ -Minor typing fixes. diff --git a/changelog.d/12037.bugfix b/changelog.d/12037.bugfix deleted file mode 100644 index 9295cb4dc0..0000000000 --- a/changelog.d/12037.bugfix +++ /dev/null @@ -1 +0,0 @@ -Properly fix a long-standing bug where wrong data could be inserted in the `event_search` table when using sqlite. This could block running `synapse_port_db` with an "argument of type 'int' is not iterable" error. This bug was partially fixed by a change in Synapse 1.44.0. diff --git a/changelog.d/12039.misc b/changelog.d/12039.misc deleted file mode 100644 index 45e21dbe59..0000000000 --- a/changelog.d/12039.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. diff --git a/changelog.d/12041.misc b/changelog.d/12041.misc deleted file mode 100644 index e56dc093de..0000000000 --- a/changelog.d/12041.misc +++ /dev/null @@ -1 +0,0 @@ -After joining a room, create a dedicated logcontext to process the queued events. diff --git a/changelog.d/12051.misc b/changelog.d/12051.misc deleted file mode 100644 index 9959191352..0000000000 --- a/changelog.d/12051.misc +++ /dev/null @@ -1 +0,0 @@ -Tidy up GitHub Actions config which builds distributions for PyPI. \ No newline at end of file diff --git a/changelog.d/12052.misc b/changelog.d/12052.misc deleted file mode 100644 index 11755ae61b..0000000000 --- a/changelog.d/12052.misc +++ /dev/null @@ -1 +0,0 @@ -Move configuration out of `setup.cfg`. diff --git a/changelog.d/12056.bugfix b/changelog.d/12056.bugfix deleted file mode 100644 index 210e30c63f..0000000000 --- a/changelog.d/12056.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens. \ No newline at end of file diff --git a/changelog.d/12058.feature b/changelog.d/12058.feature deleted file mode 100644 index 7b71692229..0000000000 --- a/changelog.d/12058.feature +++ /dev/null @@ -1 +0,0 @@ -Use room version 9 as the default room version (per [MSC3589](https://github.com/matrix-org/matrix-doc/pull/3589)). diff --git a/changelog.d/12059.misc b/changelog.d/12059.misc deleted file mode 100644 index 9ba4759d99..0000000000 --- a/changelog.d/12059.misc +++ /dev/null @@ -1 +0,0 @@ -Move configuration out of `setup.cfg`. \ No newline at end of file diff --git a/changelog.d/12060.misc b/changelog.d/12060.misc deleted file mode 100644 index d771e6a1b3..0000000000 --- a/changelog.d/12060.misc +++ /dev/null @@ -1 +0,0 @@ -Fix error message when a worker process fails to talk to another worker process. 
diff --git a/changelog.d/12062.feature b/changelog.d/12062.feature deleted file mode 100644 index 46a606709d..0000000000 --- a/changelog.d/12062.feature +++ /dev/null @@ -1 +0,0 @@ -Add module callbacks to react to user deactivation status changes (i.e. deactivations and reactivations) and profile updates. diff --git a/changelog.d/12063.misc b/changelog.d/12063.misc deleted file mode 100644 index e48c5dd08b..0000000000 --- a/changelog.d/12063.misc +++ /dev/null @@ -1 +0,0 @@ -Fix using the complement.sh script without specifying a dir or a branch. Contributed by Nico on behalf of Famedly. diff --git a/changelog.d/12066.misc b/changelog.d/12066.misc deleted file mode 100644 index 0360dbd61e..0000000000 --- a/changelog.d/12066.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `tests/rest/client`. diff --git a/changelog.d/12067.feature b/changelog.d/12067.feature deleted file mode 100644 index dc1153c49e..0000000000 --- a/changelog.d/12067.feature +++ /dev/null @@ -1 +0,0 @@ -Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). diff --git a/changelog.d/12068.misc b/changelog.d/12068.misc deleted file mode 100644 index 72b211e4f5..0000000000 --- a/changelog.d/12068.misc +++ /dev/null @@ -1 +0,0 @@ -Add some logging to `/sync` to try and track down #11916. diff --git a/changelog.d/12069.misc b/changelog.d/12069.misc deleted file mode 100644 index 8374a63220..0000000000 --- a/changelog.d/12069.misc +++ /dev/null @@ -1 +0,0 @@ -Minor typing fixes. diff --git a/changelog.d/12070.misc b/changelog.d/12070.misc deleted file mode 100644 index d4bedc6b97..0000000000 --- a/changelog.d/12070.misc +++ /dev/null @@ -1 +0,0 @@ -Remove legacy `HomeServer.get_datastore()`. diff --git a/changelog.d/12072.misc b/changelog.d/12072.misc deleted file mode 100644 index 0360dbd61e..0000000000 --- a/changelog.d/12072.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `tests/rest/client`. diff --git a/changelog.d/12073.removal b/changelog.d/12073.removal deleted file mode 100644 index 1f39792712..0000000000 --- a/changelog.d/12073.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the unstable `/spaces` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). diff --git a/changelog.d/12077.bugfix b/changelog.d/12077.bugfix deleted file mode 100644 index 1bce82082d..0000000000 --- a/changelog.d/12077.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. diff --git a/changelog.d/12084.misc b/changelog.d/12084.misc deleted file mode 100644 index 0360dbd61e..0000000000 --- a/changelog.d/12084.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `tests/rest/client`. diff --git a/changelog.d/12088.misc b/changelog.d/12088.misc deleted file mode 100644 index ce4213650c..0000000000 --- a/changelog.d/12088.misc +++ /dev/null @@ -1 +0,0 @@ -Inspect application dependencies using `importlib.metadata` or its backport. \ No newline at end of file diff --git a/changelog.d/12089.bugfix b/changelog.d/12089.bugfix deleted file mode 100644 index 27172c4828..0000000000 --- a/changelog.d/12089.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix occasional 'Unhandled error in Deferred' error message. diff --git a/changelog.d/12092.misc b/changelog.d/12092.misc deleted file mode 100644 index 62653d6f8d..0000000000 --- a/changelog.d/12092.misc +++ /dev/null @@ -1 +0,0 @@ -User `assertEqual` instead of the deprecated `assertEquals` in test code. 
diff --git a/changelog.d/12094.misc b/changelog.d/12094.misc deleted file mode 100644 index 0360dbd61e..0000000000 --- a/changelog.d/12094.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `tests/rest/client`. diff --git a/changelog.d/12098.bugfix b/changelog.d/12098.bugfix deleted file mode 100644 index 6b696692e3..0000000000 --- a/changelog.d/12098.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. \ No newline at end of file diff --git a/changelog.d/12099.misc b/changelog.d/12099.misc deleted file mode 100644 index 0553825dbc..0000000000 --- a/changelog.d/12099.misc +++ /dev/null @@ -1 +0,0 @@ -Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to /versions. diff --git a/changelog.d/12100.bugfix b/changelog.d/12100.bugfix deleted file mode 100644 index 181095ad99..0000000000 --- a/changelog.d/12100.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. diff --git a/changelog.d/12105.bugfix b/changelog.d/12105.bugfix deleted file mode 100644 index f42e63e01f..0000000000 --- a/changelog.d/12105.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly. diff --git a/changelog.d/12106.misc b/changelog.d/12106.misc deleted file mode 100644 index d918e9e3b1..0000000000 --- a/changelog.d/12106.misc +++ /dev/null @@ -1 +0,0 @@ -Add `stop_cancellation` utility function to stop `Deferred`s from being cancelled. diff --git a/changelog.d/12109.misc b/changelog.d/12109.misc deleted file mode 100644 index 3295e49f43..0000000000 --- a/changelog.d/12109.misc +++ /dev/null @@ -1 +0,0 @@ -Improve exception handling for concurrent execution. diff --git a/changelog.d/12111.misc b/changelog.d/12111.misc deleted file mode 100644 index be84789c9d..0000000000 --- a/changelog.d/12111.misc +++ /dev/null @@ -1 +0,0 @@ -Advertise support for Python 3.10 in packaging files. \ No newline at end of file diff --git a/changelog.d/12112.docker b/changelog.d/12112.docker deleted file mode 100644 index b9e630653d..0000000000 --- a/changelog.d/12112.docker +++ /dev/null @@ -1 +0,0 @@ -Use Python 3.9 in Docker images by default. \ No newline at end of file diff --git a/changelog.d/12119.misc b/changelog.d/12119.misc deleted file mode 100644 index f02d140f38..0000000000 --- a/changelog.d/12119.misc +++ /dev/null @@ -1 +0,0 @@ -Move CI checks out of tox, to facilitate a move to using poetry. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 574930c085..df3db85b8e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.54.0~rc1) stable; urgency=medium + + * New synapse release 1.54.0~rc1. + + -- Synapse Packaging team <packages@matrix.org>  Wed, 02 Mar 2022 10:43:22 +0000 + matrix-synapse-py3 (1.53.0) stable; urgency=medium * New synapse release 1.53.0.
diff --git a/synapse/__init__.py b/synapse/__init__.py index 903f2e815d..b21e1ed0f3 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ try: except ImportError: pass -__version__ = "1.53.0" +__version__ = "1.54.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when -- cgit 1.4.1 From d800108bb4e1272235aa6f5f80b2732cee9aa5bf Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 10:54:52 +0000 Subject: Reword changelog --- CHANGES.md | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 4f0318970e..5485e8d47e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,14 @@ Synapse 1.54.0rc1 (2022-03-02) ============================== +Please note that this will be the last release of Synapse that is compatible with Mjolnir 1.3.1 and earlier. +Administrators of servers which have the Mjolnir module installed are advised to upgrade Mjolnir to version 1.3.2 or later. + + Features -------- -- Add support for MSC3202: sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) +- Add support for [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) - Make a `POST` to `/rooms/<room_id>/receipt/m.read/<event_id>` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. ([\#11835](https://github.com/matrix-org/synapse/issues/11835)) - Fetch images when previewing Twitter URLs. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) - Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. ([\#12000](https://github.com/matrix-org/synapse/issues/12000)) @@ -22,8 +26,8 @@ Bugfixes - Fix a bug introduced in Synapse v1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992)) - Fix long standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) -- Fix 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) -- Properly fix a long-standing bug where wrong data could be inserted in the `event_search` table when using sqlite. This could block running `synapse_port_db` with an "argument of type 'int' is not iterable" error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) +- Fix a 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) +- Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an "argument of type 'int' is not iterable" error.
This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) - Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens. ([\#12056](https://github.com/matrix-org/synapse/issues/12056)) - Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. ([\#12077](https://github.com/matrix-org/synapse/issues/12077)) - Fix occasional 'Unhandled error in Deferred' error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089)) - Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#12098](https://github.com/matrix-org/synapse/issues/12098)) - Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. ([\#12100](https://github.com/matrix-org/synapse/issues/12100)) - Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly. ([\#12105](https://github.com/matrix-org/synapse/issues/12105)) Updates to the Docker image --------------------------- -- The docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. ([\#11997](https://github.com/matrix-org/synapse/issues/11997)) +- The Docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. ([\#11997](https://github.com/matrix-org/synapse/issues/11997)) - Use Python 3.9 in Docker images by default. ([\#12112](https://github.com/matrix-org/synapse/issues/12112)) @@ -59,35 +63,35 @@ Deprecations and Removals Internal Changes ---------------- -- Make method `get_room_version` use cached `get_room_version_id`. ([\#11808](https://github.com/matrix-org/synapse/issues/11808)) -- Remove unnecessary condition on knock->leave auth rule check. ([\#11900](https://github.com/matrix-org/synapse/issues/11900)) +- Make the `get_room_version` method use `get_room_version_id` to benefit from caching. ([\#11808](https://github.com/matrix-org/synapse/issues/11808)) +- Remove unnecessary condition on knock -> leave auth rule check. ([\#11900](https://github.com/matrix-org/synapse/issues/11900)) - Add tests for device list changes between local users. ([\#11972](https://github.com/matrix-org/synapse/issues/11972)) -- Optimise calculating device_list changes in `/sync`. ([\#11974](https://github.com/matrix-org/synapse/issues/11974)) +- Optimise calculating `device_list` changes in `/sync`. ([\#11974](https://github.com/matrix-org/synapse/issues/11974)) - Add missing type hints to storage classes. ([\#11984](https://github.com/matrix-org/synapse/issues/11984)) - Refactor the search code for improved readability. ([\#11991](https://github.com/matrix-org/synapse/issues/11991)) - Move common deduplication code down into `_auth_and_persist_outliers`. ([\#11994](https://github.com/matrix-org/synapse/issues/11994)) - Limit concurrent joins from application services. ([\#11996](https://github.com/matrix-org/synapse/issues/11996)) - Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. ([\#12005](https://github.com/matrix-org/synapse/issues/12005), [\#12039](https://github.com/matrix-org/synapse/issues/12039)) -- Preparation for faster-room-join work: parse msc3706 fields in send_join response. ([\#12011](https://github.com/matrix-org/synapse/issues/12011)) +- Preparation for faster-room-join work: parse MSC3706 fields in send_join response. ([\#12011](https://github.com/matrix-org/synapse/issues/12011)) - Preparation for faster-room-join work: persist information on which events and rooms have partial state to the database. ([\#12012](https://github.com/matrix-org/synapse/issues/12012)) - Preparation for faster-room-join work: Support for calling `/federation/v1/state` on a remote server.
([\#12013](https://github.com/matrix-org/synapse/issues/12013)) - Configure `tox` to use `venv` rather than `virtualenv`. ([\#12015](https://github.com/matrix-org/synapse/issues/12015)) - Fix bug in `StateFilter.return_expanded()` and add some tests. ([\#12016](https://github.com/matrix-org/synapse/issues/12016)) - Use Matrix v1.1 endpoints (`/_matrix/client/v3/auth/...`) in fallback auth HTML forms. ([\#12019](https://github.com/matrix-org/synapse/issues/12019)) - Update the `olddeps` CI job to use an old version of `markupsafe`. ([\#12025](https://github.com/matrix-org/synapse/issues/12025)) -- Upgrade mypy to version 0.931. ([\#12030](https://github.com/matrix-org/synapse/issues/12030)) +- Upgrade Mypy to version 0.931. ([\#12030](https://github.com/matrix-org/synapse/issues/12030)) - Remove legacy `HomeServer.get_datastore()`. ([\#12031](https://github.com/matrix-org/synapse/issues/12031), [\#12070](https://github.com/matrix-org/synapse/issues/12070)) - Minor typing fixes. ([\#12034](https://github.com/matrix-org/synapse/issues/12034), [\#12069](https://github.com/matrix-org/synapse/issues/12069)) - After joining a room, create a dedicated logcontext to process the queued events. ([\#12041](https://github.com/matrix-org/synapse/issues/12041)) - Tidy up GitHub Actions config which builds distributions for PyPI. ([\#12051](https://github.com/matrix-org/synapse/issues/12051)) - Move configuration out of `setup.cfg`. ([\#12052](https://github.com/matrix-org/synapse/issues/12052), [\#12059](https://github.com/matrix-org/synapse/issues/12059)) - Fix error message when a worker process fails to talk to another worker process. ([\#12060](https://github.com/matrix-org/synapse/issues/12060)) -- Fix using the complement.sh script without specifying a dir or a branch. Contributed by Nico on behalf of Famedly. ([\#12063](https://github.com/matrix-org/synapse/issues/12063)) +- Fix using the `complement.sh` script without specifying a directory or a branch. Contributed by Nico on behalf of Famedly. ([\#12063](https://github.com/matrix-org/synapse/issues/12063)) - Add type hints to `tests/rest/client`. ([\#12066](https://github.com/matrix-org/synapse/issues/12066), [\#12072](https://github.com/matrix-org/synapse/issues/12072), [\#12084](https://github.com/matrix-org/synapse/issues/12084), [\#12094](https://github.com/matrix-org/synapse/issues/12094)) - Add some logging to `/sync` to try and track down #11916. ([\#12068](https://github.com/matrix-org/synapse/issues/12068)) - Inspect application dependencies using `importlib.metadata` or its backport. ([\#12088](https://github.com/matrix-org/synapse/issues/12088)) -- User `assertEqual` instead of the deprecated `assertEquals` in test code. ([\#12092](https://github.com/matrix-org/synapse/issues/12092)) -- Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to /versions. ([\#12099](https://github.com/matrix-org/synapse/issues/12099)) +- Use `assertEqual` instead of the deprecated `assertEquals` in test code. ([\#12092](https://github.com/matrix-org/synapse/issues/12092)) +- Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to `/versions`. ([\#12099](https://github.com/matrix-org/synapse/issues/12099)) - Add `stop_cancellation` utility function to stop `Deferred`s from being cancelled. ([\#12106](https://github.com/matrix-org/synapse/issues/12106)) - Improve exception handling for concurrent execution. 
([\#12109](https://github.com/matrix-org/synapse/issues/12109)) - Advertise support for Python 3.10 in packaging files. ([\#12111](https://github.com/matrix-org/synapse/issues/12111)) -- cgit 1.4.1 From 010457011c83d4db96d84c43687ee5e291f7685f Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 11:17:36 +0000 Subject: Apply suggestions to changelog --- CHANGES.md | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 5485e8d47e..a81d0a4b14 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,14 +9,12 @@ Features -------- - Add support for [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) -- Make a `POST` to `/rooms/<room_id>/receipt/m.read/<event_id>` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. ([\#11835](https://github.com/matrix-org/synapse/issues/11835)) -- Fetch images when previewing Twitter URLs. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) +- Improve the preview that is produced when generating URL previews for some web pages. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) - Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. ([\#12000](https://github.com/matrix-org/synapse/issues/12000)) - Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). ([\#12001](https://github.com/matrix-org/synapse/issues/12001), [\#12067](https://github.com/matrix-org/synapse/issues/12067)) - Enable modules to set a custom display name when registering a user. ([\#12009](https://github.com/matrix-org/synapse/issues/12009)) -- Advertise Matrix 1.1 support on `/_matrix/client/versions`. ([\#12020](https://github.com/matrix-org/synapse/issues/12020)) +- Advertise Matrix 1.1 and 1.2 support on `/_matrix/client/versions`. ([\#12020](https://github.com/matrix-org/synapse/issues/12020), [\#12022](https://github.com/matrix-org/synapse/issues/12022)) - Support only the stable identifier for [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069)'s `is_guest` on `/_matrix/client/v3/account/whoami`. ([\#12021](https://github.com/matrix-org/synapse/issues/12021)) -- Advertise Matrix 1.2 support on `/_matrix/client/versions`. ([\#12022](https://github.com/matrix-org/synapse/issues/12022)) - Use room version 9 as the default room version (per [MSC3589](https://github.com/matrix-org/matrix-doc/pull/3589)). ([\#12058](https://github.com/matrix-org/synapse/issues/12058)) - Add module callbacks to react to user deactivation status changes (i.e. deactivations and reactivations) and profile updates. ([\#12062](https://github.com/matrix-org/synapse/issues/12062)) @@ -24,16 +22,17 @@ Features Bugfixes -------- -- Fix a bug introduced in Synapse v1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992)) -- Fix long standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room.
([\#11999](https://github.com/matrix-org/synapse/issues/11999)) +- Fix a bug introduced in Synapse 1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992)) +- Fix long-standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) - Fix a 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) -- Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an "argument of type 'int' is not iterable" error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) -- Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens. ([\#12056](https://github.com/matrix-org/synapse/issues/12056)) +- Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an `argument of type 'int' is not iterable` error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) +- Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens in version 1.38.0. ([\#12056](https://github.com/matrix-org/synapse/issues/12056)) - Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. ([\#12077](https://github.com/matrix-org/synapse/issues/12077)) -- Fix occasional 'Unhandled error in Deferred' error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089)) -- Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#12098](https://github.com/matrix-org/synapse/issues/12098)) +- Fix occasional `Unhandled error in Deferred` error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089)) +- Fix a bug introduced in Synapse 1.51.0 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#12098](https://github.com/matrix-org/synapse/issues/12098)) - Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. ([\#12100](https://github.com/matrix-org/synapse/issues/12100)) - Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly. ([\#12105](https://github.com/matrix-org/synapse/issues/12105)) +- Make a `POST` to `/rooms/<room_id>/receipt/m.read/<event_id>` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. This reduces server load and load on the receiving device.
([\#11835](https://github.com/matrix-org/synapse/issues/11835)) Updates to the Docker image -- cgit 1.4.1 From 6adb89ff007500ea9c41fb5bd1a9e644cc6397cd Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Mar 2022 06:56:16 -0500 Subject: Improve and refactor the tests for relations. (#12113) * Modernizes code (f-strings, etc.) * Fixes incorrect comments. * Splits the test case into two. * Factors out some duplicated code. --- changelog.d/12113.misc | 1 + tests/rest/client/test_relations.py | 386 +++++++++++++++++------------------- 2 files changed, 179 insertions(+), 208 deletions(-) create mode 100644 changelog.d/12113.misc diff --git a/changelog.d/12113.misc b/changelog.d/12113.misc new file mode 100644 index 0000000000..102e064053 --- /dev/null +++ b/changelog.d/12113.misc @@ -0,0 +1 @@ +Refactor the tests for event relations. diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index c8db45719e..a087cd7b21 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -34,7 +34,7 @@ from tests.test_utils import make_awaitable from tests.test_utils.event_injection import inject_event -class RelationsTestCase(unittest.HomeserverTestCase): +class BaseRelationsTestCase(unittest.HomeserverTestCase): servlets = [ relations.register_servlets, room.register_servlets, @@ -48,7 +48,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): def default_config(self) -> dict: # We need to enable msc1849 support for aggregations config = super().default_config() - config["experimental_msc1849_support_enabled"] = True # We enable frozen dicts as relations/edits change event contents, so we # want to test that we don't modify the events in the caches. @@ -67,10 +66,62 @@ class RelationsTestCase(unittest.HomeserverTestCase): res = self.helper.send(self.room, body="Hi!", tok=self.user_token) self.parent_id = res["event_id"] - def test_send_relation(self) -> None: - """Tests that sending a relation using the new /send_relation works - creates the right shape of event. + def _create_user(self, localpart: str) -> Tuple[str, str]: + user_id = self.register_user(localpart, "abc123") + access_token = self.login(localpart, "abc123") + + return user_id, access_token + + def _send_relation( + self, + relation_type: str, + event_type: str, + key: Optional[str] = None, + content: Optional[dict] = None, + access_token: Optional[str] = None, + parent_id: Optional[str] = None, + ) -> FakeChannel: + """Helper function to send a relation pointing at `self.parent_id` + + Args: + relation_type: One of `RelationTypes` + event_type: The type of the event to create + key: The aggregation key used for m.annotation relation type. + content: The content of the created event. Will be modified to configure + the m.relates_to key based on the other provided parameters. + access_token: The access token used to send the relation, defaults + to `self.user_token` + parent_id: The event_id this relation relates to. 
If None, then self.parent_id + + Returns: + FakeChannel """ + if not access_token: + access_token = self.user_token + + original_id = parent_id if parent_id else self.parent_id + + if content is None: + content = {} + content["m.relates_to"] = { + "event_id": original_id, + "rel_type": relation_type, + } + if key is not None: + content["m.relates_to"]["key"] = key + + channel = self.make_request( + "POST", + f"/_matrix/client/v3/rooms/{self.room}/send/{event_type}", + content, + access_token=access_token, + ) + return channel + + +class RelationsTestCase(BaseRelationsTestCase): + def test_send_relation(self) -> None: + """Tests that sending a relation works.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") self.assertEqual(200, channel.code, channel.json_body) @@ -79,7 +130,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, event_id), + f"/rooms/{self.room}/event/{event_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -317,9 +368,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): # Request /sync, limiting it such that only the latest event is returned # (and not the relation). - filter = urllib.parse.quote_plus( - '{"room": {"timeline": {"limit": 1}}}'.encode() - ) + filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 1}}}') channel = self.make_request( "GET", f"/sync?filter={filter}", access_token=self.user_token ) @@ -404,8 +453,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s?limit=1%s" - % (self.room, self.parent_id, from_token), + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1{from_token}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -544,8 +592,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s" - % (self.room, self.parent_id), + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -560,47 +607,13 @@ class RelationsTestCase(unittest.HomeserverTestCase): }, ) - def test_aggregation_redactions(self) -> None: - """Test that annotations get correctly aggregated after a redaction.""" - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) - to_redact_event_id = channel.json_body["event_id"] - - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Now lets redact one of the 'a' reactions - channel = self.make_request( - "POST", - "/_matrix/client/r0/rooms/%s/redact/%s" % (self.room, to_redact_event_id), - access_token=self.user_token, - content={}, - ) - self.assertEqual(200, channel.code, channel.json_body) - - channel = self.make_request( - "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s" - % (self.room, self.parent_id), - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual( - channel.json_body, - {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, - ) - def test_aggregation_must_be_annotation(self) -> None: """Test that aggregations must be 
annotations.""" channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s/%s?limit=1" - % (self.room, self.parent_id, RelationTypes.REPLACE), + f"/_matrix/client/unstable/rooms/{self.room}/aggregations" + f"/{self.parent_id}/{RelationTypes.REPLACE}?limit=1", access_token=self.user_token, ) self.assertEqual(400, channel.code, channel.json_body) @@ -986,9 +999,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): # Request sync, but limit the timeline so it becomes limited (and includes # bundled aggregations). - filter = urllib.parse.quote_plus( - '{"room": {"timeline": {"limit": 2}}}'.encode() - ) + filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 2}}}') channel = self.make_request( "GET", f"/sync?filter={filter}", access_token=self.user_token ) @@ -1053,7 +1064,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, self.parent_id), + f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1096,7 +1107,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, reply), + f"/rooms/{self.room}/event/{reply}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1198,7 +1209,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): # Request the original event. channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, self.parent_id), + f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1217,102 +1228,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) - def test_relations_redaction_redacts_edits(self) -> None: - """Test that edits of an event are redacted when the original event - is redacted. 
- """ - # Send a new event - res = self.helper.send(self.room, body="Heyo!", tok=self.user_token) - original_event_id = res["event_id"] - - # Add a relation - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - parent_id=original_event_id, - content={ - "msgtype": "m.text", - "body": "Wibble", - "m.new_content": {"msgtype": "m.text", "body": "First edit"}, - }, - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Check the relation is returned - channel = self.make_request( - "GET", - "/_matrix/client/unstable/rooms/%s/relations/%s/m.replace/m.room.message" - % (self.room, original_event_id), - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertIn("chunk", channel.json_body) - self.assertEqual(len(channel.json_body["chunk"]), 1) - - # Redact the original event - channel = self.make_request( - "PUT", - "/rooms/%s/redact/%s/%s" - % (self.room, original_event_id, "test_relations_redaction_redacts_edits"), - access_token=self.user_token, - content="{}", - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Try to check for remaining m.replace relations - channel = self.make_request( - "GET", - "/_matrix/client/unstable/rooms/%s/relations/%s/m.replace/m.room.message" - % (self.room, original_event_id), - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Check that no relations are returned - self.assertIn("chunk", channel.json_body) - self.assertEqual(channel.json_body["chunk"], []) - - def test_aggregations_redaction_prevents_access_to_aggregations(self) -> None: - """Test that annotations of an event are redacted when the original event - is redacted. - """ - # Send a new event - res = self.helper.send(self.room, body="Hello!", tok=self.user_token) - original_event_id = res["event_id"] - - # Add a relation - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", key="👍", parent_id=original_event_id - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Redact the original - channel = self.make_request( - "PUT", - "/rooms/%s/redact/%s/%s" - % ( - self.room, - original_event_id, - "test_aggregations_redaction_prevents_access_to_aggregations", - ), - access_token=self.user_token, - content="{}", - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Check that aggregations returns zero - channel = self.make_request( - "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s/m.annotation/m.reaction" - % (self.room, original_event_id), - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertIn("chunk", channel.json_body) - self.assertEqual(channel.json_body["chunk"], []) - def test_unknown_relations(self) -> None: """Unknown relations should be accepted.""" channel = self._send_relation("m.relation.test", "m.room.test") @@ -1321,8 +1236,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/relations/%s?limit=1" - % (self.room, self.parent_id), + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1343,7 +1257,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): # When bundling the unknown relation is not included. 
channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, self.parent_id), + f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1352,8 +1266,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): # But unknown relations can be directly queried. channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s?limit=1" - % (self.room, self.parent_id), + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1369,58 +1282,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): raise AssertionError(f"Event {self.parent_id} not found in chunk") - def _send_relation( - self, - relation_type: str, - event_type: str, - key: Optional[str] = None, - content: Optional[dict] = None, - access_token: Optional[str] = None, - parent_id: Optional[str] = None, - ) -> FakeChannel: - """Helper function to send a relation pointing at `self.parent_id` - - Args: - relation_type: One of `RelationTypes` - event_type: The type of the event to create - key: The aggregation key used for m.annotation relation type. - content: The content of the created event. Will be modified to configure - the m.relates_to key based on the other provided parameters. - access_token: The access token used to send the relation, defaults - to `self.user_token` - parent_id: The event_id this relation relates to. If None, then self.parent_id - - Returns: - FakeChannel - """ - if not access_token: - access_token = self.user_token - - original_id = parent_id if parent_id else self.parent_id - - if content is None: - content = {} - content["m.relates_to"] = { - "event_id": original_id, - "rel_type": relation_type, - } - if key is not None: - content["m.relates_to"]["key"] = key - - channel = self.make_request( - "POST", - f"/_matrix/client/v3/rooms/{self.room}/send/{event_type}", - content, - access_token=access_token, - ) - return channel - - def _create_user(self, localpart: str) -> Tuple[str, str]: - user_id = self.register_user(localpart, "abc123") - access_token = self.login(localpart, "abc123") - - return user_id, access_token - def test_background_update(self) -> None: """Test the event_arbitrary_relations background update.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") @@ -1482,3 +1343,112 @@ class RelationsTestCase(unittest.HomeserverTestCase): [ev["event_id"] for ev in channel.json_body["chunk"]], [annotation_event_id_good, thread_event_id], ) + + +class RelationRedactionTestCase(BaseRelationsTestCase): + """Test the behaviour of relations when the parent or child event is redacted.""" + + def _redact(self, event_id: str) -> None: + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{self.room}/redact/{event_id}", + access_token=self.user_token, + content={}, + ) + self.assertEqual(200, channel.code, channel.json_body) + + def test_redact_relation_annotation(self) -> None: + """Test that annotations of an event are properly handled after the + annotation is redacted. 
+ """ + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEqual(200, channel.code, channel.json_body) + to_redact_event_id = channel.json_body["event_id"] + + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) + self.assertEqual(200, channel.code, channel.json_body) + + # Redact one of the reactions. + self._redact(to_redact_event_id) + + # Ensure that the aggregations are correct. + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + self.assertEqual( + channel.json_body, + {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, + ) + + def test_redact_relation_edit(self) -> None: + """Test that edits of an event are redacted when the original event + is redacted. + """ + # Add a relation + channel = self._send_relation( + RelationTypes.REPLACE, + "m.room.message", + parent_id=self.parent_id, + content={ + "msgtype": "m.text", + "body": "Wibble", + "m.new_content": {"msgtype": "m.text", "body": "First edit"}, + }, + ) + self.assertEqual(200, channel.code, channel.json_body) + + # Check the relation is returned + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations" + f"/{self.parent_id}/m.replace/m.room.message", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + self.assertIn("chunk", channel.json_body) + self.assertEqual(len(channel.json_body["chunk"]), 1) + + # Redact the original event + self._redact(self.parent_id) + + # Try to check for remaining m.replace relations + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations" + f"/{self.parent_id}/m.replace/m.room.message", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + # Check that no relations are returned + self.assertIn("chunk", channel.json_body) + self.assertEqual(channel.json_body["chunk"], []) + + def test_redact_parent(self) -> None: + """Test that annotations of an event are redacted when the original event + is redacted. + """ + # Add a relation + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") + self.assertEqual(200, channel.code, channel.json_body) + + # Redact the original event. + self._redact(self.parent_id) + + # Check that aggregations returns zero + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}/m.annotation/m.reaction", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + self.assertIn("chunk", channel.json_body) + self.assertEqual(channel.json_body["chunk"], []) -- cgit 1.4.1 From 7317b0be82e31d7b41be64e2fea92aad428283d8 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 11:59:53 +0000 Subject: Tweak changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index a81d0a4b14..542390592f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -23,7 +23,7 @@ Bugfixes -------- - Fix a bug introduced in Synapse 1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. 
([\#11992](https://github.com/matrix-org/synapse/issues/11992)) -- Fix long-standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) +- Fix long-standing bug where the `get_rooms_for_user` cache was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) - Fix a 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) - Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an `argument of type 'int' is not iterable` error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) - Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens in version 1.38.0. ([\#12056](https://github.com/matrix-org/synapse/issues/12056)) -- cgit 1.4.1 From 3b9142f7f462f23eeb754eca6003f127bcc62271 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 12:09:48 +0000 Subject: Reword changelog line about URL previews --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 542390592f..0a87f5cd42 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,7 @@ Features -------- - Add support for [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) -- Improve the preview that is produced when generating URL previews for some web pages. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) +- Improve the generated URL previews for some web pages. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) - Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. ([\#12000](https://github.com/matrix-org/synapse/issues/12000)) - Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). ([\#12001](https://github.com/matrix-org/synapse/issues/12001), [\#12067](https://github.com/matrix-org/synapse/issues/12067)) - Enable modules to set a custom display name when registering a user. 
([\#12009](https://github.com/matrix-org/synapse/issues/12009)) -- cgit 1.4.1 From f3f0ab10fe766c766dedf9d80e4ef198e3e45c09 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 2 Mar 2022 13:00:16 +0000 Subject: Move scripts directory inside synapse, exposing as setuptools entry_points (#12118) * Two scripts are basically entry_points already * Move and rename scripts/* to synapse/_scripts/*.py * Delete sync_room_to_group.pl * Expose entry points in setup.py * Update linter script and config * Fixup scripts & docs mentioning scripts that moved Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- .ci/scripts/test_export_data_command.sh | 4 +- .ci/scripts/test_synapse_port_db.sh | 12 +- .dockerignore | 1 - MANIFEST.in | 1 - changelog.d/12118.misc | 1 + docker/Dockerfile | 1 - docs/development/database_schema.md | 6 +- docs/usage/administration/admin_api/README.md | 2 +- mypy.ini | 4 + scripts-dev/generate_sample_config | 10 +- scripts-dev/lint.sh | 7 - scripts-dev/make_full_schema.sh | 6 +- scripts/export_signing_key | 100 -- scripts/generate_config | 78 -- scripts/generate_log_config | 44 - scripts/generate_signing_key.py | 36 - scripts/hash_password | 79 -- scripts/move_remote_media_to_new_store.py | 118 -- scripts/register_new_matrix_user | 19 - scripts/synapse_port_db | 1253 ------------------- scripts/synapse_review_recent_signups | 19 - scripts/sync_room_to_group.pl | 45 - scripts/update_synapse_database | 117 -- setup.py | 14 +- snap/snapcraft.yaml | 2 +- synapse/_scripts/export_signing_key.py | 103 ++ synapse/_scripts/generate_config.py | 83 ++ synapse/_scripts/generate_log_config.py | 49 + synapse/_scripts/generate_signing_key.py | 41 + synapse/_scripts/hash_password.py | 83 ++ synapse/_scripts/move_remote_media_to_new_store.py | 118 ++ synapse/_scripts/synapse_port_db.py | 1257 ++++++++++++++++++++ synapse/_scripts/update_synapse_database.py | 117 ++ synapse/config/_base.py | 2 +- tox.ini | 8 - 35 files changed, 1891 insertions(+), 1949 deletions(-) create mode 100644 changelog.d/12118.misc delete mode 100755 scripts/export_signing_key delete mode 100755 scripts/generate_config delete mode 100755 scripts/generate_log_config delete mode 100755 scripts/generate_signing_key.py delete mode 100755 scripts/hash_password delete mode 100755 scripts/move_remote_media_to_new_store.py delete mode 100755 scripts/register_new_matrix_user delete mode 100755 scripts/synapse_port_db delete mode 100755 scripts/synapse_review_recent_signups delete mode 100755 scripts/sync_room_to_group.pl delete mode 100755 scripts/update_synapse_database create mode 100755 synapse/_scripts/export_signing_key.py create mode 100755 synapse/_scripts/generate_config.py create mode 100755 synapse/_scripts/generate_log_config.py create mode 100755 synapse/_scripts/generate_signing_key.py create mode 100755 synapse/_scripts/hash_password.py create mode 100755 synapse/_scripts/move_remote_media_to_new_store.py create mode 100755 synapse/_scripts/synapse_port_db.py create mode 100755 synapse/_scripts/update_synapse_database.py diff --git a/.ci/scripts/test_export_data_command.sh b/.ci/scripts/test_export_data_command.sh index ab96387a0a..224cae9216 100755 --- a/.ci/scripts/test_export_data_command.sh +++ b/.ci/scripts/test_export_data_command.sh @@ -21,7 +21,7 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml echo "--- Prepare test database" # Make sure the SQLite3 database is using the latest schema and has no pending background update. 
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates +update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # Run the export-data command on the sqlite test database python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \ @@ -41,7 +41,7 @@ fi # Port the SQLite database to postgres so we can check the command works against postgres echo "+++ Port SQLite3 database to postgres" -scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml # Run the export-data command on postgres database python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \ diff --git a/.ci/scripts/test_synapse_port_db.sh b/.ci/scripts/test_synapse_port_db.sh index 797904e64c..91bd966f32 100755 --- a/.ci/scripts/test_synapse_port_db.sh +++ b/.ci/scripts/test_synapse_port_db.sh @@ -25,17 +25,19 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml echo "--- Prepare test database" # Make sure the SQLite3 database is using the latest schema and has no pending background update. -scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates +update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # Create the PostgreSQL database. .ci/scripts/postgres_exec.py "CREATE DATABASE synapse" echo "+++ Run synapse_port_db against test database" -coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml +# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`, +# but coverage seems unable to find the entrypoints installed by `pip install -e .`. +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml # We should be able to run twice against the same database. echo "+++ Run synapse_port_db a second time" +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml -coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml ##### @@ -46,7 +48,7 @@ echo "--- Prepare empty SQLite database" # we do this by deleting the sqlite db, and then doing the same again. rm .ci/test_db.db -scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates +update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # re-create the PostgreSQL database.
.ci/scripts/postgres_exec.py \ @@ -54,4 +56,4 @@ scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-b "CREATE DATABASE synapse" echo "+++ Run synapse_port_db against empty database" -coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml diff --git a/.dockerignore b/.dockerignore index f6c638b0a2..617f701597 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,7 +3,6 @@ # things to include !docker -!scripts !synapse !MANIFEST.in !README.rst diff --git a/MANIFEST.in b/MANIFEST.in index 76d14eb642..7e903518e1 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -17,7 +17,6 @@ recursive-include synapse/storage *.txt recursive-include synapse/storage *.md recursive-include docs * -recursive-include scripts * recursive-include scripts-dev * recursive-include synapse *.pyi recursive-include tests *.py diff --git a/changelog.d/12118.misc b/changelog.d/12118.misc new file mode 100644 index 0000000000..a2c397d907 --- /dev/null +++ b/changelog.d/12118.misc @@ -0,0 +1 @@ +Move scripts to Synapse package and expose as setuptools entry points. diff --git a/docker/Dockerfile b/docker/Dockerfile index a8bb9b0e7f..327275a9ca 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -46,7 +46,6 @@ RUN \ && rm -rf /var/lib/apt/lists/* # Copy just what we need to pip install -COPY scripts /synapse/scripts/ COPY MANIFEST.in README.rst setup.py synctl /synapse/ COPY synapse/__init__.py /synapse/synapse/__init__.py COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md index a767d3af9f..d996a7caa2 100644 --- a/docs/development/database_schema.md +++ b/docs/development/database_schema.md @@ -158,9 +158,9 @@ same as integers. There are three separate aspects to this: * Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in - `scripts/synapse_port_db`. This tells the port script to cast the integer - value from SQLite to a boolean before writing the value to the postgres - database. + `synapse/_scripts/synapse_port_db.py`. This tells the port script to cast + the integer value from SQLite to a boolean before writing the value to the + postgres database. * Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md index 2fca96f8be..3cbedc5dfa 100644 --- a/docs/usage/administration/admin_api/README.md +++ b/docs/usage/administration/admin_api/README.md @@ -12,7 +12,7 @@ UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'; ``` A new server admin user can also be created using the `register_new_matrix_user` -command. This is a script that is located in the `scripts/` directory, or possibly +command. This is a script that is distributed as part of synapse. It is possibly already on your `$PATH` depending on how Synapse was installed. Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings. 
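The `setup.py` hunk that actually registers these commands is not included in this excerpt. As a rough sketch of what "expose entry points in setup.py" means in practice: a setuptools `console_scripts` declaration maps an executable name to a `module:callable` pair, and pip generates a wrapper script on `$PATH` for each entry when the package is installed. The specific entries and `main` target names below are illustrative assumptions based on the renamed modules in this commit, not the verbatim hunk.

```python
from setuptools import setup, find_packages

setup(
    name="matrix-synapse",  # illustrative; the real setup.py defines many more fields
    packages=find_packages(),
    entry_points={
        "console_scripts": [
            # "<command> = <module>:<callable>": installing the package (including
            # with `pip install -e .`) creates a `synapse_port_db` executable that
            # imports synapse._scripts.synapse_port_db and calls its main().
            "synapse_port_db = synapse._scripts.synapse_port_db:main",
            "update_synapse_database = synapse._scripts.update_synapse_database:main",
        ]
    },
)
```

This is why the CI scripts above can now invoke `synapse_port_db` and `update_synapse_database` as bare commands instead of `scripts/synapse_port_db`.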
diff --git a/mypy.ini b/mypy.ini index 38ff787609..6b1e995e64 100644 --- a/mypy.ini +++ b/mypy.ini @@ -23,6 +23,10 @@ files = # https://docs.python.org/3/library/re.html#re.X exclude = (?x) ^( + |synapse/_scripts/export_signing_key.py + |synapse/_scripts/move_remote_media_to_new_store.py + |synapse/_scripts/synapse_port_db.py + |synapse/_scripts/update_synapse_database.py |synapse/storage/databases/__init__.py |synapse/storage/databases/main/__init__.py |synapse/storage/databases/main/cache.py diff --git a/scripts-dev/generate_sample_config b/scripts-dev/generate_sample_config index 4cd1d1d5b8..185e277933 100755 --- a/scripts-dev/generate_sample_config +++ b/scripts-dev/generate_sample_config @@ -10,19 +10,19 @@ SAMPLE_CONFIG="docs/sample_config.yaml" SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml" check() { - diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || return 1 + diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || return 1 } if [ "$1" == "--check" ]; then - diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || { + diff -u "$SAMPLE_CONFIG" <(synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml) >/dev/null || { echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 exit 1 } - diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || { + diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || { echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 exit 1 } else - ./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG" - ./scripts/generate_log_config -o "$SAMPLE_LOG_CONFIG" + synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG" + synapse/_scripts/generate_log_config.py -o "$SAMPLE_LOG_CONFIG" fi diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index b6554a73c1..df4d4934d0 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -84,13 +84,6 @@ else files=( "synapse" "docker" "tests" # annoyingly, black doesn't find these so we have to list them - "scripts/export_signing_key" - "scripts/generate_config" - "scripts/generate_log_config" - "scripts/hash_password" - "scripts/register_new_matrix_user" - "scripts/synapse_port_db" - "scripts/update_synapse_database" "scripts-dev" "scripts-dev/build_debian_packages" "scripts-dev/sign_json" diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh index c3c90f4ec6..f0e22d4ca2 100755 --- a/scripts-dev/make_full_schema.sh +++ b/scripts-dev/make_full_schema.sh @@ -147,7 +147,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG" # Make sure the SQLite3 database is using the latest schema and has no pending background update. echo "Running db background jobs..." -scripts/update_synapse_database --database-config --run-background-updates "$SQLITE_CONFIG" +synapse/_scripts/update_synapse_database.py --database-config --run-background-updates "$SQLITE_CONFIG" # Create the PostgreSQL database. echo "Creating postgres database..." @@ -156,10 +156,10 @@ createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME" echo "Copying data from SQLite3 to Postgres with synapse_port_db..." 
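A side note on the `coverage run` TODO in `.ci/scripts/test_synapse_port_db.sh` above: the `make_full_schema.sh` hunk that continues below keeps coverage working only because it invokes the module file directly. An alternative, untested sketch would be to run the relocated script as a module via `coverage run -m`, assuming `synapse._scripts.synapse_port_db` keeps an `if __name__ == "__main__"` guard like the other files this commit introduces:

```sh
# Hypothetical replacement for `coverage run scripts/synapse_port_db ...`:
# target the importable module rather than the removed script file.
coverage run -m synapse._scripts.synapse_port_db \
    --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
```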
if [ -z "$COVERAGE" ]; then # No coverage needed - scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" + synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" else # Coverage desired - coverage run scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" + coverage run synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" fi # Delete schema_version, applied_schema_deltas and applied_module_schemas tables diff --git a/scripts/export_signing_key b/scripts/export_signing_key deleted file mode 100755 index bf0139bd64..0000000000 --- a/scripts/export_signing_key +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import argparse -import sys -import time -from typing import Optional - -import nacl.signing -from signedjson.key import encode_verify_key_base64, get_verify_key, read_signing_keys - - -def exit(status: int = 0, message: Optional[str] = None): - if message: - print(message, file=sys.stderr) - sys.exit(status) - - -def format_plain(public_key: nacl.signing.VerifyKey): - print( - "%s:%s %s" - % ( - public_key.alg, - public_key.version, - encode_verify_key_base64(public_key), - ) - ) - - -def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int): - print( - ' "%s:%s": { key: "%s", expired_ts: %i }' - % ( - public_key.alg, - public_key.version, - encode_verify_key_base64(public_key), - expiry_ts, - ) - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "key_file", - nargs="+", - type=argparse.FileType("r"), - help="The key file to read", - ) - - parser.add_argument( - "-x", - action="store_true", - dest="for_config", - help="format the output for inclusion in the old_signing_keys config setting", - ) - - parser.add_argument( - "--expiry-ts", - type=int, - default=int(time.time() * 1000) + 6 * 3600000, - help=( - "The expiry time to use for -x, in milliseconds since 1970. The default " - "is (now+6h)." 
- ), - ) - - args = parser.parse_args() - - formatter = ( - (lambda k: format_for_config(k, args.expiry_ts)) - if args.for_config - else format_plain - ) - - keys = [] - for file in args.key_file: - try: - res = read_signing_keys(file) - except Exception as e: - exit( - status=1, - message="Error reading key from file %s: %s %s" - % (file.name, type(e), e), - ) - res = [] - for key in res: - formatter(get_verify_key(key)) diff --git a/scripts/generate_config b/scripts/generate_config deleted file mode 100755 index 931b40c045..0000000000 --- a/scripts/generate_config +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import shutil -import sys - -from synapse.config.homeserver import HomeServerConfig - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--config-dir", - default="CONFDIR", - help="The path where the config files are kept. Used to create filenames for " - "things like the log config and the signing key. Default: %(default)s", - ) - - parser.add_argument( - "--data-dir", - default="DATADIR", - help="The path where the data files are kept. Used to create filenames for " - "things like the database and media store. Default: %(default)s", - ) - - parser.add_argument( - "--server-name", - default="SERVERNAME", - help="The server name. Used to initialise the server_name config param, but also " - "used in the names of some of the config files. Default: %(default)s", - ) - - parser.add_argument( - "--report-stats", - action="store", - help="Whether the generated config reports anonymized usage statistics", - choices=["yes", "no"], - ) - - parser.add_argument( - "--generate-secrets", - action="store_true", - help="Enable generation of new secrets for things like the macaroon_secret_key." - "By default, these parameters will be left unset.", - ) - - parser.add_argument( - "-o", - "--output-file", - type=argparse.FileType("w"), - default=sys.stdout, - help="File to write the configuration to. Default: stdout", - ) - - parser.add_argument( - "--header-file", - type=argparse.FileType("r"), - help="File from which to read a header, which will be printed before the " - "generated config.", - ) - - args = parser.parse_args() - - report_stats = args.report_stats - if report_stats is not None: - report_stats = report_stats == "yes" - - conf = HomeServerConfig().generate_config( - config_dir_path=args.config_dir, - data_dir_path=args.data_dir, - server_name=args.server_name, - generate_secrets=args.generate_secrets, - report_stats=report_stats, - ) - - if args.header_file: - shutil.copyfileobj(args.header_file, args.output_file) - - args.output_file.write(conf) diff --git a/scripts/generate_log_config b/scripts/generate_log_config deleted file mode 100755 index e72a0dafb7..0000000000 --- a/scripts/generate_log_config +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2020 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import sys - -from synapse.config.logger import DEFAULT_LOG_CONFIG - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "-o", - "--output-file", - type=argparse.FileType("w"), - default=sys.stdout, - help="File to write the configuration to. Default: stdout", - ) - - parser.add_argument( - "-f", - "--log-file", - type=str, - default="/var/log/matrix-synapse/homeserver.log", - help="name of the log file", - ) - - args = parser.parse_args() - out = args.output_file - out.write(DEFAULT_LOG_CONFIG.substitute(log_file=args.log_file)) - out.flush() diff --git a/scripts/generate_signing_key.py b/scripts/generate_signing_key.py deleted file mode 100755 index 07df25a809..0000000000 --- a/scripts/generate_signing_key.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import argparse -import sys - -from signedjson.key import generate_signing_key, write_signing_keys - -from synapse.util.stringutils import random_string - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "-o", - "--output_file", - type=argparse.FileType("w"), - default=sys.stdout, - help="Where to write the output to", - ) - args = parser.parse_args() - - key_id = "a_" + random_string(4) - key = (generate_signing_key(key_id),) - write_signing_keys(args.output_file, key) diff --git a/scripts/hash_password b/scripts/hash_password deleted file mode 100755 index 1d6fb0d700..0000000000 --- a/scripts/hash_password +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python - -import argparse -import getpass -import sys -import unicodedata - -import bcrypt -import yaml - -bcrypt_rounds = 12 -password_pepper = "" - - -def prompt_for_pass(): - password = getpass.getpass("Password: ") - - if not password: - raise Exception("Password cannot be blank.") - - confirm_password = getpass.getpass("Confirm password: ") - - if password != confirm_password: - raise Exception("Passwords do not match.") - - return password - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=( - "Calculate the hash of a new password, so that passwords can be reset" - ) - ) - parser.add_argument( - "-p", - "--password", - default=None, - help="New password for user. Will prompt if omitted.", - ) - parser.add_argument( - "-c", - "--config", - type=argparse.FileType("r"), - help=( - "Path to server config file. " - "Used to read in bcrypt_rounds and password_pepper." 
-        ),
-    )
-
-    args = parser.parse_args()
-    if "config" in args and args.config:
-        config = yaml.safe_load(args.config)
-        bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
-        password_config = config.get("password_config", None) or {}
-        password_pepper = password_config.get("pepper", password_pepper)
-    password = args.password
-
-    if not password:
-        password = prompt_for_pass()
-
-    # On Python 2, make sure we decode it to Unicode before we normalise it
-    if isinstance(password, bytes):
-        try:
-            password = password.decode(sys.stdin.encoding)
-        except UnicodeDecodeError:
-            print(
-                "ERROR! Your password is not decodable using your terminal encoding (%s)."
-                % (sys.stdin.encoding,)
-            )
-
-    pw = unicodedata.normalize("NFKC", password)
-
-    hashed = bcrypt.hashpw(
-        pw.encode("utf8") + password_pepper.encode("utf8"),
-        bcrypt.gensalt(bcrypt_rounds),
-    ).decode("ascii")
-
-    print(hashed)
diff --git a/scripts/move_remote_media_to_new_store.py b/scripts/move_remote_media_to_new_store.py
deleted file mode 100755
index 875aa4781f..0000000000
--- a/scripts/move_remote_media_to_new_store.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Moves a list of remote media from one media store to another.
-
-The input should be a list of media files to be moved, one per line. Each line
-should be formatted::
-
-    <origin server>|<file id>
-
-This can be extracted from postgres with::
-
-    psql --tuples-only -A -c "select media_origin, filesystem_id from
-        matrix.remote_media_cache where ..."
-
-To use, pipe the above into::
-
-    PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
-"""
-
-import argparse
-import logging
-import os
-import shutil
-import sys
-
-from synapse.rest.media.v1.filepath import MediaFilePaths
-
-logger = logging.getLogger()
-
-
-def main(src_repo, dest_repo):
-    src_paths = MediaFilePaths(src_repo)
-    dest_paths = MediaFilePaths(dest_repo)
-    for line in sys.stdin:
-        line = line.strip()
-        parts = line.split("|")
-        if len(parts) != 2:
-            print("Unable to parse input line %s" % line, file=sys.stderr)
-            sys.exit(1)
-
-        move_media(parts[0], parts[1], src_paths, dest_paths)
-
-
-def move_media(origin_server, file_id, src_paths, dest_paths):
-    """Move the given file, and any thumbnails, to the dest repo
-
-    Args:
-        origin_server (str):
-        file_id (str):
-        src_paths (MediaFilePaths):
-        dest_paths (MediaFilePaths):
-    """
-    logger.info("%s/%s", origin_server, file_id)
-
-    # check that the original exists
-    original_file = src_paths.remote_media_filepath(origin_server, file_id)
-    if not os.path.exists(original_file):
-        logger.warning(
-            "Original for %s/%s (%s) does not exist",
-            origin_server,
-            file_id,
-            original_file,
-        )
-    else:
-        mkdir_and_move(
-            original_file, dest_paths.remote_media_filepath(origin_server, file_id)
-        )
-
-    # now look for thumbnails
-    original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id)
-    if not os.path.exists(original_thumb_dir):
-        return
-
-    mkdir_and_move(
-        original_thumb_dir,
-        dest_paths.remote_media_thumbnail_dir(origin_server, file_id),
-    )
-
-
-def mkdir_and_move(original_file, dest_file):
-    dirname = os.path.dirname(dest_file)
-    if not os.path.exists(dirname):
-        logger.debug("mkdir %s", dirname)
-        os.makedirs(dirname)
-    logger.debug("mv %s %s", original_file, dest_file)
-    shutil.move(original_file, dest_file)
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
-        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
-    )
-    parser.add_argument("-v", action="store_true", help="enable debug logging")
-    parser.add_argument("src_repo", help="Path to source content repo")
-    parser.add_argument("dest_repo", help="Path to destination content repo")
-    args = parser.parse_args()
-
-    logging_config = {
-        "level": logging.DEBUG if args.v else logging.INFO,
-        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
-    }
-    logging.basicConfig(**logging_config)
-
-    main(args.src_repo, args.dest_repo)
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
deleted file mode 100755
index 00104b9d62..0000000000
--- a/scripts/register_new_matrix_user
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -from synapse._scripts.register_new_matrix_user import main - -if __name__ == "__main__": - main() diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db deleted file mode 100755 index db354b3c8c..0000000000 --- a/scripts/synapse_port_db +++ /dev/null @@ -1,1253 +0,0 @@ -#!/usr/bin/env python -# Copyright 2015, 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import curses -import logging -import sys -import time -import traceback -from typing import Dict, Iterable, Optional, Set - -import yaml -from matrix_common.versionstring import get_distribution_version_string - -from twisted.internet import defer, reactor - -from synapse.config.database import DatabaseConnectionConfig -from synapse.config.homeserver import HomeServerConfig -from synapse.logging.context import ( - LoggingContext, - make_deferred_yieldable, - run_in_background, -) -from synapse.storage.database import DatabasePool, make_conn -from synapse.storage.databases.main import PushRuleStore -from synapse.storage.databases.main.account_data import AccountDataWorkerStore -from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore -from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore -from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore -from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore -from synapse.storage.databases.main.events_bg_updates import ( - EventsBackgroundUpdatesStore, -) -from synapse.storage.databases.main.group_server import GroupServerWorkerStore -from synapse.storage.databases.main.media_repository import ( - MediaRepositoryBackgroundUpdateStore, -) -from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore -from synapse.storage.databases.main.pusher import PusherWorkerStore -from synapse.storage.databases.main.registration import ( - RegistrationBackgroundUpdateStore, - find_max_generated_user_id_localpart, -) -from synapse.storage.databases.main.room import RoomBackgroundUpdateStore -from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore -from synapse.storage.databases.main.search import SearchBackgroundUpdateStore -from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore -from synapse.storage.databases.main.stats import StatsStore -from synapse.storage.databases.main.user_directory import ( - UserDirectoryBackgroundUpdateStore, -) -from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore -from synapse.storage.engines import create_engine -from synapse.storage.prepare_database import prepare_database -from synapse.util import Clock - -logger = logging.getLogger("synapse_port_db") - - -BOOLEAN_COLUMNS = { - "events": ["processed", "outlier", "contains_url"], - "rooms": ["is_public", "has_auth_chain_index"], - "event_edges": ["is_state"], - "presence_list": 
["accepted"], - "presence_stream": ["currently_active"], - "public_room_list_stream": ["visibility"], - "devices": ["hidden"], - "device_lists_outbound_pokes": ["sent"], - "users_who_share_rooms": ["share_private"], - "groups": ["is_public"], - "group_rooms": ["is_public"], - "group_users": ["is_public", "is_admin"], - "group_summary_rooms": ["is_public"], - "group_room_categories": ["is_public"], - "group_summary_users": ["is_public"], - "group_roles": ["is_public"], - "local_group_membership": ["is_publicised", "is_admin"], - "e2e_room_keys": ["is_verified"], - "account_validity": ["email_sent"], - "redactions": ["have_censored"], - "room_stats_state": ["is_federatable"], - "local_media_repository": ["safe_from_quarantine"], - "users": ["shadow_banned"], - "e2e_fallback_keys_json": ["used"], - "access_tokens": ["used"], -} - - -APPEND_ONLY_TABLES = [ - "event_reference_hashes", - "events", - "event_json", - "state_events", - "room_memberships", - "topics", - "room_names", - "rooms", - "local_media_repository", - "local_media_repository_thumbnails", - "remote_media_cache", - "remote_media_cache_thumbnails", - "redactions", - "event_edges", - "event_auth", - "received_transactions", - "sent_transactions", - "transaction_id_to_pdu", - "users", - "state_groups", - "state_groups_state", - "event_to_state_groups", - "rejections", - "event_search", - "presence_stream", - "push_rules_stream", - "ex_outlier_stream", - "cache_invalidation_stream_by_instance", - "public_room_list_stream", - "state_group_edges", - "stream_ordering_to_exterm", -] - - -IGNORED_TABLES = { - # We don't port these tables, as they're a faff and we can regenerate - # them anyway. - "user_directory", - "user_directory_search", - "user_directory_search_content", - "user_directory_search_docsize", - "user_directory_search_segdir", - "user_directory_search_segments", - "user_directory_search_stat", - "user_directory_search_pos", - "users_who_share_private_rooms", - "users_in_public_room", - # UI auth sessions have foreign keys so additional care needs to be taken, - # the sessions are transient anyway, so ignore them. - "ui_auth_sessions", - "ui_auth_sessions_credentials", - "ui_auth_sessions_ips", -} - - -# Error returned by the run function. Used at the top-level part of the script to -# handle errors and return codes. -end_error = None # type: Optional[str] -# The exec_info for the error, if any. If error is defined but not exec_info the script -# will show only the error message without the stacktrace, if exec_info is defined but -# not the error then the script will show nothing outside of what's printed in the run -# function. If both are defined, the script will print both the error and the stacktrace. 
-end_error_exec_info = None - - -class Store( - ClientIpBackgroundUpdateStore, - DeviceInboxBackgroundUpdateStore, - DeviceBackgroundUpdateStore, - EventsBackgroundUpdatesStore, - MediaRepositoryBackgroundUpdateStore, - RegistrationBackgroundUpdateStore, - RoomBackgroundUpdateStore, - RoomMemberBackgroundUpdateStore, - SearchBackgroundUpdateStore, - StateBackgroundUpdateStore, - MainStateBackgroundUpdateStore, - UserDirectoryBackgroundUpdateStore, - EndToEndKeyBackgroundStore, - StatsStore, - AccountDataWorkerStore, - PushRuleStore, - PusherWorkerStore, - PresenceBackgroundUpdateStore, - GroupServerWorkerStore, -): - def execute(self, f, *args, **kwargs): - return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) - - def execute_sql(self, sql, *args): - def r(txn): - txn.execute(sql, args) - return txn.fetchall() - - return self.db_pool.runInteraction("execute_sql", r) - - def insert_many_txn(self, txn, table, headers, rows): - sql = "INSERT INTO %s (%s) VALUES (%s)" % ( - table, - ", ".join(k for k in headers), - ", ".join("%s" for _ in headers), - ) - - try: - txn.executemany(sql, rows) - except Exception: - logger.exception("Failed to insert: %s", table) - raise - - def set_room_is_public(self, room_id, is_public): - raise Exception( - "Attempt to set room_is_public during port_db: database not empty?" - ) - - -class MockHomeserver: - def __init__(self, config): - self.clock = Clock(reactor) - self.config = config - self.hostname = config.server.server_name - self.version_string = "Synapse/" + get_distribution_version_string( - "matrix-synapse" - ) - - def get_clock(self): - return self.clock - - def get_reactor(self): - return reactor - - def get_instance_name(self): - return "master" - - -class Porter(object): - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - - async def setup_table(self, table): - if table in APPEND_ONLY_TABLES: - # It's safe to just carry on inserting. 
- row = await self.postgres_store.db_pool.simple_select_one( - table="port_from_sqlite3", - keyvalues={"table_name": table}, - retcols=("forward_rowid", "backward_rowid"), - allow_none=True, - ) - - total_to_port = None - if row is None: - if table == "sent_transactions": - ( - forward_chunk, - already_ported, - total_to_port, - ) = await self._setup_sent_transactions() - backward_chunk = 0 - else: - await self.postgres_store.db_pool.simple_insert( - table="port_from_sqlite3", - values={ - "table_name": table, - "forward_rowid": 1, - "backward_rowid": 0, - }, - ) - - forward_chunk = 1 - backward_chunk = 0 - already_ported = 0 - else: - forward_chunk = row["forward_rowid"] - backward_chunk = row["backward_rowid"] - - if total_to_port is None: - already_ported, total_to_port = await self._get_total_count_to_port( - table, forward_chunk, backward_chunk - ) - else: - - def delete_all(txn): - txn.execute( - "DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,) - ) - txn.execute("TRUNCATE %s CASCADE" % (table,)) - - await self.postgres_store.execute(delete_all) - - await self.postgres_store.db_pool.simple_insert( - table="port_from_sqlite3", - values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0}, - ) - - forward_chunk = 1 - backward_chunk = 0 - - already_ported, total_to_port = await self._get_total_count_to_port( - table, forward_chunk, backward_chunk - ) - - return table, already_ported, total_to_port, forward_chunk, backward_chunk - - async def get_table_constraints(self) -> Dict[str, Set[str]]: - """Returns a map of tables that have foreign key constraints to tables they depend on.""" - - def _get_constraints(txn): - # We can pull the information about foreign key constraints out from - # the postgres schema tables. - sql = """ - SELECT DISTINCT - tc.table_name, - ccu.table_name AS foreign_table_name - FROM - information_schema.table_constraints AS tc - INNER JOIN information_schema.constraint_column_usage AS ccu - USING (table_schema, constraint_name) - WHERE tc.constraint_type = 'FOREIGN KEY' - AND tc.table_name != ccu.table_name; - """ - txn.execute(sql) - - results = {} - for table, foreign_table in txn: - results.setdefault(table, set()).add(foreign_table) - return results - - return await self.postgres_store.db_pool.runInteraction( - "get_table_constraints", _get_constraints - ) - - async def handle_table( - self, table, postgres_size, table_size, forward_chunk, backward_chunk - ): - logger.info( - "Table %s: %i/%i (rows %i-%i) already ported", - table, - postgres_size, - table_size, - backward_chunk + 1, - forward_chunk - 1, - ) - - if not table_size: - return - - self.progress.add_table(table, postgres_size, table_size) - - if table == "event_search": - await self.handle_search_table( - postgres_size, table_size, forward_chunk, backward_chunk - ) - return - - if table in IGNORED_TABLES: - self.progress.update(table, table_size) # Mark table as done - return - - if table == "user_directory_stream_pos": - # We need to make sure there is a single row, `(X, null), as that is - # what synapse expects to be there. - await self.postgres_store.db_pool.simple_insert( - table=table, values={"stream_id": None} - ) - self.progress.update(table, table_size) # Mark table as done - return - - forward_select = ( - "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,) - ) - - backward_select = ( - "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" 
% (table,) - ) - - do_forward = [True] - do_backward = [True] - - while True: - - def r(txn): - forward_rows = [] - backward_rows = [] - if do_forward[0]: - txn.execute(forward_select, (forward_chunk, self.batch_size)) - forward_rows = txn.fetchall() - if not forward_rows: - do_forward[0] = False - - if do_backward[0]: - txn.execute(backward_select, (backward_chunk, self.batch_size)) - backward_rows = txn.fetchall() - if not backward_rows: - do_backward[0] = False - - if forward_rows or backward_rows: - headers = [column[0] for column in txn.description] - else: - headers = None - - return headers, forward_rows, backward_rows - - headers, frows, brows = await self.sqlite_store.db_pool.runInteraction( - "select", r - ) - - if frows or brows: - if frows: - forward_chunk = max(row[0] for row in frows) + 1 - if brows: - backward_chunk = min(row[0] for row in brows) - 1 - - rows = frows + brows - rows = self._convert_rows(table, headers, rows) - - def insert(txn): - self.postgres_store.insert_many_txn(txn, table, headers[1:], rows) - - self.postgres_store.db_pool.simple_update_one_txn( - txn, - table="port_from_sqlite3", - keyvalues={"table_name": table}, - updatevalues={ - "forward_rowid": forward_chunk, - "backward_rowid": backward_chunk, - }, - ) - - await self.postgres_store.execute(insert) - - postgres_size += len(rows) - - self.progress.update(table, postgres_size) - else: - return - - async def handle_search_table( - self, postgres_size, table_size, forward_chunk, backward_chunk - ): - select = ( - "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering" - " FROM event_search as es" - " INNER JOIN events AS e USING (event_id, room_id)" - " WHERE es.rowid >= ?" - " ORDER BY es.rowid LIMIT ?" - ) - - while True: - - def r(txn): - txn.execute(select, (forward_chunk, self.batch_size)) - rows = txn.fetchall() - headers = [column[0] for column in txn.description] - - return headers, rows - - headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r) - - if rows: - forward_chunk = rows[-1][0] + 1 - - # We have to treat event_search differently since it has a - # different structure in the two different databases. - def insert(txn): - sql = ( - "INSERT INTO event_search (event_id, room_id, key," - " sender, vector, origin_server_ts, stream_ordering)" - " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)" - ) - - rows_dict = [] - for row in rows: - d = dict(zip(headers, row)) - if "\0" in d["value"]: - logger.warning("dropping search row %s", d) - else: - rows_dict.append(d) - - txn.executemany( - sql, - [ - ( - row["event_id"], - row["room_id"], - row["key"], - row["sender"], - row["value"], - row["origin_server_ts"], - row["stream_ordering"], - ) - for row in rows_dict - ], - ) - - self.postgres_store.db_pool.simple_update_one_txn( - txn, - table="port_from_sqlite3", - keyvalues={"table_name": "event_search"}, - updatevalues={ - "forward_rowid": forward_chunk, - "backward_rowid": backward_chunk, - }, - ) - - await self.postgres_store.execute(insert) - - postgres_size += len(rows) - - self.progress.update("event_search", postgres_size) - - else: - return - - def build_db_store( - self, - db_config: DatabaseConnectionConfig, - allow_outdated_version: bool = False, - ): - """Builds and returns a database store using the provided configuration. - - Args: - db_config: The database configuration - allow_outdated_version: True to suppress errors about the database server - version being too old to run a complete synapse - - Returns: - The built Store object. 
- """ - self.progress.set_state("Preparing %s" % db_config.config["name"]) - - engine = create_engine(db_config.config) - - hs = MockHomeserver(self.hs_config) - - with make_conn(db_config, engine, "portdb") as db_conn: - engine.check_database( - db_conn, allow_outdated_version=allow_outdated_version - ) - prepare_database(db_conn, engine, config=self.hs_config) - store = Store(DatabasePool(hs, db_config, engine), db_conn, hs) - db_conn.commit() - - return store - - async def run_background_updates_on_postgres(self): - # Manually apply all background updates on the PostgreSQL database. - postgres_ready = ( - await self.postgres_store.db_pool.updates.has_completed_background_updates() - ) - - if not postgres_ready: - # Only say that we're running background updates when there are background - # updates to run. - self.progress.set_state("Running background updates on PostgreSQL") - - while not postgres_ready: - await self.postgres_store.db_pool.updates.do_next_background_update(100) - postgres_ready = await ( - self.postgres_store.db_pool.updates.has_completed_background_updates() - ) - - async def run(self): - """Ports the SQLite database to a PostgreSQL database. - - When a fatal error is met, its message is assigned to the global "end_error" - variable. When this error comes with a stacktrace, its exec_info is assigned to - the global "end_error_exec_info" variable. - """ - global end_error - - try: - # we allow people to port away from outdated versions of sqlite. - self.sqlite_store = self.build_db_store( - DatabaseConnectionConfig("master-sqlite", self.sqlite_config), - allow_outdated_version=True, - ) - - # Check if all background updates are done, abort if not. - updates_complete = ( - await self.sqlite_store.db_pool.updates.has_completed_background_updates() - ) - if not updates_complete: - end_error = ( - "Pending background updates exist in the SQLite3 database." - " Please start Synapse again and wait until every update has finished" - " before running this script.\n" - ) - return - - self.postgres_store = self.build_db_store( - self.hs_config.database.get_single_database() - ) - - await self.run_background_updates_on_postgres() - - self.progress.set_state("Creating port tables") - - def create_port_table(txn): - txn.execute( - "CREATE TABLE IF NOT EXISTS port_from_sqlite3 (" - " table_name varchar(100) NOT NULL UNIQUE," - " forward_rowid bigint NOT NULL," - " backward_rowid bigint NOT NULL" - ")" - ) - - # The old port script created a table with just a "rowid" column. - # We want people to be able to rerun this script from an old port - # so that they can pick up any missing events that were not - # ported across. - def alter_table(txn): - txn.execute( - "ALTER TABLE IF EXISTS port_from_sqlite3" - " RENAME rowid TO forward_rowid" - ) - txn.execute( - "ALTER TABLE IF EXISTS port_from_sqlite3" - " ADD backward_rowid bigint NOT NULL DEFAULT 0" - ) - - try: - await self.postgres_store.db_pool.runInteraction( - "alter_table", alter_table - ) - except Exception: - # On Error Resume Next - pass - - await self.postgres_store.db_pool.runInteraction( - "create_port_table", create_port_table - ) - - # Step 2. Set up sequences - # - # We do this before porting the tables so that event if we fail half - # way through the postgres DB always have sequences that are greater - # than their respective tables. If we don't then creating the - # `DataStore` object will fail due to the inconsistency. 
- self.progress.set_state("Setting up sequence generators") - await self._setup_state_group_id_seq() - await self._setup_user_id_seq() - await self._setup_events_stream_seqs() - await self._setup_sequence( - "device_inbox_sequence", ("device_inbox", "device_federation_outbox") - ) - await self._setup_sequence( - "account_data_sequence", - ("room_account_data", "room_tags_revisions", "account_data"), - ) - await self._setup_sequence("receipts_sequence", ("receipts_linearized",)) - await self._setup_sequence("presence_stream_sequence", ("presence_stream",)) - await self._setup_auth_chain_sequence() - - # Step 3. Get tables. - self.progress.set_state("Fetching tables") - sqlite_tables = await self.sqlite_store.db_pool.simple_select_onecol( - table="sqlite_master", keyvalues={"type": "table"}, retcol="name" - ) - - postgres_tables = await self.postgres_store.db_pool.simple_select_onecol( - table="information_schema.tables", - keyvalues={}, - retcol="distinct table_name", - ) - - tables = set(sqlite_tables) & set(postgres_tables) - logger.info("Found %d tables", len(tables)) - - # Step 4. Figure out what still needs copying - self.progress.set_state("Checking on port progress") - setup_res = await make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background(self.setup_table, table) - for table in tables - if table not in ["schema_version", "applied_schema_deltas"] - and not table.startswith("sqlite_") - ], - consumeErrors=True, - ) - ) - # Map from table name to args passed to `handle_table`, i.e. a tuple - # of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`. - tables_to_port_info_map = {r[0]: r[1:] for r in setup_res} - - # Step 5. Do the copying. - # - # This is slightly convoluted as we need to ensure tables are ported - # in the correct order due to foreign key constraints. - self.progress.set_state("Copying to postgres") - - constraints = await self.get_table_constraints() - tables_ported = set() # type: Set[str] - - while tables_to_port_info_map: - # Pulls out all tables that are still to be ported and which - # only depend on tables that are already ported (if any). 
- tables_to_port = [ - table - for table in tables_to_port_info_map - if not constraints.get(table, set()) - tables_ported - ] - - await make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.handle_table, - table, - *tables_to_port_info_map.pop(table), - ) - for table in tables_to_port - ], - consumeErrors=True, - ) - ) - - tables_ported.update(tables_to_port) - - self.progress.done() - except Exception as e: - global end_error_exec_info - end_error = str(e) - end_error_exec_info = sys.exc_info() - logger.exception("") - finally: - reactor.stop() - - def _convert_rows(self, table, headers, rows): - bool_col_names = BOOLEAN_COLUMNS.get(table, []) - - bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names] - - class BadValueException(Exception): - pass - - def conv(j, col): - if j in bool_cols: - return bool(col) - if isinstance(col, bytes): - return bytearray(col) - elif isinstance(col, str) and "\0" in col: - logger.warning( - "DROPPING ROW: NUL value in table %s col %s: %r", - table, - headers[j], - col, - ) - raise BadValueException() - return col - - outrows = [] - for row in rows: - try: - outrows.append( - tuple(conv(j, col) for j, col in enumerate(row) if j > 0) - ) - except BadValueException: - pass - - return outrows - - async def _setup_sent_transactions(self): - # Only save things from the last day - yesterday = int(time.time() * 1000) - 86400000 - - # And save the max transaction id from each destination - select = ( - "SELECT rowid, * FROM sent_transactions WHERE rowid IN (" - "SELECT max(rowid) FROM sent_transactions" - " GROUP BY destination" - ")" - ) - - def r(txn): - txn.execute(select) - rows = txn.fetchall() - headers = [column[0] for column in txn.description] - - ts_ind = headers.index("ts") - - return headers, [r for r in rows if r[ts_ind] < yesterday] - - headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r) - - rows = self._convert_rows("sent_transactions", headers, rows) - - inserted_rows = len(rows) - if inserted_rows: - max_inserted_rowid = max(r[0] for r in rows) - - def insert(txn): - self.postgres_store.insert_many_txn( - txn, "sent_transactions", headers[1:], rows - ) - - await self.postgres_store.execute(insert) - else: - max_inserted_rowid = 0 - - def get_start_id(txn): - txn.execute( - "SELECT rowid FROM sent_transactions WHERE ts >= ?" - " ORDER BY rowid ASC LIMIT 1", - (yesterday,), - ) - - rows = txn.fetchall() - if rows: - return rows[0][0] - else: - return 1 - - next_chunk = await self.sqlite_store.execute(get_start_id) - next_chunk = max(max_inserted_rowid + 1, next_chunk) - - await self.postgres_store.db_pool.simple_insert( - table="port_from_sqlite3", - values={ - "table_name": "sent_transactions", - "forward_rowid": next_chunk, - "backward_rowid": 0, - }, - ) - - def get_sent_table_size(txn): - txn.execute( - "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,) - ) - (size,) = txn.fetchone() - return int(size) - - remaining_count = await self.sqlite_store.execute(get_sent_table_size) - - total_count = remaining_count + inserted_rows - - return next_chunk, inserted_rows, total_count - - async def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk): - frows = await self.sqlite_store.execute_sql( - "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk - ) - - brows = await self.sqlite_store.execute_sql( - "SELECT count(*) FROM %s WHERE rowid <= ?" 
% (table,), backward_chunk - ) - - return frows[0][0] + brows[0][0] - - async def _get_already_ported_count(self, table): - rows = await self.postgres_store.execute_sql( - "SELECT count(*) FROM %s" % (table,) - ) - - return rows[0][0] - - async def _get_total_count_to_port(self, table, forward_chunk, backward_chunk): - remaining, done = await make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self._get_remaining_count_to_port, - table, - forward_chunk, - backward_chunk, - ), - run_in_background(self._get_already_ported_count, table), - ], - ) - ) - - remaining = int(remaining) if remaining else 0 - done = int(done) if done else 0 - - return done, remaining + done - - async def _setup_state_group_id_seq(self) -> None: - curr_id = await self.sqlite_store.db_pool.simple_select_one_onecol( - table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True - ) - - if not curr_id: - return - - def r(txn): - next_id = curr_id + 1 - txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,)) - - await self.postgres_store.db_pool.runInteraction("setup_state_group_id_seq", r) - - async def _setup_user_id_seq(self) -> None: - curr_id = await self.sqlite_store.db_pool.runInteraction( - "setup_user_id_seq", find_max_generated_user_id_localpart - ) - - def r(txn): - next_id = curr_id + 1 - txn.execute("ALTER SEQUENCE user_id_seq RESTART WITH %s", (next_id,)) - - await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r) - - async def _setup_events_stream_seqs(self) -> None: - """Set the event stream sequences to the correct values.""" - - # We get called before we've ported the events table, so we need to - # fetch the current positions from the SQLite store. - curr_forward_id = await self.sqlite_store.db_pool.simple_select_one_onecol( - table="events", keyvalues={}, retcol="MAX(stream_ordering)", allow_none=True - ) - - curr_backward_id = await self.sqlite_store.db_pool.simple_select_one_onecol( - table="events", - keyvalues={}, - retcol="MAX(-MIN(stream_ordering), 1)", - allow_none=True, - ) - - def _setup_events_stream_seqs_set_pos(txn): - if curr_forward_id: - txn.execute( - "ALTER SEQUENCE events_stream_seq RESTART WITH %s", - (curr_forward_id + 1,), - ) - - if curr_backward_id: - txn.execute( - "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", - (curr_backward_id + 1,), - ) - - await self.postgres_store.db_pool.runInteraction( - "_setup_events_stream_seqs", - _setup_events_stream_seqs_set_pos, - ) - - async def _setup_sequence( - self, sequence_name: str, stream_id_tables: Iterable[str] - ) -> None: - """Set a sequence to the correct value.""" - current_stream_ids = [] - for stream_id_table in stream_id_tables: - max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol( - table=stream_id_table, - keyvalues={}, - retcol="COALESCE(MAX(stream_id), 1)", - allow_none=True, - ) - current_stream_ids.append(max_stream_id) - - next_id = max(current_stream_ids) + 1 - - def r(txn): - sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,) - txn.execute(sql + " %s", (next_id,)) - - await self.postgres_store.db_pool.runInteraction( - "_setup_%s" % (sequence_name,), r - ) - - async def _setup_auth_chain_sequence(self) -> None: - curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol( - table="event_auth_chains", - keyvalues={}, - retcol="MAX(chain_id)", - allow_none=True, - ) - - def r(txn): - txn.execute( - "ALTER SEQUENCE event_auth_chain_id RESTART WITH %s", - (curr_chain_id + 1,), - ) - - if 
curr_chain_id is not None: - await self.postgres_store.db_pool.runInteraction( - "_setup_event_auth_chain_id", - r, - ) - - -############################################## -# The following is simply UI stuff -############################################## - - -class Progress(object): - """Used to report progress of the port""" - - def __init__(self): - self.tables = {} - - self.start_time = int(time.time()) - - def add_table(self, table, cur, size): - self.tables[table] = { - "start": cur, - "num_done": cur, - "total": size, - "perc": int(cur * 100 / size), - } - - def update(self, table, num_done): - data = self.tables[table] - data["num_done"] = num_done - data["perc"] = int(num_done * 100 / data["total"]) - - def done(self): - pass - - -class CursesProgress(Progress): - """Reports progress to a curses window""" - - def __init__(self, stdscr): - self.stdscr = stdscr - - curses.use_default_colors() - curses.curs_set(0) - - curses.init_pair(1, curses.COLOR_RED, -1) - curses.init_pair(2, curses.COLOR_GREEN, -1) - - self.last_update = 0 - - self.finished = False - - self.total_processed = 0 - self.total_remaining = 0 - - super(CursesProgress, self).__init__() - - def update(self, table, num_done): - super(CursesProgress, self).update(table, num_done) - - self.total_processed = 0 - self.total_remaining = 0 - for data in self.tables.values(): - self.total_processed += data["num_done"] - data["start"] - self.total_remaining += data["total"] - data["num_done"] - - self.render() - - def render(self, force=False): - now = time.time() - - if not force and now - self.last_update < 0.2: - # reactor.callLater(1, self.render) - return - - self.stdscr.clear() - - rows, cols = self.stdscr.getmaxyx() - - duration = int(now) - int(self.start_time) - - minutes, seconds = divmod(duration, 60) - duration_str = "%02dm %02ds" % (minutes, seconds) - - if self.finished: - status = "Time spent: %s (Done!)" % (duration_str,) - else: - - if self.total_processed > 0: - left = float(self.total_remaining) / self.total_processed - - est_remaining = (int(now) - self.start_time) * left - est_remaining_str = "%02dm %02ds remaining" % divmod(est_remaining, 60) - else: - est_remaining_str = "Unknown" - status = "Time spent: %s (est. 
remaining: %s)" % ( - duration_str, - est_remaining_str, - ) - - self.stdscr.addstr(0, 0, status, curses.A_BOLD) - - max_len = max(len(t) for t in self.tables.keys()) - - left_margin = 5 - middle_space = 1 - - items = self.tables.items() - items = sorted(items, key=lambda i: (i[1]["perc"], i[0])) - - for i, (table, data) in enumerate(items): - if i + 2 >= rows: - break - - perc = data["perc"] - - color = curses.color_pair(2) if perc == 100 else curses.color_pair(1) - - self.stdscr.addstr( - i + 2, left_margin + max_len - len(table), table, curses.A_BOLD | color - ) - - size = 20 - - progress = "[%s%s]" % ( - "#" * int(perc * size / 100), - " " * (size - int(perc * size / 100)), - ) - - self.stdscr.addstr( - i + 2, - left_margin + max_len + middle_space, - "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]), - ) - - if self.finished: - self.stdscr.addstr(rows - 1, 0, "Press any key to exit...") - - self.stdscr.refresh() - self.last_update = time.time() - - def done(self): - self.finished = True - self.render(True) - self.stdscr.getch() - - def set_state(self, state): - self.stdscr.clear() - self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD) - self.stdscr.refresh() - - -class TerminalProgress(Progress): - """Just prints progress to the terminal""" - - def update(self, table, num_done): - super(TerminalProgress, self).update(table, num_done) - - data = self.tables[table] - - print( - "%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"]) - ) - - def set_state(self, state): - print(state + "...") - - -############################################## -############################################## - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="A script to port an existing synapse SQLite database to" - " a new PostgreSQL database." - ) - parser.add_argument("-v", action="store_true") - parser.add_argument( - "--sqlite-database", - required=True, - help="The snapshot of the SQLite database file. 
This must not be" - " currently used by a running synapse server", - ) - parser.add_argument( - "--postgres-config", - type=argparse.FileType("r"), - required=True, - help="The database config file for the PostgreSQL database", - ) - parser.add_argument( - "--curses", action="store_true", help="display a curses based progress UI" - ) - - parser.add_argument( - "--batch-size", - type=int, - default=1000, - help="The number of rows to select from the SQLite table each" - " iteration [default=1000]", - ) - - args = parser.parse_args() - - logging_config = { - "level": logging.DEBUG if args.v else logging.INFO, - "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", - } - - if args.curses: - logging_config["filename"] = "port-synapse.log" - - logging.basicConfig(**logging_config) - - sqlite_config = { - "name": "sqlite3", - "args": { - "database": args.sqlite_database, - "cp_min": 1, - "cp_max": 1, - "check_same_thread": False, - }, - } - - hs_config = yaml.safe_load(args.postgres_config) - - if "database" not in hs_config: - sys.stderr.write("The configuration file must have a 'database' section.\n") - sys.exit(4) - - postgres_config = hs_config["database"] - - if "name" not in postgres_config: - sys.stderr.write("Malformed database config: no 'name'\n") - sys.exit(2) - if postgres_config["name"] != "psycopg2": - sys.stderr.write("Database must use the 'psycopg2' connector.\n") - sys.exit(3) - - config = HomeServerConfig() - config.parse_config_dict(hs_config, "", "") - - def start(stdscr=None): - if stdscr: - progress = CursesProgress(stdscr) - else: - progress = TerminalProgress() - - porter = Porter( - sqlite_config=sqlite_config, - progress=progress, - batch_size=args.batch_size, - hs_config=config, - ) - - @defer.inlineCallbacks - def run(): - with LoggingContext("synapse_port_db_run"): - yield defer.ensureDeferred(porter.run()) - - reactor.callWhenRunning(run) - - reactor.run() - - if args.curses: - curses.wrapper(start) - else: - start() - - if end_error: - if end_error_exec_info: - exc_type, exc_value, exc_traceback = end_error_exec_info - traceback.print_exception(exc_type, exc_value, exc_traceback) - - sys.stderr.write(end_error) - - sys.exit(5) diff --git a/scripts/synapse_review_recent_signups b/scripts/synapse_review_recent_signups deleted file mode 100755 index a36d46e14c..0000000000 --- a/scripts/synapse_review_recent_signups +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# Copyright 2021 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-from synapse._scripts.review_recent_signups import main
-
-if __name__ == "__main__":
-    main()
diff --git a/scripts/sync_room_to_group.pl b/scripts/sync_room_to_group.pl
deleted file mode 100755
index f0c2dfadfa..0000000000
--- a/scripts/sync_room_to_group.pl
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env perl
-
-use strict;
-use warnings;
-
-use JSON::XS;
-use LWP::UserAgent;
-use URI::Escape;
-
-if (@ARGV < 4) {
-    die "usage: $0 <homeserver url> <access_token> <room_id|room_alias> <group_id>\n";
-}
-
-my ($hs, $access_token, $room_id, $group_id) = @ARGV;
-my $ua = LWP::UserAgent->new();
-$ua->timeout(10);
-
-if ($room_id =~ /^#/) {
-    $room_id = uri_escape($room_id);
-    $room_id = decode_json($ua->get("${hs}/_matrix/client/r0/directory/room/${room_id}?access_token=${access_token}")->decoded_content)->{room_id};
-}
-
-my $room_users = [ keys %{decode_json($ua->get("${hs}/_matrix/client/r0/rooms/${room_id}/joined_members?access_token=${access_token}")->decoded_content)->{joined}} ];
-my $group_users = [
-    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/users?access_token=${access_token}" )->decoded_content)->{chunk}}),
-    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/invited_users?access_token=${access_token}" )->decoded_content)->{chunk}}),
-];
-
-die "refusing to sync from empty room" unless (@$room_users);
-die "refusing to sync to empty group" unless (@$group_users);
-
-my $diff = {};
-foreach my $user (@$room_users) { $diff->{$user}++ }
-foreach my $user (@$group_users) { $diff->{$user}-- }
-
-foreach my $user (keys %$diff) {
-    if ($diff->{$user} == 1) {
-        warn "inviting $user";
-        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/invite/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
-    }
-    elsif ($diff->{$user} == -1) {
-        warn "removing $user";
-        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/remove/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
-    }
-}
diff --git a/scripts/update_synapse_database b/scripts/update_synapse_database
deleted file mode 100755
index f43676afaa..0000000000
--- a/scripts/update_synapse_database
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2019 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import argparse -import logging -import sys - -import yaml -from matrix_common.versionstring import get_distribution_version_string - -from twisted.internet import defer, reactor - -from synapse.config.homeserver import HomeServerConfig -from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.server import HomeServer -from synapse.storage import DataStore - -logger = logging.getLogger("update_database") - - -class MockHomeserver(HomeServer): - DATASTORE_CLASS = DataStore - - def __init__(self, config, **kwargs): - super(MockHomeserver, self).__init__( - config.server.server_name, reactor=reactor, config=config, **kwargs - ) - - self.version_string = "Synapse/" + get_distribution_version_string( - "matrix-synapse" - ) - - -def run_background_updates(hs): - store = hs.get_datastores().main - - async def run_background_updates(): - await store.db_pool.updates.run_background_updates(sleep=False) - # Stop the reactor to exit the script once every background update is run. - reactor.stop() - - def run(): - # Apply all background updates on the database. - defer.ensureDeferred( - run_as_background_process("background_updates", run_background_updates) - ) - - reactor.callWhenRunning(run) - - reactor.run() - - -def main(): - parser = argparse.ArgumentParser( - description=( - "Updates a synapse database to the latest schema and optionally runs background updates" - " on it." - ) - ) - parser.add_argument("-v", action="store_true") - parser.add_argument( - "--database-config", - type=argparse.FileType("r"), - required=True, - help="Synapse configuration file, giving the details of the database to be updated", - ) - parser.add_argument( - "--run-background-updates", - action="store_true", - required=False, - help="run background updates after upgrading the database schema", - ) - - args = parser.parse_args() - - logging_config = { - "level": logging.DEBUG if args.v else logging.INFO, - "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", - } - - logging.basicConfig(**logging_config) - - # Load, process and sanity-check the config. - hs_config = yaml.safe_load(args.database_config) - - if "database" not in hs_config: - sys.stderr.write("The configuration file must have a 'database' section.\n") - sys.exit(4) - - config = HomeServerConfig() - config.parse_config_dict(hs_config, "", "") - - # Instantiate and initialise the homeserver object. - hs = MockHomeserver(config) - - # Setup instantiates the store within the homeserver object and updates the - # DB. - hs.setup() - - if args.run_background_updates: - run_background_updates(hs) - - -if __name__ == "__main__": - main() diff --git a/setup.py b/setup.py index 26f4650348..318df16766 100755 --- a/setup.py +++ b/setup.py @@ -15,7 +15,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import glob import os from typing import Any, Dict @@ -153,8 +152,19 @@ setup( python_requires="~=3.7", entry_points={ "console_scripts": [ + # Application "synapse_homeserver = synapse.app.homeserver:main", "synapse_worker = synapse.app.generic_worker:main", + # Scripts + "export_signing_key = synapse._scripts.export_signing_key:main", + "generate_config = synapse._scripts.generate_config:main", + "generate_log_config = synapse._scripts.generate_log_config:main", + "generate_signing_key = synapse._scripts.generate_signing_key:main", + "hash_password = synapse._scripts.hash_password:main", + "register_new_matrix_user = synapse._scripts.register_new_matrix_user:main", + "synapse_port_db = synapse._scripts.synapse_port_db:main", + "synapse_review_recent_signups = synapse._scripts.review_recent_signups:main", + "update_synapse_database = synapse._scripts.update_synapse_database:main", ] }, classifiers=[ @@ -167,6 +177,6 @@ setup( "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", ], - scripts=["synctl"] + glob.glob("scripts/*"), + scripts=["synctl"], cmdclass={"test": TestCommand}, ) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 9a01152c15..dd4c8478d5 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -20,7 +20,7 @@ apps: generate-config: command: generate_config generate-signing-key: - command: generate_signing_key.py + command: generate_signing_key register-new-matrix-user: command: register_new_matrix_user plugs: [network] diff --git a/synapse/_scripts/export_signing_key.py b/synapse/_scripts/export_signing_key.py new file mode 100755 index 0000000000..3d254348f1 --- /dev/null +++ b/synapse/_scripts/export_signing_key.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
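+
+# Prints the public part of each given signing key, either as plain
+# "<algorithm>:<version> <unpadded base64>" lines or, with -x, in the form
+# expected by the `old_signing_keys` homeserver config option.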
+import argparse +import sys +import time +from typing import Optional + +import nacl.signing +from signedjson.key import encode_verify_key_base64, get_verify_key, read_signing_keys + + +def exit(status: int = 0, message: Optional[str] = None): + if message: + print(message, file=sys.stderr) + sys.exit(status) + + +def format_plain(public_key: nacl.signing.VerifyKey): + print( + "%s:%s %s" + % ( + public_key.alg, + public_key.version, + encode_verify_key_base64(public_key), + ) + ) + + +def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int): + print( + ' "%s:%s": { key: "%s", expired_ts: %i }' + % ( + public_key.alg, + public_key.version, + encode_verify_key_base64(public_key), + expiry_ts, + ) + ) + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "key_file", + nargs="+", + type=argparse.FileType("r"), + help="The key file to read", + ) + + parser.add_argument( + "-x", + action="store_true", + dest="for_config", + help="format the output for inclusion in the old_signing_keys config setting", + ) + + parser.add_argument( + "--expiry-ts", + type=int, + default=int(time.time() * 1000) + 6 * 3600000, + help=( + "The expiry time to use for -x, in milliseconds since 1970. The default " + "is (now+6h)." + ), + ) + + args = parser.parse_args() + + formatter = ( + (lambda k: format_for_config(k, args.expiry_ts)) + if args.for_config + else format_plain + ) + + for file in args.key_file: + try: + res = read_signing_keys(file) + except Exception as e: + exit( + status=1, + message="Error reading key from file %s: %s %s" + % (file.name, type(e), e), + ) + res = [] + for key in res: + formatter(get_verify_key(key)) + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/generate_config.py b/synapse/_scripts/generate_config.py new file mode 100755 index 0000000000..75fce20b12 --- /dev/null +++ b/synapse/_scripts/generate_config.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 + +import argparse +import shutil +import sys + +from synapse.config.homeserver import HomeServerConfig + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--config-dir", + default="CONFDIR", + help="The path where the config files are kept. Used to create filenames for " + "things like the log config and the signing key. Default: %(default)s", + ) + + parser.add_argument( + "--data-dir", + default="DATADIR", + help="The path where the data files are kept. Used to create filenames for " + "things like the database and media store. Default: %(default)s", + ) + + parser.add_argument( + "--server-name", + default="SERVERNAME", + help="The server name. Used to initialise the server_name config param, but also " + "used in the names of some of the config files. Default: %(default)s", + ) + + parser.add_argument( + "--report-stats", + action="store", + help="Whether the generated config reports anonymized usage statistics", + choices=["yes", "no"], + ) + + parser.add_argument( + "--generate-secrets", + action="store_true", + help="Enable generation of new secrets for things like the macaroon_secret_key." + "By default, these parameters will be left unset.", + ) + + parser.add_argument( + "-o", + "--output-file", + type=argparse.FileType("w"), + default=sys.stdout, + help="File to write the configuration to. 
Default: stdout", + ) + + parser.add_argument( + "--header-file", + type=argparse.FileType("r"), + help="File from which to read a header, which will be printed before the " + "generated config.", + ) + + args = parser.parse_args() + + report_stats = args.report_stats + if report_stats is not None: + report_stats = report_stats == "yes" + + conf = HomeServerConfig().generate_config( + config_dir_path=args.config_dir, + data_dir_path=args.data_dir, + server_name=args.server_name, + generate_secrets=args.generate_secrets, + report_stats=report_stats, + ) + + if args.header_file: + shutil.copyfileobj(args.header_file, args.output_file) + + args.output_file.write(conf) + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/generate_log_config.py b/synapse/_scripts/generate_log_config.py new file mode 100755 index 0000000000..82fc763140 --- /dev/null +++ b/synapse/_scripts/generate_log_config.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 + +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import sys + +from synapse.config.logger import DEFAULT_LOG_CONFIG + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "-o", + "--output-file", + type=argparse.FileType("w"), + default=sys.stdout, + help="File to write the configuration to. Default: stdout", + ) + + parser.add_argument( + "-f", + "--log-file", + type=str, + default="/var/log/matrix-synapse/homeserver.log", + help="name of the log file", + ) + + args = parser.parse_args() + out = args.output_file + out.write(DEFAULT_LOG_CONFIG.substitute(log_file=args.log_file)) + out.flush() + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/generate_signing_key.py b/synapse/_scripts/generate_signing_key.py new file mode 100755 index 0000000000..bc26d25bfd --- /dev/null +++ b/synapse/_scripts/generate_signing_key.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
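+
+# Generates a fresh signing key with a random key version ("a_" plus four
+# random characters) and writes it out via signedjson's write_signing_keys.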
+import argparse +import sys + +from signedjson.key import generate_signing_key, write_signing_keys + +from synapse.util.stringutils import random_string + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "-o", + "--output_file", + type=argparse.FileType("w"), + default=sys.stdout, + help="Where to write the output to", + ) + args = parser.parse_args() + + key_id = "a_" + random_string(4) + key = (generate_signing_key(key_id),) + write_signing_keys(args.output_file, key) + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/hash_password.py b/synapse/_scripts/hash_password.py new file mode 100755 index 0000000000..708640c7de --- /dev/null +++ b/synapse/_scripts/hash_password.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python + +import argparse +import getpass +import sys +import unicodedata + +import bcrypt +import yaml + + +def prompt_for_pass(): + password = getpass.getpass("Password: ") + + if not password: + raise Exception("Password cannot be blank.") + + confirm_password = getpass.getpass("Confirm password: ") + + if password != confirm_password: + raise Exception("Passwords do not match.") + + return password + + +def main(): + bcrypt_rounds = 12 + password_pepper = "" + + parser = argparse.ArgumentParser( + description=( + "Calculate the hash of a new password, so that passwords can be reset" + ) + ) + parser.add_argument( + "-p", + "--password", + default=None, + help="New password for user. Will prompt if omitted.", + ) + parser.add_argument( + "-c", + "--config", + type=argparse.FileType("r"), + help=( + "Path to server config file. " + "Used to read in bcrypt_rounds and password_pepper." + ), + ) + + args = parser.parse_args() + if "config" in args and args.config: + config = yaml.safe_load(args.config) + bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds) + password_config = config.get("password_config", None) or {} + password_pepper = password_config.get("pepper", password_pepper) + password = args.password + + if not password: + password = prompt_for_pass() + + # On Python 2, make sure we decode it to Unicode before we normalise it + if isinstance(password, bytes): + try: + password = password.decode(sys.stdin.encoding) + except UnicodeDecodeError: + print( + "ERROR! Your password is not decodable using your terminal encoding (%s)." + % (sys.stdin.encoding,) + ) + + pw = unicodedata.normalize("NFKC", password) + + hashed = bcrypt.hashpw( + pw.encode("utf8") + password_pepper.encode("utf8"), + bcrypt.gensalt(bcrypt_rounds), + ).decode("ascii") + + print(hashed) + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/move_remote_media_to_new_store.py b/synapse/_scripts/move_remote_media_to_new_store.py new file mode 100755 index 0000000000..9667d95dfe --- /dev/null +++ b/synapse/_scripts/move_remote_media_to_new_store.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Moves a list of remote media from one media store to another. 
+
+The input should be a list of media files to be moved, one per line. Each line
+should be formatted::
+
+    <origin server>|<file id>
+
+This can be extracted from postgres with::
+
+    psql --tuples-only -A -c "select media_origin, filesystem_id from
+        matrix.remote_media_cache where ..."
+
+To use, pipe the above into::
+
+    PYTHON_PATH=. synapse/_scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
+"""
+
+import argparse
+import logging
+import os
+import shutil
+import sys
+
+from synapse.rest.media.v1.filepath import MediaFilePaths
+
+logger = logging.getLogger()
+
+
+def main(src_repo, dest_repo):
+    src_paths = MediaFilePaths(src_repo)
+    dest_paths = MediaFilePaths(dest_repo)
+    for line in sys.stdin:
+        line = line.strip()
+        parts = line.split("|")
+        if len(parts) != 2:
+            print("Unable to parse input line %s" % line, file=sys.stderr)
+            sys.exit(1)
+
+        move_media(parts[0], parts[1], src_paths, dest_paths)
+
+
+def move_media(origin_server, file_id, src_paths, dest_paths):
+    """Move the given file, and any thumbnails, to the dest repo
+
+    Args:
+        origin_server (str):
+        file_id (str):
+        src_paths (MediaFilePaths):
+        dest_paths (MediaFilePaths):
+    """
+    logger.info("%s/%s", origin_server, file_id)
+
+    # check that the original exists
+    original_file = src_paths.remote_media_filepath(origin_server, file_id)
+    if not os.path.exists(original_file):
+        logger.warning(
+            "Original for %s/%s (%s) does not exist",
+            origin_server,
+            file_id,
+            original_file,
+        )
+    else:
+        mkdir_and_move(
+            original_file, dest_paths.remote_media_filepath(origin_server, file_id)
+        )
+
+    # now look for thumbnails
+    original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id)
+    if not os.path.exists(original_thumb_dir):
+        return
+
+    mkdir_and_move(
+        original_thumb_dir,
+        dest_paths.remote_media_thumbnail_dir(origin_server, file_id),
+    )
+
+
+def mkdir_and_move(original_file, dest_file):
+    dirname = os.path.dirname(dest_file)
+    if not os.path.exists(dirname):
+        logger.debug("mkdir %s", dirname)
+        os.makedirs(dirname)
+    logger.debug("mv %s %s", original_file, dest_file)
+    shutil.move(original_file, dest_file)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("-v", action="store_true", help="enable debug logging")
+    parser.add_argument("src_repo", help="Path to source content repo")
+    parser.add_argument("dest_repo", help="Path to destination content repo")
+    args = parser.parse_args()
+
+    logging_config = {
+        "level": logging.DEBUG if args.v else logging.INFO,
+        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
+    }
+    logging.basicConfig(**logging_config)
+
+    main(args.src_repo, args.dest_repo)
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
new file mode 100755
index 0000000000..c38666da18
--- /dev/null
+++ b/synapse/_scripts/synapse_port_db.py
@@ -0,0 +1,1257 @@
+#!/usr/bin/env python
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import curses +import logging +import sys +import time +import traceback +from typing import Dict, Iterable, Optional, Set + +import yaml +from matrix_common.versionstring import get_distribution_version_string + +from twisted.internet import defer, reactor + +from synapse.config.database import DatabaseConnectionConfig +from synapse.config.homeserver import HomeServerConfig +from synapse.logging.context import ( + LoggingContext, + make_deferred_yieldable, + run_in_background, +) +from synapse.storage.database import DatabasePool, make_conn +from synapse.storage.databases.main import PushRuleStore +from synapse.storage.databases.main.account_data import AccountDataWorkerStore +from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore +from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore +from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore +from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore +from synapse.storage.databases.main.events_bg_updates import ( + EventsBackgroundUpdatesStore, +) +from synapse.storage.databases.main.group_server import GroupServerWorkerStore +from synapse.storage.databases.main.media_repository import ( + MediaRepositoryBackgroundUpdateStore, +) +from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore +from synapse.storage.databases.main.pusher import PusherWorkerStore +from synapse.storage.databases.main.registration import ( + RegistrationBackgroundUpdateStore, + find_max_generated_user_id_localpart, +) +from synapse.storage.databases.main.room import RoomBackgroundUpdateStore +from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore +from synapse.storage.databases.main.search import SearchBackgroundUpdateStore +from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore +from synapse.storage.databases.main.stats import StatsStore +from synapse.storage.databases.main.user_directory import ( + UserDirectoryBackgroundUpdateStore, +) +from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore +from synapse.storage.engines import create_engine +from synapse.storage.prepare_database import prepare_database +from synapse.util import Clock + +logger = logging.getLogger("synapse_port_db") + + +BOOLEAN_COLUMNS = { + "events": ["processed", "outlier", "contains_url"], + "rooms": ["is_public", "has_auth_chain_index"], + "event_edges": ["is_state"], + "presence_list": ["accepted"], + "presence_stream": ["currently_active"], + "public_room_list_stream": ["visibility"], + "devices": ["hidden"], + "device_lists_outbound_pokes": ["sent"], + "users_who_share_rooms": ["share_private"], + "groups": ["is_public"], + "group_rooms": ["is_public"], + "group_users": ["is_public", "is_admin"], + "group_summary_rooms": ["is_public"], + "group_room_categories": ["is_public"], + "group_summary_users": ["is_public"], + "group_roles": ["is_public"], + "local_group_membership": ["is_publicised", "is_admin"], + "e2e_room_keys": ["is_verified"], + "account_validity": ["email_sent"], + "redactions": ["have_censored"], + "room_stats_state": ["is_federatable"], + "local_media_repository": ["safe_from_quarantine"], + "users": ["shadow_banned"], + "e2e_fallback_keys_json": ["used"], + "access_tokens": ["used"], +} + + +APPEND_ONLY_TABLES = [ + 
"event_reference_hashes", + "events", + "event_json", + "state_events", + "room_memberships", + "topics", + "room_names", + "rooms", + "local_media_repository", + "local_media_repository_thumbnails", + "remote_media_cache", + "remote_media_cache_thumbnails", + "redactions", + "event_edges", + "event_auth", + "received_transactions", + "sent_transactions", + "transaction_id_to_pdu", + "users", + "state_groups", + "state_groups_state", + "event_to_state_groups", + "rejections", + "event_search", + "presence_stream", + "push_rules_stream", + "ex_outlier_stream", + "cache_invalidation_stream_by_instance", + "public_room_list_stream", + "state_group_edges", + "stream_ordering_to_exterm", +] + + +IGNORED_TABLES = { + # We don't port these tables, as they're a faff and we can regenerate + # them anyway. + "user_directory", + "user_directory_search", + "user_directory_search_content", + "user_directory_search_docsize", + "user_directory_search_segdir", + "user_directory_search_segments", + "user_directory_search_stat", + "user_directory_search_pos", + "users_who_share_private_rooms", + "users_in_public_room", + # UI auth sessions have foreign keys so additional care needs to be taken, + # the sessions are transient anyway, so ignore them. + "ui_auth_sessions", + "ui_auth_sessions_credentials", + "ui_auth_sessions_ips", +} + + +# Error returned by the run function. Used at the top-level part of the script to +# handle errors and return codes. +end_error = None # type: Optional[str] +# The exec_info for the error, if any. If error is defined but not exec_info the script +# will show only the error message without the stacktrace, if exec_info is defined but +# not the error then the script will show nothing outside of what's printed in the run +# function. If both are defined, the script will print both the error and the stacktrace. +end_error_exec_info = None + + +class Store( + ClientIpBackgroundUpdateStore, + DeviceInboxBackgroundUpdateStore, + DeviceBackgroundUpdateStore, + EventsBackgroundUpdatesStore, + MediaRepositoryBackgroundUpdateStore, + RegistrationBackgroundUpdateStore, + RoomBackgroundUpdateStore, + RoomMemberBackgroundUpdateStore, + SearchBackgroundUpdateStore, + StateBackgroundUpdateStore, + MainStateBackgroundUpdateStore, + UserDirectoryBackgroundUpdateStore, + EndToEndKeyBackgroundStore, + StatsStore, + AccountDataWorkerStore, + PushRuleStore, + PusherWorkerStore, + PresenceBackgroundUpdateStore, + GroupServerWorkerStore, +): + def execute(self, f, *args, **kwargs): + return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) + + def execute_sql(self, sql, *args): + def r(txn): + txn.execute(sql, args) + return txn.fetchall() + + return self.db_pool.runInteraction("execute_sql", r) + + def insert_many_txn(self, txn, table, headers, rows): + sql = "INSERT INTO %s (%s) VALUES (%s)" % ( + table, + ", ".join(k for k in headers), + ", ".join("%s" for _ in headers), + ) + + try: + txn.executemany(sql, rows) + except Exception: + logger.exception("Failed to insert: %s", table) + raise + + def set_room_is_public(self, room_id, is_public): + raise Exception( + "Attempt to set room_is_public during port_db: database not empty?" 
+ ) + + +class MockHomeserver: + def __init__(self, config): + self.clock = Clock(reactor) + self.config = config + self.hostname = config.server.server_name + self.version_string = "Synapse/" + get_distribution_version_string( + "matrix-synapse" + ) + + def get_clock(self): + return self.clock + + def get_reactor(self): + return reactor + + def get_instance_name(self): + return "master" + + +class Porter(object): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + async def setup_table(self, table): + if table in APPEND_ONLY_TABLES: + # It's safe to just carry on inserting. + row = await self.postgres_store.db_pool.simple_select_one( + table="port_from_sqlite3", + keyvalues={"table_name": table}, + retcols=("forward_rowid", "backward_rowid"), + allow_none=True, + ) + + total_to_port = None + if row is None: + if table == "sent_transactions": + ( + forward_chunk, + already_ported, + total_to_port, + ) = await self._setup_sent_transactions() + backward_chunk = 0 + else: + await self.postgres_store.db_pool.simple_insert( + table="port_from_sqlite3", + values={ + "table_name": table, + "forward_rowid": 1, + "backward_rowid": 0, + }, + ) + + forward_chunk = 1 + backward_chunk = 0 + already_ported = 0 + else: + forward_chunk = row["forward_rowid"] + backward_chunk = row["backward_rowid"] + + if total_to_port is None: + already_ported, total_to_port = await self._get_total_count_to_port( + table, forward_chunk, backward_chunk + ) + else: + + def delete_all(txn): + txn.execute( + "DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,) + ) + txn.execute("TRUNCATE %s CASCADE" % (table,)) + + await self.postgres_store.execute(delete_all) + + await self.postgres_store.db_pool.simple_insert( + table="port_from_sqlite3", + values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0}, + ) + + forward_chunk = 1 + backward_chunk = 0 + + already_ported, total_to_port = await self._get_total_count_to_port( + table, forward_chunk, backward_chunk + ) + + return table, already_ported, total_to_port, forward_chunk, backward_chunk + + async def get_table_constraints(self) -> Dict[str, Set[str]]: + """Returns a map of tables that have foreign key constraints to tables they depend on.""" + + def _get_constraints(txn): + # We can pull the information about foreign key constraints out from + # the postgres schema tables. 
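+            # (`information_schema.table_constraints` lists each constraint;
+            # joining it to `constraint_column_usage` on the constraint name
+            # recovers the table that each foreign key refers to.)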
+ sql = """ + SELECT DISTINCT + tc.table_name, + ccu.table_name AS foreign_table_name + FROM + information_schema.table_constraints AS tc + INNER JOIN information_schema.constraint_column_usage AS ccu + USING (table_schema, constraint_name) + WHERE tc.constraint_type = 'FOREIGN KEY' + AND tc.table_name != ccu.table_name; + """ + txn.execute(sql) + + results = {} + for table, foreign_table in txn: + results.setdefault(table, set()).add(foreign_table) + return results + + return await self.postgres_store.db_pool.runInteraction( + "get_table_constraints", _get_constraints + ) + + async def handle_table( + self, table, postgres_size, table_size, forward_chunk, backward_chunk + ): + logger.info( + "Table %s: %i/%i (rows %i-%i) already ported", + table, + postgres_size, + table_size, + backward_chunk + 1, + forward_chunk - 1, + ) + + if not table_size: + return + + self.progress.add_table(table, postgres_size, table_size) + + if table == "event_search": + await self.handle_search_table( + postgres_size, table_size, forward_chunk, backward_chunk + ) + return + + if table in IGNORED_TABLES: + self.progress.update(table, table_size) # Mark table as done + return + + if table == "user_directory_stream_pos": + # We need to make sure there is a single row, `(X, null), as that is + # what synapse expects to be there. + await self.postgres_store.db_pool.simple_insert( + table=table, values={"stream_id": None} + ) + self.progress.update(table, table_size) # Mark table as done + return + + forward_select = ( + "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,) + ) + + backward_select = ( + "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" % (table,) + ) + + do_forward = [True] + do_backward = [True] + + while True: + + def r(txn): + forward_rows = [] + backward_rows = [] + if do_forward[0]: + txn.execute(forward_select, (forward_chunk, self.batch_size)) + forward_rows = txn.fetchall() + if not forward_rows: + do_forward[0] = False + + if do_backward[0]: + txn.execute(backward_select, (backward_chunk, self.batch_size)) + backward_rows = txn.fetchall() + if not backward_rows: + do_backward[0] = False + + if forward_rows or backward_rows: + headers = [column[0] for column in txn.description] + else: + headers = None + + return headers, forward_rows, backward_rows + + headers, frows, brows = await self.sqlite_store.db_pool.runInteraction( + "select", r + ) + + if frows or brows: + if frows: + forward_chunk = max(row[0] for row in frows) + 1 + if brows: + backward_chunk = min(row[0] for row in brows) - 1 + + rows = frows + brows + rows = self._convert_rows(table, headers, rows) + + def insert(txn): + self.postgres_store.insert_many_txn(txn, table, headers[1:], rows) + + self.postgres_store.db_pool.simple_update_one_txn( + txn, + table="port_from_sqlite3", + keyvalues={"table_name": table}, + updatevalues={ + "forward_rowid": forward_chunk, + "backward_rowid": backward_chunk, + }, + ) + + await self.postgres_store.execute(insert) + + postgres_size += len(rows) + + self.progress.update(table, postgres_size) + else: + return + + async def handle_search_table( + self, postgres_size, table_size, forward_chunk, backward_chunk + ): + select = ( + "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering" + " FROM event_search as es" + " INNER JOIN events AS e USING (event_id, room_id)" + " WHERE es.rowid >= ?" + " ORDER BY es.rowid LIMIT ?" 
+ ) + + while True: + + def r(txn): + txn.execute(select, (forward_chunk, self.batch_size)) + rows = txn.fetchall() + headers = [column[0] for column in txn.description] + + return headers, rows + + headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r) + + if rows: + forward_chunk = rows[-1][0] + 1 + + # We have to treat event_search differently since it has a + # different structure in the two different databases. + def insert(txn): + sql = ( + "INSERT INTO event_search (event_id, room_id, key," + " sender, vector, origin_server_ts, stream_ordering)" + " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)" + ) + + rows_dict = [] + for row in rows: + d = dict(zip(headers, row)) + if "\0" in d["value"]: + logger.warning("dropping search row %s", d) + else: + rows_dict.append(d) + + txn.executemany( + sql, + [ + ( + row["event_id"], + row["room_id"], + row["key"], + row["sender"], + row["value"], + row["origin_server_ts"], + row["stream_ordering"], + ) + for row in rows_dict + ], + ) + + self.postgres_store.db_pool.simple_update_one_txn( + txn, + table="port_from_sqlite3", + keyvalues={"table_name": "event_search"}, + updatevalues={ + "forward_rowid": forward_chunk, + "backward_rowid": backward_chunk, + }, + ) + + await self.postgres_store.execute(insert) + + postgres_size += len(rows) + + self.progress.update("event_search", postgres_size) + + else: + return + + def build_db_store( + self, + db_config: DatabaseConnectionConfig, + allow_outdated_version: bool = False, + ): + """Builds and returns a database store using the provided configuration. + + Args: + db_config: The database configuration + allow_outdated_version: True to suppress errors about the database server + version being too old to run a complete synapse + + Returns: + The built Store object. + """ + self.progress.set_state("Preparing %s" % db_config.config["name"]) + + engine = create_engine(db_config.config) + + hs = MockHomeserver(self.hs_config) + + with make_conn(db_config, engine, "portdb") as db_conn: + engine.check_database( + db_conn, allow_outdated_version=allow_outdated_version + ) + prepare_database(db_conn, engine, config=self.hs_config) + store = Store(DatabasePool(hs, db_config, engine), db_conn, hs) + db_conn.commit() + + return store + + async def run_background_updates_on_postgres(self): + # Manually apply all background updates on the PostgreSQL database. + postgres_ready = ( + await self.postgres_store.db_pool.updates.has_completed_background_updates() + ) + + if not postgres_ready: + # Only say that we're running background updates when there are background + # updates to run. + self.progress.set_state("Running background updates on PostgreSQL") + + while not postgres_ready: + await self.postgres_store.db_pool.updates.do_next_background_update(100) + postgres_ready = await ( + self.postgres_store.db_pool.updates.has_completed_background_updates() + ) + + async def run(self): + """Ports the SQLite database to a PostgreSQL database. + + When a fatal error is met, its message is assigned to the global "end_error" + variable. When this error comes with a stacktrace, its exec_info is assigned to + the global "end_error_exec_info" variable. + """ + global end_error + + try: + # we allow people to port away from outdated versions of sqlite. + self.sqlite_store = self.build_db_store( + DatabaseConnectionConfig("master-sqlite", self.sqlite_config), + allow_outdated_version=True, + ) + + # Check if all background updates are done, abort if not. 
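+            # (The port copies rows between identical schemas, so any pending
+            # background updates must finish before the data can be moved.)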
+            updates_complete = (
+                await self.sqlite_store.db_pool.updates.has_completed_background_updates()
+            )
+            if not updates_complete:
+                end_error = (
+                    "Pending background updates exist in the SQLite3 database."
+                    " Please start Synapse again and wait until every update has finished"
+                    " before running this script.\n"
+                )
+                return
+
+            self.postgres_store = self.build_db_store(
+                self.hs_config.database.get_single_database()
+            )
+
+            await self.run_background_updates_on_postgres()
+
+            self.progress.set_state("Creating port tables")
+
+            def create_port_table(txn):
+                txn.execute(
+                    "CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("
+                    " table_name varchar(100) NOT NULL UNIQUE,"
+                    " forward_rowid bigint NOT NULL,"
+                    " backward_rowid bigint NOT NULL"
+                    ")"
+                )
+
+            # The old port script created a table with just a "rowid" column.
+            # We want people to be able to rerun this script from an old port
+            # so that they can pick up any missing events that were not
+            # ported across.
+            def alter_table(txn):
+                txn.execute(
+                    "ALTER TABLE IF EXISTS port_from_sqlite3"
+                    " RENAME rowid TO forward_rowid"
+                )
+                txn.execute(
+                    "ALTER TABLE IF EXISTS port_from_sqlite3"
+                    " ADD backward_rowid bigint NOT NULL DEFAULT 0"
+                )
+
+            try:
+                await self.postgres_store.db_pool.runInteraction(
+                    "alter_table", alter_table
+                )
+            except Exception:
+                # On Error Resume Next
+                pass
+
+            await self.postgres_store.db_pool.runInteraction(
+                "create_port_table", create_port_table
+            )
+
+            # Step 2. Set up sequences
+            #
+            # We do this before porting the tables so that even if we fail half
+            # way through the postgres DB always has sequences that are greater
+            # than their respective tables. If we don't then creating the
+            # `DataStore` object will fail due to the inconsistency.
+            self.progress.set_state("Setting up sequence generators")
+            await self._setup_state_group_id_seq()
+            await self._setup_user_id_seq()
+            await self._setup_events_stream_seqs()
+            await self._setup_sequence(
+                "device_inbox_sequence", ("device_inbox", "device_federation_outbox")
+            )
+            await self._setup_sequence(
+                "account_data_sequence",
+                ("room_account_data", "room_tags_revisions", "account_data"),
+            )
+            await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
+            await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
+            await self._setup_auth_chain_sequence()
+
+            # Step 3. Get tables.
+            self.progress.set_state("Fetching tables")
+            sqlite_tables = await self.sqlite_store.db_pool.simple_select_onecol(
+                table="sqlite_master", keyvalues={"type": "table"}, retcol="name"
+            )
+
+            postgres_tables = await self.postgres_store.db_pool.simple_select_onecol(
+                table="information_schema.tables",
+                keyvalues={},
+                retcol="distinct table_name",
+            )
+
+            tables = set(sqlite_tables) & set(postgres_tables)
+            logger.info("Found %d tables", len(tables))
+
+            # Step 4. Figure out what still needs copying
+            self.progress.set_state("Checking on port progress")
+            setup_res = await make_deferred_yieldable(
+                defer.gatherResults(
+                    [
+                        run_in_background(self.setup_table, table)
+                        for table in tables
+                        if table not in ["schema_version", "applied_schema_deltas"]
+                        and not table.startswith("sqlite_")
+                    ],
+                    consumeErrors=True,
+                )
+            )
+            # Map from table name to args passed to `handle_table`, i.e. a tuple
+            # of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`.
+            tables_to_port_info_map = {r[0]: r[1:] for r in setup_res}
+
+            # Step 5. Do the copying.
+ # + # This is slightly convoluted as we need to ensure tables are ported + # in the correct order due to foreign key constraints. + self.progress.set_state("Copying to postgres") + + constraints = await self.get_table_constraints() + tables_ported = set() # type: Set[str] + + while tables_to_port_info_map: + # Pulls out all tables that are still to be ported and which + # only depend on tables that are already ported (if any). + tables_to_port = [ + table + for table in tables_to_port_info_map + if not constraints.get(table, set()) - tables_ported + ] + + await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self.handle_table, + table, + *tables_to_port_info_map.pop(table), + ) + for table in tables_to_port + ], + consumeErrors=True, + ) + ) + + tables_ported.update(tables_to_port) + + self.progress.done() + except Exception as e: + global end_error_exec_info + end_error = str(e) + end_error_exec_info = sys.exc_info() + logger.exception("") + finally: + reactor.stop() + + def _convert_rows(self, table, headers, rows): + bool_col_names = BOOLEAN_COLUMNS.get(table, []) + + bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names] + + class BadValueException(Exception): + pass + + def conv(j, col): + if j in bool_cols: + return bool(col) + if isinstance(col, bytes): + return bytearray(col) + elif isinstance(col, str) and "\0" in col: + logger.warning( + "DROPPING ROW: NUL value in table %s col %s: %r", + table, + headers[j], + col, + ) + raise BadValueException() + return col + + outrows = [] + for row in rows: + try: + outrows.append( + tuple(conv(j, col) for j, col in enumerate(row) if j > 0) + ) + except BadValueException: + pass + + return outrows + + async def _setup_sent_transactions(self): + # Only save things from the last day + yesterday = int(time.time() * 1000) - 86400000 + + # And save the max transaction id from each destination + select = ( + "SELECT rowid, * FROM sent_transactions WHERE rowid IN (" + "SELECT max(rowid) FROM sent_transactions" + " GROUP BY destination" + ")" + ) + + def r(txn): + txn.execute(select) + rows = txn.fetchall() + headers = [column[0] for column in txn.description] + + ts_ind = headers.index("ts") + + return headers, [r for r in rows if r[ts_ind] < yesterday] + + headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r) + + rows = self._convert_rows("sent_transactions", headers, rows) + + inserted_rows = len(rows) + if inserted_rows: + max_inserted_rowid = max(r[0] for r in rows) + + def insert(txn): + self.postgres_store.insert_many_txn( + txn, "sent_transactions", headers[1:], rows + ) + + await self.postgres_store.execute(insert) + else: + max_inserted_rowid = 0 + + def get_start_id(txn): + txn.execute( + "SELECT rowid FROM sent_transactions WHERE ts >= ?" 
+ " ORDER BY rowid ASC LIMIT 1", + (yesterday,), + ) + + rows = txn.fetchall() + if rows: + return rows[0][0] + else: + return 1 + + next_chunk = await self.sqlite_store.execute(get_start_id) + next_chunk = max(max_inserted_rowid + 1, next_chunk) + + await self.postgres_store.db_pool.simple_insert( + table="port_from_sqlite3", + values={ + "table_name": "sent_transactions", + "forward_rowid": next_chunk, + "backward_rowid": 0, + }, + ) + + def get_sent_table_size(txn): + txn.execute( + "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,) + ) + (size,) = txn.fetchone() + return int(size) + + remaining_count = await self.sqlite_store.execute(get_sent_table_size) + + total_count = remaining_count + inserted_rows + + return next_chunk, inserted_rows, total_count + + async def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk): + frows = await self.sqlite_store.execute_sql( + "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk + ) + + brows = await self.sqlite_store.execute_sql( + "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk + ) + + return frows[0][0] + brows[0][0] + + async def _get_already_ported_count(self, table): + rows = await self.postgres_store.execute_sql( + "SELECT count(*) FROM %s" % (table,) + ) + + return rows[0][0] + + async def _get_total_count_to_port(self, table, forward_chunk, backward_chunk): + remaining, done = await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self._get_remaining_count_to_port, + table, + forward_chunk, + backward_chunk, + ), + run_in_background(self._get_already_ported_count, table), + ], + ) + ) + + remaining = int(remaining) if remaining else 0 + done = int(done) if done else 0 + + return done, remaining + done + + async def _setup_state_group_id_seq(self) -> None: + curr_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True + ) + + if not curr_id: + return + + def r(txn): + next_id = curr_id + 1 + txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,)) + + await self.postgres_store.db_pool.runInteraction("setup_state_group_id_seq", r) + + async def _setup_user_id_seq(self) -> None: + curr_id = await self.sqlite_store.db_pool.runInteraction( + "setup_user_id_seq", find_max_generated_user_id_localpart + ) + + def r(txn): + next_id = curr_id + 1 + txn.execute("ALTER SEQUENCE user_id_seq RESTART WITH %s", (next_id,)) + + await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r) + + async def _setup_events_stream_seqs(self) -> None: + """Set the event stream sequences to the correct values.""" + + # We get called before we've ported the events table, so we need to + # fetch the current positions from the SQLite store. 
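+        # (Backfilled events are stored with negative stream orderings, which
+        # is why the backwards position is derived via MAX(-MIN(...), 1) below.)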
+ curr_forward_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="events", keyvalues={}, retcol="MAX(stream_ordering)", allow_none=True + ) + + curr_backward_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="events", + keyvalues={}, + retcol="MAX(-MIN(stream_ordering), 1)", + allow_none=True, + ) + + def _setup_events_stream_seqs_set_pos(txn): + if curr_forward_id: + txn.execute( + "ALTER SEQUENCE events_stream_seq RESTART WITH %s", + (curr_forward_id + 1,), + ) + + if curr_backward_id: + txn.execute( + "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", + (curr_backward_id + 1,), + ) + + await self.postgres_store.db_pool.runInteraction( + "_setup_events_stream_seqs", + _setup_events_stream_seqs_set_pos, + ) + + async def _setup_sequence( + self, sequence_name: str, stream_id_tables: Iterable[str] + ) -> None: + """Set a sequence to the correct value.""" + current_stream_ids = [] + for stream_id_table in stream_id_tables: + max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table=stream_id_table, + keyvalues={}, + retcol="COALESCE(MAX(stream_id), 1)", + allow_none=True, + ) + current_stream_ids.append(max_stream_id) + + next_id = max(current_stream_ids) + 1 + + def r(txn): + sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,) + txn.execute(sql + " %s", (next_id,)) + + await self.postgres_store.db_pool.runInteraction( + "_setup_%s" % (sequence_name,), r + ) + + async def _setup_auth_chain_sequence(self) -> None: + curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="event_auth_chains", + keyvalues={}, + retcol="MAX(chain_id)", + allow_none=True, + ) + + def r(txn): + txn.execute( + "ALTER SEQUENCE event_auth_chain_id RESTART WITH %s", + (curr_chain_id + 1,), + ) + + if curr_chain_id is not None: + await self.postgres_store.db_pool.runInteraction( + "_setup_event_auth_chain_id", + r, + ) + + +############################################## +# The following is simply UI stuff +############################################## + + +class Progress(object): + """Used to report progress of the port""" + + def __init__(self): + self.tables = {} + + self.start_time = int(time.time()) + + def add_table(self, table, cur, size): + self.tables[table] = { + "start": cur, + "num_done": cur, + "total": size, + "perc": int(cur * 100 / size), + } + + def update(self, table, num_done): + data = self.tables[table] + data["num_done"] = num_done + data["perc"] = int(num_done * 100 / data["total"]) + + def done(self): + pass + + +class CursesProgress(Progress): + """Reports progress to a curses window""" + + def __init__(self, stdscr): + self.stdscr = stdscr + + curses.use_default_colors() + curses.curs_set(0) + + curses.init_pair(1, curses.COLOR_RED, -1) + curses.init_pair(2, curses.COLOR_GREEN, -1) + + self.last_update = 0 + + self.finished = False + + self.total_processed = 0 + self.total_remaining = 0 + + super(CursesProgress, self).__init__() + + def update(self, table, num_done): + super(CursesProgress, self).update(table, num_done) + + self.total_processed = 0 + self.total_remaining = 0 + for data in self.tables.values(): + self.total_processed += data["num_done"] - data["start"] + self.total_remaining += data["total"] - data["num_done"] + + self.render() + + def render(self, force=False): + now = time.time() + + if not force and now - self.last_update < 0.2: + # reactor.callLater(1, self.render) + return + + self.stdscr.clear() + + rows, cols = self.stdscr.getmaxyx() + + duration = int(now) - 
int(self.start_time) + + minutes, seconds = divmod(duration, 60) + duration_str = "%02dm %02ds" % (minutes, seconds) + + if self.finished: + status = "Time spent: %s (Done!)" % (duration_str,) + else: + + if self.total_processed > 0: + left = float(self.total_remaining) / self.total_processed + + est_remaining = (int(now) - self.start_time) * left + est_remaining_str = "%02dm %02ds remaining" % divmod(est_remaining, 60) + else: + est_remaining_str = "Unknown" + status = "Time spent: %s (est. remaining: %s)" % ( + duration_str, + est_remaining_str, + ) + + self.stdscr.addstr(0, 0, status, curses.A_BOLD) + + max_len = max(len(t) for t in self.tables.keys()) + + left_margin = 5 + middle_space = 1 + + items = self.tables.items() + items = sorted(items, key=lambda i: (i[1]["perc"], i[0])) + + for i, (table, data) in enumerate(items): + if i + 2 >= rows: + break + + perc = data["perc"] + + color = curses.color_pair(2) if perc == 100 else curses.color_pair(1) + + self.stdscr.addstr( + i + 2, left_margin + max_len - len(table), table, curses.A_BOLD | color + ) + + size = 20 + + progress = "[%s%s]" % ( + "#" * int(perc * size / 100), + " " * (size - int(perc * size / 100)), + ) + + self.stdscr.addstr( + i + 2, + left_margin + max_len + middle_space, + "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]), + ) + + if self.finished: + self.stdscr.addstr(rows - 1, 0, "Press any key to exit...") + + self.stdscr.refresh() + self.last_update = time.time() + + def done(self): + self.finished = True + self.render(True) + self.stdscr.getch() + + def set_state(self, state): + self.stdscr.clear() + self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD) + self.stdscr.refresh() + + +class TerminalProgress(Progress): + """Just prints progress to the terminal""" + + def update(self, table, num_done): + super(TerminalProgress, self).update(table, num_done) + + data = self.tables[table] + + print( + "%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"]) + ) + + def set_state(self, state): + print(state + "...") + + +############################################## +############################################## + + +def main(): + parser = argparse.ArgumentParser( + description="A script to port an existing synapse SQLite database to" + " a new PostgreSQL database." + ) + parser.add_argument("-v", action="store_true") + parser.add_argument( + "--sqlite-database", + required=True, + help="The snapshot of the SQLite database file. 
This must not be" + " currently used by a running synapse server", + ) + parser.add_argument( + "--postgres-config", + type=argparse.FileType("r"), + required=True, + help="The database config file for the PostgreSQL database", + ) + parser.add_argument( + "--curses", action="store_true", help="display a curses based progress UI" + ) + + parser.add_argument( + "--batch-size", + type=int, + default=1000, + help="The number of rows to select from the SQLite table each" + " iteration [default=1000]", + ) + + args = parser.parse_args() + + logging_config = { + "level": logging.DEBUG if args.v else logging.INFO, + "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", + } + + if args.curses: + logging_config["filename"] = "port-synapse.log" + + logging.basicConfig(**logging_config) + + sqlite_config = { + "name": "sqlite3", + "args": { + "database": args.sqlite_database, + "cp_min": 1, + "cp_max": 1, + "check_same_thread": False, + }, + } + + hs_config = yaml.safe_load(args.postgres_config) + + if "database" not in hs_config: + sys.stderr.write("The configuration file must have a 'database' section.\n") + sys.exit(4) + + postgres_config = hs_config["database"] + + if "name" not in postgres_config: + sys.stderr.write("Malformed database config: no 'name'\n") + sys.exit(2) + if postgres_config["name"] != "psycopg2": + sys.stderr.write("Database must use the 'psycopg2' connector.\n") + sys.exit(3) + + config = HomeServerConfig() + config.parse_config_dict(hs_config, "", "") + + def start(stdscr=None): + if stdscr: + progress = CursesProgress(stdscr) + else: + progress = TerminalProgress() + + porter = Porter( + sqlite_config=sqlite_config, + progress=progress, + batch_size=args.batch_size, + hs_config=config, + ) + + @defer.inlineCallbacks + def run(): + with LoggingContext("synapse_port_db_run"): + yield defer.ensureDeferred(porter.run()) + + reactor.callWhenRunning(run) + + reactor.run() + + if args.curses: + curses.wrapper(start) + else: + start() + + if end_error: + if end_error_exec_info: + exc_type, exc_value, exc_traceback = end_error_exec_info + traceback.print_exception(exc_type, exc_value, exc_traceback) + + sys.stderr.write(end_error) + + sys.exit(5) + + +if __name__ == "__main__": + main() diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py new file mode 100755 index 0000000000..f43676afaa --- /dev/null +++ b/synapse/_scripts/update_synapse_database.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
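+
+# Stand-alone script which brings a Synapse database up to the latest schema
+# and optionally runs the remaining background updates; previously shipped
+# as scripts/update_synapse_database.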
+ +import argparse +import logging +import sys + +import yaml +from matrix_common.versionstring import get_distribution_version_string + +from twisted.internet import defer, reactor + +from synapse.config.homeserver import HomeServerConfig +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.server import HomeServer +from synapse.storage import DataStore + +logger = logging.getLogger("update_database") + + +class MockHomeserver(HomeServer): + DATASTORE_CLASS = DataStore + + def __init__(self, config, **kwargs): + super(MockHomeserver, self).__init__( + config.server.server_name, reactor=reactor, config=config, **kwargs + ) + + self.version_string = "Synapse/" + get_distribution_version_string( + "matrix-synapse" + ) + + +def run_background_updates(hs): + store = hs.get_datastores().main + + async def run_background_updates(): + await store.db_pool.updates.run_background_updates(sleep=False) + # Stop the reactor to exit the script once every background update is run. + reactor.stop() + + def run(): + # Apply all background updates on the database. + defer.ensureDeferred( + run_as_background_process("background_updates", run_background_updates) + ) + + reactor.callWhenRunning(run) + + reactor.run() + + +def main(): + parser = argparse.ArgumentParser( + description=( + "Updates a synapse database to the latest schema and optionally runs background updates" + " on it." + ) + ) + parser.add_argument("-v", action="store_true") + parser.add_argument( + "--database-config", + type=argparse.FileType("r"), + required=True, + help="Synapse configuration file, giving the details of the database to be updated", + ) + parser.add_argument( + "--run-background-updates", + action="store_true", + required=False, + help="run background updates after upgrading the database schema", + ) + + args = parser.parse_args() + + logging_config = { + "level": logging.DEBUG if args.v else logging.INFO, + "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", + } + + logging.basicConfig(**logging_config) + + # Load, process and sanity-check the config. + hs_config = yaml.safe_load(args.database_config) + + if "database" not in hs_config: + sys.stderr.write("The configuration file must have a 'database' section.\n") + sys.exit(4) + + config = HomeServerConfig() + config.parse_config_dict(hs_config, "", "") + + # Instantiate and initialise the homeserver object. + hs = MockHomeserver(config) + + # Setup instantiates the store within the homeserver object and updates the + # DB. + hs.setup() + + if args.run_background_updates: + run_background_updates(hs) + + +if __name__ == "__main__": + main() diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 1265738dc1..8e19e2fc26 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -383,7 +383,7 @@ class RootConfig: Build a default configuration file This is used when the user explicitly asks us to generate a config file - (eg with --generate_config). + (eg with --generate-config). Args: config_dir_path: The path where the config files are kept. 
Used to diff --git a/tox.ini b/tox.ini index 04b972e2c5..8d6aa7580b 100644 --- a/tox.ini +++ b/tox.ini @@ -38,15 +38,7 @@ lint_targets = setup.py synapse tests - scripts # annoyingly, black doesn't find these so we have to list them - scripts/export_signing_key - scripts/generate_config - scripts/generate_log_config - scripts/hash_password - scripts/register_new_matrix_user - scripts/synapse_port_db - scripts/update_synapse_database scripts-dev scripts-dev/build_debian_packages scripts-dev/sign_json -- cgit 1.4.1 From 1103c5fe8a795eafc4aeedc547faa1b68d5a12f5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Mar 2022 08:18:51 -0500 Subject: Check if instances are lists, not sequences. (#12128) As a str is a sequence, the checks were not granular enough and would allow lists or strings, when only lists were valid. --- changelog.d/12128.misc | 1 + synapse/federation/federation_client.py | 8 ++++---- synapse/handlers/room_summary.py | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/12128.misc diff --git a/changelog.d/12128.misc b/changelog.d/12128.misc new file mode 100644 index 0000000000..0570a8e327 --- /dev/null +++ b/changelog.d/12128.misc @@ -0,0 +1 @@ +Fix data validation to compare to lists, not sequences. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 64e595e748..467275b98c 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1428,7 +1428,7 @@ class FederationClient(FederationBase): # Validate children_state of the room. children_state = room.pop("children_state", []) - if not isinstance(children_state, Sequence): + if not isinstance(children_state, list): raise InvalidResponseError("'room.children_state' must be a list") if any(not isinstance(e, dict) for e in children_state): raise InvalidResponseError("Invalid event in 'children_state' list") @@ -1440,14 +1440,14 @@ class FederationClient(FederationBase): # Validate the children rooms. children = res.get("children", []) - if not isinstance(children, Sequence): + if not isinstance(children, list): raise InvalidResponseError("'children' must be a list") if any(not isinstance(r, dict) for r in children): raise InvalidResponseError("Invalid room in 'children' list") # Validate the inaccessible children. 
inaccessible_children = res.get("inaccessible_children", []) - if not isinstance(inaccessible_children, Sequence): + if not isinstance(inaccessible_children, list): raise InvalidResponseError("'inaccessible_children' must be a list") if any(not isinstance(r, str) for r in inaccessible_children): raise InvalidResponseError( @@ -1630,7 +1630,7 @@ def _validate_hierarchy_event(d: JsonDict) -> None: raise ValueError("Invalid event: 'content' must be a dict") via = content.get("via") - if not isinstance(via, Sequence): + if not isinstance(via, list): raise ValueError("Invalid event: 'via' must be a list") if any(not isinstance(v, str) for v in via): raise ValueError("Invalid event: 'via' must be a list of strings") diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 55c2cbdba8..3979cbba71 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -857,7 +857,7 @@ class _RoomEntry: def _has_valid_via(e: EventBase) -> bool: via = e.content.get("via") - if not via or not isinstance(via, Sequence): + if not via or not isinstance(via, list): return False for v in via: if not isinstance(v, str): -- cgit 1.4.1 From b4461e7d8ab6cfe150f39f62aa68f7f13ef97a24 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 2 Mar 2022 16:11:16 +0000 Subject: Enable complexity checking in complexity checking docs example (#11998) --- changelog.d/11998.doc | 1 + .../running_synapse_on_single_board_computers.md | 19 ++++++++++--------- 2 files changed, 11 insertions(+), 9 deletions(-) create mode 100644 changelog.d/11998.doc diff --git a/changelog.d/11998.doc b/changelog.d/11998.doc new file mode 100644 index 0000000000..33ab7b7880 --- /dev/null +++ b/changelog.d/11998.doc @@ -0,0 +1 @@ +Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. \ No newline at end of file diff --git a/docs/other/running_synapse_on_single_board_computers.md b/docs/other/running_synapse_on_single_board_computers.md index ea14afa8b2..dcf96f0056 100644 --- a/docs/other/running_synapse_on_single_board_computers.md +++ b/docs/other/running_synapse_on_single_board_computers.md @@ -31,28 +31,29 @@ Anything that requires modifying the device list [#7721](https://github.com/matr Put the below in a new file at /etc/matrix-synapse/conf.d/sbc.yaml to override the defaults in homeserver.yaml. ``` -# Set to false to disable presence tracking on this homeserver. +# Disable presence tracking, which is currently fairly resource intensive +# More info: https://github.com/matrix-org/synapse/issues/9478 use_presence: false -# When this is enabled, the room "complexity" will be checked before a user -# joins a new remote room. If it is above the complexity limit, the server will -# disallow joining, or will instantly leave. +# Set a small complexity limit, preventing users from joining large rooms +# which may be resource-intensive to remain a part of. +# +# Note that this will not prevent users from joining smaller rooms that +# eventually become complex. limit_remote_rooms: - # Uncomment to enable room complexity checking. 
- #enabled: true + enabled: true complexity: 3.0 # Database configuration database: + # Use postgres for the best performance name: psycopg2 args: user: matrix-synapse - # Generate a long, secure one with a password manager + # Generate a long, secure password using a password manager password: hunter2 database: matrix-synapse host: localhost - cp_min: 5 - cp_max: 10 ``` Currently the complexity is measured by [current_state_events / 500](https://github.com/matrix-org/synapse/blob/v1.20.1/synapse/storage/databases/main/events_worker.py#L986). You can find join times and your most complex rooms like this: -- cgit 1.4.1 From 2ffaf30803f93273a4d8a65c9e6c3110c8433488 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 2 Mar 2022 17:34:14 +0100 Subject: Add type hints to `tests/rest/client` (#12108) * Add type hints to `tests/rest/client` * newsfile * fix imports * add `test_account.py` * Remove one type hint in `test_report_event.py` * change `on_create_room` to `async` * update new functions in `test_third_party_rules.py` * Add `test_filter.py` * add `test_rooms.py` * change to `assertEquals` to `assertEqual` * lint --- changelog.d/12108.misc | 1 + mypy.ini | 6 - tests/rest/client/test_account.py | 290 +++++++++++++++------------- tests/rest/client/test_filter.py | 29 +-- tests/rest/client/test_relations.py | 4 +- tests/rest/client/test_report_event.py | 25 ++- tests/rest/client/test_rooms.py | 271 +++++++++++++------------- tests/rest/client/test_third_party_rules.py | 108 +++++++---- tests/rest/client/test_typing.py | 41 ++-- 9 files changed, 423 insertions(+), 352 deletions(-) create mode 100644 changelog.d/12108.misc diff --git a/changelog.d/12108.misc b/changelog.d/12108.misc new file mode 100644 index 0000000000..0360dbd61e --- /dev/null +++ b/changelog.d/12108.misc @@ -0,0 +1 @@ +Add type hints to `tests/rest/client`. 
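For context, a minimal sketch of the typing pattern this commit applies across `tests/rest/client` (the test-case name below is hypothetical, not taken from the patch): the `HomeserverTestCase` hooks gain explicit signatures so mypy can check the test bodies once the files are dropped from the `exclude` list in `mypy.ini` below.

```python
from twisted.test.proto_helpers import MemoryReactor

from synapse.server import HomeServer
from synapse.util import Clock

from tests import unittest


class ExampleTypedTestCase(unittest.HomeserverTestCase):  # hypothetical name
    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        # Typed hook: mypy can now verify every use of `hs` and `self.store`.
        self.store = hs.get_datastores().main

    def test_store_is_set(self) -> None:
        self.assertIsNotNone(self.store)
```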
diff --git a/mypy.ini b/mypy.ini index 6b1e995e64..23ca4eaa5a 100644 --- a/mypy.ini +++ b/mypy.ini @@ -78,13 +78,7 @@ exclude = (?x) |tests/push/test_http.py |tests/push/test_presentable_names.py |tests/push/test_push_rule_evaluator.py - |tests/rest/client/test_account.py - |tests/rest/client/test_filter.py - |tests/rest/client/test_report_event.py - |tests/rest/client/test_rooms.py - |tests/rest/client/test_third_party_rules.py |tests/rest/client/test_transactions.py - |tests/rest/client/test_typing.py |tests/rest/key/v2/test_remote_key_resource.py |tests/rest/media/v1/test_base.py |tests/rest/media/v1/test_media_storage.py diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 6c4462e74a..def836054d 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -15,11 +15,12 @@ import json import os import re from email.parser import Parser -from typing import Dict, List, Optional +from typing import Any, Dict, List, Optional, Union from unittest.mock import Mock import pkg_resources +from twisted.internet.interfaces import IReactorTCP from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -30,6 +31,7 @@ from synapse.rest import admin from synapse.rest.client import account, login, register, room from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource from synapse.server import HomeServer +from synapse.types import JsonDict from synapse.util import Clock from tests import unittest @@ -46,7 +48,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() # Email config. 
@@ -67,20 +69,27 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): hs = self.setup_test_homeserver(config=config) async def sendmail( - reactor, smtphost, smtpport, from_addr, to_addrs, msg, **kwargs - ): - self.email_attempts.append(msg) - - self.email_attempts = [] + reactor: IReactorTCP, + smtphost: str, + smtpport: int, + from_addr: str, + to_addr: str, + msg_bytes: bytes, + *args: Any, + **kwargs: Any, + ) -> None: + self.email_attempts.append(msg_bytes) + + self.email_attempts: List[bytes] = [] hs.get_send_email_handler()._sendmail = sendmail return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.submit_token_resource = PasswordResetSubmitTokenResource(hs) - def test_basic_password_reset(self): + def test_basic_password_reset(self) -> None: """Test basic password reset flow""" old_password = "monkey" new_password = "kangeroo" @@ -118,7 +127,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): self.attempt_wrong_password_login("kermit", old_password) @override_config({"rc_3pid_validation": {"burst_count": 3}}) - def test_ratelimit_by_email(self): + def test_ratelimit_by_email(self) -> None: """Test that we ratelimit /requestToken for the same email.""" old_password = "monkey" new_password = "kangeroo" @@ -139,7 +148,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): ) ) - def reset(ip): + def reset(ip: str) -> None: client_secret = "foobar" session_id = self._request_token(email, client_secret, ip) @@ -166,7 +175,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): self.assertEqual(cm.exception.code, 429) - def test_basic_password_reset_canonicalise_email(self): + def test_basic_password_reset_canonicalise_email(self) -> None: """Test basic password reset flow Request password reset with different spelling """ @@ -206,7 +215,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): # Assert we can't log in with the old password self.attempt_wrong_password_login("kermit", old_password) - def test_cant_reset_password_without_clicking_link(self): + def test_cant_reset_password_without_clicking_link(self) -> None: """Test that we do actually need to click the link in the email""" old_password = "monkey" new_password = "kangeroo" @@ -241,7 +250,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): # Assert we can't log in with the new password self.attempt_wrong_password_login("kermit", new_password) - def test_no_valid_token(self): + def test_no_valid_token(self) -> None: """Test that we do actually need to request a token and can't just make a session up. """ @@ -277,7 +286,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): self.attempt_wrong_password_login("kermit", new_password) @unittest.override_config({"request_token_inhibit_3pid_errors": True}) - def test_password_reset_bad_email_inhibit_error(self): + def test_password_reset_bad_email_inhibit_error(self) -> None: """Test that triggering a password reset with an email address that isn't bound to an account doesn't leak the lack of binding for that address if configured that way. 
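# --- Editor's aside (a sketch of what the partially elided
# `_get_link_from_email` helper does with the bytes captured by the
# `sendmail` stub above; the function name here is illustrative only):
import re
from email.parser import Parser

def extract_reset_link(msg_bytes: bytes) -> str:
    msg = Parser().parsestr(msg_bytes.decode("UTF-8"))
    # The reset mail is multipart; pull out the text/plain part.
    text = None
    for part in msg.walk():
        if part.get_content_type() == "text/plain":
            text = part.get_payload(decode=True).decode("UTF-8")
            break
    assert text is not None, "no text/plain part in email"
    match = re.search(r"https://example\.com\S+", text)
    assert match, "no link found in email"
    return match.group(0)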
@@ -292,7 +301,12 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): self.assertIsNotNone(session_id) - def _request_token(self, email, client_secret, ip="127.0.0.1"): + def _request_token( + self, + email: str, + client_secret: str, + ip: str = "127.0.0.1", + ) -> str: channel = self.make_request( "POST", b"account/password/email/requestToken", @@ -309,7 +323,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): return channel.json_body["sid"] - def _validate_token(self, link): + def _validate_token(self, link: str) -> None: # Remove the host path = link.replace("https://example.com", "") @@ -339,7 +353,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): ) self.assertEqual(200, channel.code, channel.result) - def _get_link_from_email(self): + def _get_link_from_email(self) -> str: assert self.email_attempts, "No emails have been sent" raw_msg = self.email_attempts[-1].decode("UTF-8") @@ -354,14 +368,19 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): if not text: self.fail("Could not find text portion of email to parse") + assert text is not None match = re.search(r"https://example.com\S+", text) assert match, "Could not find link in email" return match.group(0) def _reset_password( - self, new_password, session_id, client_secret, expected_code=200 - ): + self, + new_password: str, + session_id: str, + client_secret: str, + expected_code: int = 200, + ) -> None: channel = self.make_request( "POST", b"account/password", @@ -388,11 +407,11 @@ class DeactivateTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.hs = self.setup_test_homeserver() return self.hs - def test_deactivate_account(self): + def test_deactivate_account(self) -> None: user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") @@ -407,7 +426,7 @@ class DeactivateTestCase(unittest.HomeserverTestCase): channel = self.make_request("GET", "account/whoami", access_token=tok) self.assertEqual(channel.code, 401) - def test_pending_invites(self): + def test_pending_invites(self) -> None: """Tests that deactivating a user rejects every pending invite for them.""" store = self.hs.get_datastores().main @@ -448,7 +467,7 @@ class DeactivateTestCase(unittest.HomeserverTestCase): self.assertEqual(len(memberships), 1, memberships) self.assertEqual(memberships[0].room_id, room_id, memberships) - def deactivate(self, user_id, tok): + def deactivate(self, user_id: str, tok: str) -> None: request_data = json.dumps( { "auth": { @@ -474,12 +493,12 @@ class WhoamiTestCase(unittest.HomeserverTestCase): register.register_servlets, ] - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["allow_guest_access"] = True return config - def test_GET_whoami(self): + def test_GET_whoami(self) -> None: device_id = "wouldgohere" user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test", device_id=device_id) @@ -496,7 +515,7 @@ class WhoamiTestCase(unittest.HomeserverTestCase): }, ) - def test_GET_whoami_guests(self): + def test_GET_whoami_guests(self) -> None: channel = self.make_request( b"POST", b"/_matrix/client/r0/register?kind=guest", b"{}" ) @@ -516,7 +535,7 @@ class WhoamiTestCase(unittest.HomeserverTestCase): }, ) - def test_GET_whoami_appservices(self): + def test_GET_whoami_appservices(self) -> None: user_id = "@as:test" as_token = "i_am_an_app_service" @@ 
-541,7 +560,7 @@ class WhoamiTestCase(unittest.HomeserverTestCase): ) self.assertFalse(hasattr(whoami, "device_id")) - def _whoami(self, tok): + def _whoami(self, tok: str) -> JsonDict: channel = self.make_request("GET", "account/whoami", {}, access_token=tok) self.assertEqual(channel.code, 200) return channel.json_body @@ -555,7 +574,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): synapse.rest.admin.register_servlets_for_client_rest_resource, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() # Email config. @@ -576,16 +595,23 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): self.hs = self.setup_test_homeserver(config=config) async def sendmail( - reactor, smtphost, smtpport, from_addr, to_addrs, msg, **kwargs - ): - self.email_attempts.append(msg) - - self.email_attempts = [] + reactor: IReactorTCP, + smtphost: str, + smtpport: int, + from_addr: str, + to_addr: str, + msg_bytes: bytes, + *args: Any, + **kwargs: Any, + ) -> None: + self.email_attempts.append(msg_bytes) + + self.email_attempts: List[bytes] = [] self.hs.get_send_email_handler()._sendmail = sendmail return self.hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.user_id = self.register_user("kermit", "test") @@ -593,83 +619,73 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): self.email = "test@example.com" self.url_3pid = b"account/3pid" - def test_add_valid_email(self): - self.get_success(self._add_email(self.email, self.email)) + def test_add_valid_email(self) -> None: + self._add_email(self.email, self.email) - def test_add_valid_email_second_time(self): - self.get_success(self._add_email(self.email, self.email)) - self.get_success( - self._request_token_invalid_email( - self.email, - expected_errcode=Codes.THREEPID_IN_USE, - expected_error="Email is already in use", - ) + def test_add_valid_email_second_time(self) -> None: + self._add_email(self.email, self.email) + self._request_token_invalid_email( + self.email, + expected_errcode=Codes.THREEPID_IN_USE, + expected_error="Email is already in use", ) - def test_add_valid_email_second_time_canonicalise(self): - self.get_success(self._add_email(self.email, self.email)) - self.get_success( - self._request_token_invalid_email( - "TEST@EXAMPLE.COM", - expected_errcode=Codes.THREEPID_IN_USE, - expected_error="Email is already in use", - ) + def test_add_valid_email_second_time_canonicalise(self) -> None: + self._add_email(self.email, self.email) + self._request_token_invalid_email( + "TEST@EXAMPLE.COM", + expected_errcode=Codes.THREEPID_IN_USE, + expected_error="Email is already in use", ) - def test_add_email_no_at(self): - self.get_success( - self._request_token_invalid_email( - "address-without-at.bar", - expected_errcode=Codes.UNKNOWN, - expected_error="Unable to parse email address", - ) + def test_add_email_no_at(self) -> None: + self._request_token_invalid_email( + "address-without-at.bar", + expected_errcode=Codes.UNKNOWN, + expected_error="Unable to parse email address", ) - def test_add_email_two_at(self): - self.get_success( - self._request_token_invalid_email( - "foo@foo@test.bar", - expected_errcode=Codes.UNKNOWN, - expected_error="Unable to parse email address", - ) + def test_add_email_two_at(self) -> None: + self._request_token_invalid_email( + "foo@foo@test.bar", + 
expected_errcode=Codes.UNKNOWN, + expected_error="Unable to parse email address", ) - def test_add_email_bad_format(self): - self.get_success( - self._request_token_invalid_email( - "user@bad.example.net@good.example.com", - expected_errcode=Codes.UNKNOWN, - expected_error="Unable to parse email address", - ) + def test_add_email_bad_format(self) -> None: + self._request_token_invalid_email( + "user@bad.example.net@good.example.com", + expected_errcode=Codes.UNKNOWN, + expected_error="Unable to parse email address", ) - def test_add_email_domain_to_lower(self): - self.get_success(self._add_email("foo@TEST.BAR", "foo@test.bar")) + def test_add_email_domain_to_lower(self) -> None: + self._add_email("foo@TEST.BAR", "foo@test.bar") - def test_add_email_domain_with_umlaut(self): - self.get_success(self._add_email("foo@Öumlaut.com", "foo@öumlaut.com")) + def test_add_email_domain_with_umlaut(self) -> None: + self._add_email("foo@Öumlaut.com", "foo@öumlaut.com") - def test_add_email_address_casefold(self): - self.get_success(self._add_email("Strauß@Example.com", "strauss@example.com")) + def test_add_email_address_casefold(self) -> None: + self._add_email("Strauß@Example.com", "strauss@example.com") - def test_address_trim(self): - self.get_success(self._add_email(" foo@test.bar ", "foo@test.bar")) + def test_address_trim(self) -> None: + self._add_email(" foo@test.bar ", "foo@test.bar") @override_config({"rc_3pid_validation": {"burst_count": 3}}) - def test_ratelimit_by_ip(self): + def test_ratelimit_by_ip(self) -> None: """Tests that adding emails is ratelimited by IP""" # We expect to be able to set three emails before getting ratelimited. - self.get_success(self._add_email("foo1@test.bar", "foo1@test.bar")) - self.get_success(self._add_email("foo2@test.bar", "foo2@test.bar")) - self.get_success(self._add_email("foo3@test.bar", "foo3@test.bar")) + self._add_email("foo1@test.bar", "foo1@test.bar") + self._add_email("foo2@test.bar", "foo2@test.bar") + self._add_email("foo3@test.bar", "foo3@test.bar") with self.assertRaises(HttpResponseException) as cm: - self.get_success(self._add_email("foo4@test.bar", "foo4@test.bar")) + self._add_email("foo4@test.bar", "foo4@test.bar") self.assertEqual(cm.exception.code, 429) - def test_add_email_if_disabled(self): + def test_add_email_if_disabled(self) -> None: """Test adding email to profile when doing so is disallowed""" self.hs.config.registration.enable_3pid_changes = False @@ -695,7 +711,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): }, access_token=self.user_id_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Get user @@ -705,10 +721,10 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) - def test_delete_email(self): + def test_delete_email(self) -> None: """Test deleting an email from profile""" # Add a threepid self.get_success( @@ -727,7 +743,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): {"medium": "email", "address": self.email}, access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, 
msg=channel.result["body"]) # Get user channel = self.make_request( @@ -736,10 +752,10 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) - def test_delete_email_if_disabled(self): + def test_delete_email_if_disabled(self) -> None: """Test deleting an email from profile when disallowed""" self.hs.config.registration.enable_3pid_changes = False @@ -761,7 +777,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Get user @@ -771,11 +787,11 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual(self.email, channel.json_body["threepids"][0]["address"]) - def test_cant_add_email_without_clicking_link(self): + def test_cant_add_email_without_clicking_link(self) -> None: """Test that we do actually need to click the link in the email""" client_secret = "foobar" session_id = self._request_token(self.email, client_secret) @@ -797,7 +813,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): }, access_token=self.user_id_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) # Get user @@ -807,10 +823,10 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) - def test_no_valid_token(self): + def test_no_valid_token(self) -> None: """Test that we do actually need to request a token and can't just make a session up. 
""" @@ -832,7 +848,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): }, access_token=self.user_id_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) # Get user @@ -842,11 +858,11 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) @override_config({"next_link_domain_whitelist": None}) - def test_next_link(self): + def test_next_link(self) -> None: """Tests a valid next_link parameter value with no whitelist (good case)""" self._request_token( "something@example.com", @@ -856,7 +872,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): ) @override_config({"next_link_domain_whitelist": None}) - def test_next_link_exotic_protocol(self): + def test_next_link_exotic_protocol(self) -> None: """Tests using a esoteric protocol as a next_link parameter value. Someone may be hosting a client on IPFS etc. """ @@ -868,7 +884,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): ) @override_config({"next_link_domain_whitelist": None}) - def test_next_link_file_uri(self): + def test_next_link_file_uri(self) -> None: """Tests next_link parameters cannot be file URI""" # Attempt to use a next_link value that points to the local disk self._request_token( @@ -879,7 +895,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): ) @override_config({"next_link_domain_whitelist": ["example.com", "example.org"]}) - def test_next_link_domain_whitelist(self): + def test_next_link_domain_whitelist(self) -> None: """Tests next_link parameters must fit the whitelist if provided""" # Ensure not providing a next_link parameter still works @@ -912,7 +928,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): ) @override_config({"next_link_domain_whitelist": []}) - def test_empty_next_link_domain_whitelist(self): + def test_empty_next_link_domain_whitelist(self) -> None: """Tests an empty next_lint_domain_whitelist value, meaning next_link is essentially disallowed """ @@ -962,28 +978,28 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): def _request_token_invalid_email( self, - email, - expected_errcode, - expected_error, - client_secret="foobar", - ): + email: str, + expected_errcode: str, + expected_error: str, + client_secret: str = "foobar", + ) -> None: channel = self.make_request( "POST", b"account/3pid/email/requestToken", {"client_secret": client_secret, "email": email, "send_attempt": 1}, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) self.assertEqual(expected_errcode, channel.json_body["errcode"]) self.assertEqual(expected_error, channel.json_body["error"]) - def _validate_token(self, link): + def _validate_token(self, link: str) -> None: # Remove the host path = link.replace("https://example.com", "") channel = self.make_request("GET", path, shorthand=False) self.assertEqual(200, channel.code, channel.result) - def _get_link_from_email(self): + def _get_link_from_email(self) -> str: assert self.email_attempts, "No emails have been sent" raw_msg = self.email_attempts[-1].decode("UTF-8") @@ -998,12 +1014,13 @@ class 
ThreepidEmailRestTestCase(unittest.HomeserverTestCase): if not text: self.fail("Could not find text portion of email to parse") + assert text is not None match = re.search(r"https://example.com\S+", text) assert match, "Could not find link in email" return match.group(0) - def _add_email(self, request_email, expected_email): + def _add_email(self, request_email: str, expected_email: str) -> None: """Test adding an email to profile""" previous_email_attempts = len(self.email_attempts) @@ -1030,7 +1047,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) # Get user channel = self.make_request( @@ -1039,7 +1056,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) threepids = {threepid["address"] for threepid in channel.json_body["threepids"]} @@ -1055,18 +1072,18 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): url = "/_matrix/client/unstable/org.matrix.msc3720/account_status" - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["experimental_features"] = {"msc3720_enabled": True} return self.setup_test_homeserver(config=config) - def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.requester = self.register_user("requester", "password") self.requester_tok = self.login("requester", "password") - self.server_name = homeserver.config.server.server_name + self.server_name = hs.config.server.server_name - def test_missing_mxid(self): + def test_missing_mxid(self) -> None: """Tests that not providing any MXID raises an error.""" self._test_status( users=None, @@ -1074,7 +1091,7 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): expected_errcode=Codes.MISSING_PARAM, ) - def test_invalid_mxid(self): + def test_invalid_mxid(self) -> None: """Tests that providing an invalid MXID raises an error.""" self._test_status( users=["bad:test"], @@ -1082,7 +1099,7 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): expected_errcode=Codes.INVALID_PARAM, ) - def test_local_user_not_exists(self): + def test_local_user_not_exists(self) -> None: """Tests that the account status endpoints correctly reports that a user doesn't exist. """ @@ -1098,7 +1115,7 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): expected_failures=[], ) - def test_local_user_exists(self): + def test_local_user_exists(self) -> None: """Tests that the account status endpoint correctly reports that a user doesn't exist. 
""" @@ -1115,7 +1132,7 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): expected_failures=[], ) - def test_local_user_deactivated(self): + def test_local_user_deactivated(self) -> None: """Tests that the account status endpoint correctly reports a deactivated user.""" user = self.register_user("someuser", "password") self.get_success( @@ -1135,7 +1152,7 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): expected_failures=[], ) - def test_mixed_local_and_remote_users(self): + def test_mixed_local_and_remote_users(self) -> None: """Tests that if some users are remote the account status endpoint correctly merges the remote responses with the local result. """ @@ -1150,7 +1167,13 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): "@bad:badremote", ] - async def post_json(destination, path, data, *a, **kwa): + async def post_json( + destination: str, + path: str, + data: Optional[JsonDict] = None, + *a: Any, + **kwa: Any, + ) -> Union[JsonDict, list]: if destination == "remote": return { "account_statuses": { @@ -1160,9 +1183,7 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): }, } } - if destination == "otherremote": - return {} - if destination == "badremote": + elif destination == "badremote": # badremote tries to overwrite the status of a user that doesn't belong # to it (i.e. users[1]) with false data, which Synapse is expected to # ignore. @@ -1176,6 +1197,9 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): }, } } + # if destination == "otherremote" + else: + return {} # Register a mock that will return the expected result depending on the remote. self.hs.get_federation_http_client().post_json = Mock(side_effect=post_json) @@ -1205,7 +1229,7 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): expected_statuses: Optional[Dict[str, Dict[str, bool]]] = None, expected_failures: Optional[List[str]] = None, expected_errcode: Optional[str] = None, - ): + ) -> None: """Send a request to the account status endpoint and check that the response matches with what's expected. diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py index 5c31a54421..823e8ab8c4 100644 --- a/tests/rest/client/test_filter.py +++ b/tests/rest/client/test_filter.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import Codes from synapse.rest.client import filter +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest @@ -30,11 +32,11 @@ class FilterTestCase(unittest.HomeserverTestCase): EXAMPLE_FILTER_JSON = b'{"room": {"timeline": {"types": ["m.room.message"]}}}' servlets = [filter.register_servlets] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.filtering = hs.get_filtering() self.store = hs.get_datastores().main - def test_add_filter(self): + def test_add_filter(self) -> None: channel = self.make_request( "POST", "/_matrix/client/r0/user/%s/filter" % (self.user_id), @@ -43,11 +45,13 @@ class FilterTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"200") self.assertEqual(channel.json_body, {"filter_id": "0"}) - filter = self.store.get_user_filter(user_localpart="apple", filter_id=0) + filter = self.get_success( + self.store.get_user_filter(user_localpart="apple", filter_id=0) + ) self.pump() - self.assertEqual(filter.result, self.EXAMPLE_FILTER) + self.assertEqual(filter, self.EXAMPLE_FILTER) - def test_add_filter_for_other_user(self): + def test_add_filter_for_other_user(self) -> None: channel = self.make_request( "POST", "/_matrix/client/r0/user/%s/filter" % ("@watermelon:test"), @@ -57,7 +61,7 @@ class FilterTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"403") self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) - def test_add_filter_non_local_user(self): + def test_add_filter_non_local_user(self) -> None: _is_mine = self.hs.is_mine self.hs.is_mine = lambda target_user: False channel = self.make_request( @@ -70,14 +74,13 @@ class FilterTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"403") self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) - def test_get_filter(self): - filter_id = defer.ensureDeferred( + def test_get_filter(self) -> None: + filter_id = self.get_success( self.filtering.add_user_filter( user_localpart="apple", user_filter=self.EXAMPLE_FILTER ) ) self.reactor.advance(1) - filter_id = filter_id.result channel = self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/%s" % (self.user_id, filter_id) ) @@ -85,7 +88,7 @@ class FilterTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"200") self.assertEqual(channel.json_body, self.EXAMPLE_FILTER) - def test_get_filter_non_existant(self): + def test_get_filter_non_existant(self) -> None: channel = self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/12382148321" % (self.user_id) ) @@ -95,7 +98,7 @@ class FilterTestCase(unittest.HomeserverTestCase): # Currently invalid params do not have an appropriate errcode # in errors.py - def test_get_filter_invalid_id(self): + def test_get_filter_invalid_id(self) -> None: channel = self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/foobar" % (self.user_id) ) @@ -103,7 +106,7 @@ class FilterTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"400") # No ID also returns an invalid_id error - def test_get_filter_no_id(self): + def test_get_filter_no_id(self) -> None: channel = self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/" % (self.user_id) ) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 
a087cd7b21..709f851a38 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -15,7 +15,7 @@ import itertools import urllib.parse -from typing import Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple from unittest.mock import patch from twisted.test.proto_helpers import MemoryReactor @@ -45,7 +45,7 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): ] hijack_auth = False - def default_config(self) -> dict: + def default_config(self) -> Dict[str, Any]: # We need to enable msc1849 support for aggregations config = super().default_config() diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_report_event.py index ee6b0b9ebf..20a259fc43 100644 --- a/tests/rest/client/test_report_event.py +++ b/tests/rest/client/test_report_event.py @@ -14,8 +14,13 @@ import json +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.rest.client import login, report_event, room +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest @@ -28,7 +33,7 @@ class ReportEventTestCase(unittest.HomeserverTestCase): report_event.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") self.other_user = self.register_user("user", "pass") @@ -42,35 +47,35 @@ class ReportEventTestCase(unittest.HomeserverTestCase): self.event_id = resp["event_id"] self.report_path = f"rooms/{self.room_id}/report/{self.event_id}" - def test_reason_str_and_score_int(self): + def test_reason_str_and_score_int(self) -> None: data = {"reason": "this makes me sad", "score": -100} self._assert_status(200, data) - def test_no_reason(self): + def test_no_reason(self) -> None: data = {"score": 0} self._assert_status(200, data) - def test_no_score(self): + def test_no_score(self) -> None: data = {"reason": "this makes me sad"} self._assert_status(200, data) - def test_no_reason_and_no_score(self): - data = {} + def test_no_reason_and_no_score(self) -> None: + data: JsonDict = {} self._assert_status(200, data) - def test_reason_int_and_score_str(self): + def test_reason_int_and_score_str(self) -> None: data = {"reason": 10, "score": "string"} self._assert_status(400, data) - def test_reason_zero_and_score_blank(self): + def test_reason_zero_and_score_blank(self) -> None: data = {"reason": 0, "score": ""} self._assert_status(400, data) - def test_reason_and_score_null(self): + def test_reason_and_score_null(self) -> None: data = {"reason": None, "score": None} self._assert_status(400, data) - def _assert_status(self, response_status, data): + def _assert_status(self, response_status: int, data: JsonDict) -> None: channel = self.make_request( "POST", self.report_path, diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index e0b11e7264..37866ee330 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -18,11 +18,12 @@ """Tests REST events for /rooms paths.""" import json -from typing import Iterable, List +from typing import Any, Dict, Iterable, List, Optional from unittest.mock import Mock, call from urllib import parse as urlparse from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.constants import ( 
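# --- Editor's aside (illustrative): the shapes the report-event tests above
# accept and reject. Per the Matrix spec, "score" is an integer (-100 most
# offensive, 0 inoffensive) and "reason" a free-form string, both optional:
valid_report = {"reason": "this makes me sad", "score": -100}
also_valid: dict = {}                               # both fields are optional
invalid_report = {"reason": 10, "score": "string"}  # rejected with HTTP 400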
@@ -35,7 +36,9 @@ from synapse.api.errors import Codes, HttpResponseException from synapse.handlers.pagination import PurgeStatus from synapse.rest import admin from synapse.rest.client import account, directory, login, profile, room, sync +from synapse.server import HomeServer from synapse.types import JsonDict, RoomAlias, UserID, create_requester +from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest @@ -45,11 +48,11 @@ PATH_PREFIX = b"/_matrix/client/api/v1" class RoomBase(unittest.HomeserverTestCase): - rmcreator_id = None + rmcreator_id: Optional[str] = None servlets = [room.register_servlets, room.register_deprecated_servlets] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.hs = self.setup_test_homeserver( "red", @@ -57,15 +60,15 @@ class RoomBase(unittest.HomeserverTestCase): federation_client=Mock(), ) - self.hs.get_federation_handler = Mock() + self.hs.get_federation_handler = Mock() # type: ignore[assignment] self.hs.get_federation_handler.return_value.maybe_backfill = Mock( return_value=make_awaitable(None) ) - async def _insert_client_ip(*args, **kwargs): + async def _insert_client_ip(*args: Any, **kwargs: Any) -> None: return None - self.hs.get_datastores().main.insert_client_ip = _insert_client_ip + self.hs.get_datastores().main.insert_client_ip = _insert_client_ip # type: ignore[assignment] return self.hs @@ -76,7 +79,7 @@ class RoomPermissionsTestCase(RoomBase): user_id = "@sid1:red" rmcreator_id = "@notme:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.helper.auth_user_id = self.rmcreator_id # create some rooms under the name rmcreator_id @@ -108,12 +111,12 @@ class RoomPermissionsTestCase(RoomBase): # auth as user_id now self.helper.auth_user_id = self.user_id - def test_can_do_action(self): + def test_can_do_action(self) -> None: msg_content = b'{"msgtype":"m.text","body":"hello"}' seq = iter(range(100)) - def send_msg_path(): + def send_msg_path() -> str: return "/rooms/%s/send/m.room.message/mid%s" % ( self.created_rmid, str(next(seq)), @@ -148,7 +151,7 @@ class RoomPermissionsTestCase(RoomBase): channel = self.make_request("PUT", send_msg_path(), msg_content) self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_topic_perms(self): + def test_topic_perms(self) -> None: topic_content = b'{"topic":"My Topic Name"}' topic_path = "/rooms/%s/state/m.room.topic" % self.created_rmid @@ -214,14 +217,14 @@ class RoomPermissionsTestCase(RoomBase): self.assertEqual(403, channel.code, msg=channel.result["body"]) def _test_get_membership( - self, room=None, members: Iterable = frozenset(), expect_code=None - ): + self, room: str, members: Iterable = frozenset(), expect_code: int = 200 + ) -> None: for member in members: path = "/rooms/%s/state/m.room.member/%s" % (room, member) channel = self.make_request("GET", path) self.assertEqual(expect_code, channel.code) - def test_membership_basic_room_perms(self): + def test_membership_basic_room_perms(self) -> None: # === room does not exist === room = self.uncreated_rmid # get membership of self, get membership of other, uncreated room @@ -241,7 +244,7 @@ class RoomPermissionsTestCase(RoomBase): self.helper.join(room=room, user=usr, expect_code=404) self.helper.leave(room=room, user=usr, expect_code=404) - def test_membership_private_room_perms(self): + def test_membership_private_room_perms(self) 
-> None: room = self.created_rmid # get membership of self, get membership of other, private room + invite # expect all 403s @@ -264,7 +267,7 @@ class RoomPermissionsTestCase(RoomBase): members=[self.user_id, self.rmcreator_id], room=room, expect_code=200 ) - def test_membership_public_room_perms(self): + def test_membership_public_room_perms(self) -> None: room = self.created_public_rmid # get membership of self, get membership of other, public room + invite # expect 403 @@ -287,7 +290,7 @@ class RoomPermissionsTestCase(RoomBase): members=[self.user_id, self.rmcreator_id], room=room, expect_code=200 ) - def test_invited_permissions(self): + def test_invited_permissions(self) -> None: room = self.created_rmid self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id) @@ -310,7 +313,7 @@ class RoomPermissionsTestCase(RoomBase): expect_code=403, ) - def test_joined_permissions(self): + def test_joined_permissions(self) -> None: room = self.created_rmid self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id) self.helper.join(room=room, user=self.user_id) @@ -348,7 +351,7 @@ class RoomPermissionsTestCase(RoomBase): # set left of self, expect 200 self.helper.leave(room=room, user=self.user_id) - def test_leave_permissions(self): + def test_leave_permissions(self) -> None: room = self.created_rmid self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id) self.helper.join(room=room, user=self.user_id) @@ -383,7 +386,7 @@ class RoomPermissionsTestCase(RoomBase): ) # tests the "from banned" line from the table in https://spec.matrix.org/unstable/client-server-api/#mroommember - def test_member_event_from_ban(self): + def test_member_event_from_ban(self) -> None: room = self.created_rmid self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id) self.helper.join(room=room, user=self.user_id) @@ -475,21 +478,21 @@ class RoomsMemberListTestCase(RoomBase): user_id = "@sid1:red" - def test_get_member_list(self): + def test_get_member_list(self) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request("GET", "/rooms/%s/members" % room_id) self.assertEqual(200, channel.code, msg=channel.result["body"]) - def test_get_member_list_no_room(self): + def test_get_member_list_no_room(self) -> None: channel = self.make_request("GET", "/rooms/roomdoesnotexist/members") self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_get_member_list_no_permission(self): + def test_get_member_list_no_permission(self) -> None: room_id = self.helper.create_room_as("@some_other_guy:red") channel = self.make_request("GET", "/rooms/%s/members" % room_id) self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_get_member_list_no_permission_with_at_token(self): + def test_get_member_list_no_permission_with_at_token(self) -> None: """ Tests that a stranger to the room cannot get the member list (in the case that they use an at token). @@ -509,7 +512,7 @@ class RoomsMemberListTestCase(RoomBase): ) self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_get_member_list_no_permission_former_member(self): + def test_get_member_list_no_permission_former_member(self) -> None: """ Tests that a former member of the room can not get the member list. 
""" @@ -529,7 +532,7 @@ class RoomsMemberListTestCase(RoomBase): channel = self.make_request("GET", "/rooms/%s/members" % room_id) self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_get_member_list_no_permission_former_member_with_at_token(self): + def test_get_member_list_no_permission_former_member_with_at_token(self) -> None: """ Tests that a former member of the room can not get the member list (in the case that they use an at token). @@ -569,7 +572,7 @@ class RoomsMemberListTestCase(RoomBase): ) self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_get_member_list_mixed_memberships(self): + def test_get_member_list_mixed_memberships(self) -> None: room_creator = "@some_other_guy:red" room_id = self.helper.create_room_as(room_creator) room_path = "/rooms/%s/members" % room_id @@ -594,26 +597,26 @@ class RoomsCreateTestCase(RoomBase): user_id = "@sid1:red" - def test_post_room_no_keys(self): + def test_post_room_no_keys(self) -> None: # POST with no config keys, expect new room id channel = self.make_request("POST", "/createRoom", "{}") self.assertEqual(200, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) - def test_post_room_visibility_key(self): + def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id channel = self.make_request("POST", "/createRoom", b'{"visibility":"private"}') self.assertEqual(200, channel.code) self.assertTrue("room_id" in channel.json_body) - def test_post_room_custom_key(self): + def test_post_room_custom_key(self) -> None: # POST with custom config keys, expect new room id channel = self.make_request("POST", "/createRoom", b'{"custom":"stuff"}') self.assertEqual(200, channel.code) self.assertTrue("room_id" in channel.json_body) - def test_post_room_known_and_unknown_keys(self): + def test_post_room_known_and_unknown_keys(self) -> None: # POST with custom + known config keys, expect new room id channel = self.make_request( "POST", "/createRoom", b'{"visibility":"private","custom":"things"}' @@ -621,7 +624,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(200, channel.code) self.assertTrue("room_id" in channel.json_body) - def test_post_room_invalid_content(self): + def test_post_room_invalid_content(self) -> None: # POST with invalid content / paths, expect 400 channel = self.make_request("POST", "/createRoom", b'{"visibili') self.assertEqual(400, channel.code) @@ -629,7 +632,7 @@ class RoomsCreateTestCase(RoomBase): channel = self.make_request("POST", "/createRoom", b'["hello"]') self.assertEqual(400, channel.code) - def test_post_room_invitees_invalid_mxid(self): + def test_post_room_invitees_invalid_mxid(self) -> None: # POST with invalid invitee, see https://github.com/matrix-org/synapse/issues/4088 # Note the trailing space in the MXID here! channel = self.make_request( @@ -638,7 +641,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(400, channel.code) @unittest.override_config({"rc_invites": {"per_room": {"burst_count": 3}}}) - def test_post_room_invitees_ratelimit(self): + def test_post_room_invitees_ratelimit(self) -> None: """Test that invites sent when creating a room are ratelimited by a RateLimiter, which ratelimits them correctly, including by not limiting when the requester is exempt from ratelimiting. 
@@ -674,7 +677,7 @@ class RoomsCreateTestCase(RoomBase): channel = self.make_request("POST", "/createRoom", content) self.assertEqual(200, channel.code) - def test_spam_checker_may_join_room(self): + def test_spam_checker_may_join_room(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly bypassed when creating a new room. """ @@ -704,12 +707,12 @@ class RoomTopicTestCase(RoomBase): user_id = "@sid1:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # create the room self.room_id = self.helper.create_room_as(self.user_id) self.path = "/rooms/%s/state/m.room.topic" % (self.room_id,) - def test_invalid_puts(self): + def test_invalid_puts(self) -> None: # missing keys or invalid json channel = self.make_request("PUT", self.path, "{}") self.assertEqual(400, channel.code, msg=channel.result["body"]) @@ -736,7 +739,7 @@ class RoomTopicTestCase(RoomBase): channel = self.make_request("PUT", self.path, content) self.assertEqual(400, channel.code, msg=channel.result["body"]) - def test_rooms_topic(self): + def test_rooms_topic(self) -> None: # nothing should be there channel = self.make_request("GET", self.path) self.assertEqual(404, channel.code, msg=channel.result["body"]) @@ -751,7 +754,7 @@ class RoomTopicTestCase(RoomBase): self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) - def test_rooms_topic_with_extra_keys(self): + def test_rooms_topic_with_extra_keys(self) -> None: # valid put with extra keys content = '{"topic":"Seasons","subtopic":"Summer"}' channel = self.make_request("PUT", self.path, content) @@ -768,10 +771,10 @@ class RoomMemberStateTestCase(RoomBase): user_id = "@sid1:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_id = self.helper.create_room_as(self.user_id) - def test_invalid_puts(self): + def test_invalid_puts(self) -> None: path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id) # missing keys or invalid json channel = self.make_request("PUT", path, "{}") @@ -801,7 +804,7 @@ class RoomMemberStateTestCase(RoomBase): channel = self.make_request("PUT", path, content.encode("ascii")) self.assertEqual(400, channel.code, msg=channel.result["body"]) - def test_rooms_members_self(self): + def test_rooms_members_self(self) -> None: path = "/rooms/%s/state/m.room.member/%s" % ( urlparse.quote(self.room_id), self.user_id, @@ -812,13 +815,13 @@ class RoomMemberStateTestCase(RoomBase): channel = self.make_request("PUT", path, content.encode("ascii")) self.assertEqual(200, channel.code, msg=channel.result["body"]) - channel = self.make_request("GET", path, None) + channel = self.make_request("GET", path, content=b"") self.assertEqual(200, channel.code, msg=channel.result["body"]) expected_response = {"membership": Membership.JOIN} self.assertEqual(expected_response, channel.json_body) - def test_rooms_members_other(self): + def test_rooms_members_other(self) -> None: self.other_id = "@zzsid1:red" path = "/rooms/%s/state/m.room.member/%s" % ( urlparse.quote(self.room_id), @@ -830,11 +833,11 @@ class RoomMemberStateTestCase(RoomBase): channel = self.make_request("PUT", path, content) self.assertEqual(200, channel.code, msg=channel.result["body"]) - channel = self.make_request("GET", path, None) + channel = self.make_request("GET", path, content=b"") self.assertEqual(200, channel.code, 
msg=channel.result["body"]) self.assertEqual(json.loads(content), channel.json_body) - def test_rooms_members_other_custom_keys(self): + def test_rooms_members_other_custom_keys(self) -> None: self.other_id = "@zzsid1:red" path = "/rooms/%s/state/m.room.member/%s" % ( urlparse.quote(self.room_id), @@ -849,7 +852,7 @@ class RoomMemberStateTestCase(RoomBase): channel = self.make_request("PUT", path, content) self.assertEqual(200, channel.code, msg=channel.result["body"]) - channel = self.make_request("GET", path, None) + channel = self.make_request("GET", path, content=b"") self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertEqual(json.loads(content), channel.json_body) @@ -866,7 +869,7 @@ class RoomInviteRatelimitTestCase(RoomBase): @unittest.override_config( {"rc_invites": {"per_room": {"per_second": 0.5, "burst_count": 3}}} ) - def test_invites_by_rooms_ratelimit(self): + def test_invites_by_rooms_ratelimit(self) -> None: """Tests that invites in a room are actually rate-limited.""" room_id = self.helper.create_room_as(self.user_id) @@ -878,7 +881,7 @@ class RoomInviteRatelimitTestCase(RoomBase): @unittest.override_config( {"rc_invites": {"per_user": {"per_second": 0.5, "burst_count": 3}}} ) - def test_invites_by_users_ratelimit(self): + def test_invites_by_users_ratelimit(self) -> None: """Tests that invites to a specific user are actually rate-limited.""" for _ in range(3): @@ -897,7 +900,7 @@ class RoomJoinTestCase(RoomBase): room.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user1 = self.register_user("thomas", "hackme") self.tok1 = self.login("thomas", "hackme") @@ -908,7 +911,7 @@ class RoomJoinTestCase(RoomBase): self.room2 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) self.room3 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) - def test_spam_checker_may_join_room(self): + def test_spam_checker_may_join_room(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly called and blocks room joins when needed. 
""" @@ -975,8 +978,8 @@ class RoomJoinRatelimitTestCase(RoomBase): room.register_servlets, ] - def prepare(self, reactor, clock, homeserver): - super().prepare(reactor, clock, homeserver) + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) # profile changes expect that the user is actually registered user = UserID.from_string(self.user_id) self.get_success(self.register_user(user.localpart, "supersecretpassword")) @@ -984,7 +987,7 @@ class RoomJoinRatelimitTestCase(RoomBase): @unittest.override_config( {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}} ) - def test_join_local_ratelimit(self): + def test_join_local_ratelimit(self) -> None: """Tests that local joins are actually rate-limited.""" for _ in range(3): self.helper.create_room_as(self.user_id) @@ -994,7 +997,7 @@ class RoomJoinRatelimitTestCase(RoomBase): @unittest.override_config( {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}} ) - def test_join_local_ratelimit_profile_change(self): + def test_join_local_ratelimit_profile_change(self) -> None: """Tests that sending a profile update into all of the user's joined rooms isn't rate-limited by the rate-limiter on joins.""" @@ -1031,7 +1034,7 @@ class RoomJoinRatelimitTestCase(RoomBase): @unittest.override_config( {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}} ) - def test_join_local_ratelimit_idempotent(self): + def test_join_local_ratelimit_idempotent(self) -> None: """Tests that the room join endpoints remain idempotent despite rate-limiting on room joins.""" room_id = self.helper.create_room_as(self.user_id) @@ -1056,7 +1059,7 @@ class RoomJoinRatelimitTestCase(RoomBase): "autocreate_auto_join_rooms": True, }, ) - def test_autojoin_rooms(self): + def test_autojoin_rooms(self) -> None: user_id = self.register_user("testuser", "password") # Check that the new user successfully joined the four rooms @@ -1071,10 +1074,10 @@ class RoomMessagesTestCase(RoomBase): user_id = "@sid1:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_id = self.helper.create_room_as(self.user_id) - def test_invalid_puts(self): + def test_invalid_puts(self) -> None: path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) # missing keys or invalid json channel = self.make_request("PUT", path, b"{}") @@ -1095,7 +1098,7 @@ class RoomMessagesTestCase(RoomBase): channel = self.make_request("PUT", path, b"") self.assertEqual(400, channel.code, msg=channel.result["body"]) - def test_rooms_messages_sent(self): + def test_rooms_messages_sent(self) -> None: path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) content = b'{"body":"test","msgtype":{"type":"a"}}' @@ -1119,11 +1122,11 @@ class RoomInitialSyncTestCase(RoomBase): user_id = "@sid1:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # create the room self.room_id = self.helper.create_room_as(self.user_id) - def test_initial_sync(self): + def test_initial_sync(self) -> None: channel = self.make_request("GET", "/rooms/%s/initialSync" % self.room_id) self.assertEqual(200, channel.code) @@ -1131,7 +1134,7 @@ class RoomInitialSyncTestCase(RoomBase): self.assertEqual("join", channel.json_body["membership"]) # Room state is easier to assert on if we unpack it into a dict - state = {} + state: JsonDict = {} for event in channel.json_body["state"]: if 
"state_key" not in event: continue @@ -1160,10 +1163,10 @@ class RoomMessageListTestCase(RoomBase): user_id = "@sid1:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_id = self.helper.create_room_as(self.user_id) - def test_topo_token_is_accepted(self): + def test_topo_token_is_accepted(self) -> None: token = "t1-0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) @@ -1174,7 +1177,7 @@ class RoomMessageListTestCase(RoomBase): self.assertTrue("chunk" in channel.json_body) self.assertTrue("end" in channel.json_body) - def test_stream_token_is_accepted_for_fwd_pagianation(self): + def test_stream_token_is_accepted_for_fwd_pagianation(self) -> None: token = "s0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) @@ -1185,7 +1188,7 @@ class RoomMessageListTestCase(RoomBase): self.assertTrue("chunk" in channel.json_body) self.assertTrue("end" in channel.json_body) - def test_room_messages_purge(self): + def test_room_messages_purge(self) -> None: store = self.hs.get_datastores().main pagination_handler = self.hs.get_pagination_handler() @@ -1278,10 +1281,10 @@ class RoomSearchTestCase(unittest.HomeserverTestCase): user_id = True hijack_auth = False - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # Register the user who does the searching - self.user_id = self.register_user("user", "pass") + self.user_id2 = self.register_user("user", "pass") self.access_token = self.login("user", "pass") # Register the user who sends the message @@ -1289,12 +1292,12 @@ class RoomSearchTestCase(unittest.HomeserverTestCase): self.other_access_token = self.login("otheruser", "pass") # Create a room - self.room = self.helper.create_room_as(self.user_id, tok=self.access_token) + self.room = self.helper.create_room_as(self.user_id2, tok=self.access_token) # Invite the other person self.helper.invite( room=self.room, - src=self.user_id, + src=self.user_id2, tok=self.access_token, targ=self.other_user_id, ) @@ -1304,7 +1307,7 @@ class RoomSearchTestCase(unittest.HomeserverTestCase): room=self.room, user=self.other_user_id, tok=self.other_access_token ) - def test_finds_message(self): + def test_finds_message(self) -> None: """ The search functionality will search for content in messages if asked to do so. @@ -1333,7 +1336,7 @@ class RoomSearchTestCase(unittest.HomeserverTestCase): # No context was requested, so we should get none. self.assertEqual(results["results"][0]["context"], {}) - def test_include_context(self): + def test_include_context(self) -> None: """ When event_context includes include_profile, profile information will be included in the search response. 
@@ -1379,7 +1382,7 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.url = b"/_matrix/client/r0/publicRooms" @@ -1389,11 +1392,11 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): return self.hs - def test_restricted_no_auth(self): + def test_restricted_no_auth(self) -> None: channel = self.make_request("GET", self.url) self.assertEqual(channel.code, 401, channel.result) - def test_restricted_auth(self): + def test_restricted_auth(self) -> None: self.register_user("user", "pass") tok = self.login("user", "pass") @@ -1412,19 +1415,19 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return self.setup_test_homeserver(federation_client=Mock()) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.register_user("user", "pass") self.token = self.login("user", "pass") self.federation_client = hs.get_federation_client() - def test_simple(self): + def test_simple(self) -> None: "Simple test for searching rooms over federation" - self.federation_client.get_public_rooms.side_effect = ( - lambda *a, **k: defer.succeed({}) + self.federation_client.get_public_rooms.side_effect = lambda *a, **k: defer.succeed( # type: ignore[attr-defined] + {} ) search_filter = {"generic_search_term": "foobar"} @@ -1437,7 +1440,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): ) self.assertEqual(channel.code, 200, channel.result) - self.federation_client.get_public_rooms.assert_called_once_with( + self.federation_client.get_public_rooms.assert_called_once_with( # type: ignore[attr-defined] "testserv", limit=100, since_token=None, @@ -1446,12 +1449,12 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): third_party_instance_id=None, ) - def test_fallback(self): + def test_fallback(self) -> None: "Test that searching public rooms over federation falls back if it gets a 404" # The `get_public_rooms` should be called again if the first call fails # with a 404, when using search filters. 
- self.federation_client.get_public_rooms.side_effect = ( + self.federation_client.get_public_rooms.side_effect = ( # type: ignore[attr-defined] HttpResponseException(404, "Not Found", b""), defer.succeed({}), ) @@ -1466,7 +1469,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): ) self.assertEqual(channel.code, 200, channel.result) - self.federation_client.get_public_rooms.assert_has_calls( + self.federation_client.get_public_rooms.assert_has_calls( # type: ignore[attr-defined] [ call( "testserv", @@ -1497,14 +1500,14 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): profile.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["allow_per_room_profiles"] = False self.hs = self.setup_test_homeserver(config=config) return self.hs - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("test", "test") self.tok = self.login("test", "test") @@ -1522,7 +1525,7 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - def test_per_room_profile_forbidden(self): + def test_per_room_profile_forbidden(self) -> None: data = {"membership": "join", "displayname": "other test user"} request_data = json.dumps(data) channel = self.make_request( @@ -1557,7 +1560,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.creator = self.register_user("creator", "test") self.creator_tok = self.login("creator", "test") @@ -1566,7 +1569,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): self.room_id = self.helper.create_room_as(self.creator, tok=self.creator_tok) - def test_join_reason(self): + def test_join_reason(self) -> None: reason = "hello" channel = self.make_request( "POST", @@ -1578,7 +1581,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): self._check_for_reason(reason) - def test_leave_reason(self): + def test_leave_reason(self) -> None: self.helper.join(self.room_id, user=self.second_user_id, tok=self.second_tok) reason = "hello" @@ -1592,7 +1595,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): self._check_for_reason(reason) - def test_kick_reason(self): + def test_kick_reason(self) -> None: self.helper.join(self.room_id, user=self.second_user_id, tok=self.second_tok) reason = "hello" @@ -1606,7 +1609,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): self._check_for_reason(reason) - def test_ban_reason(self): + def test_ban_reason(self) -> None: self.helper.join(self.room_id, user=self.second_user_id, tok=self.second_tok) reason = "hello" @@ -1620,7 +1623,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): self._check_for_reason(reason) - def test_unban_reason(self): + def test_unban_reason(self) -> None: reason = "hello" channel = self.make_request( "POST", @@ -1632,7 +1635,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): self._check_for_reason(reason) - def test_invite_reason(self): + def test_invite_reason(self) -> None: reason = "hello" channel = self.make_request( "POST", @@ -1644,7 +1647,7 @@ class 
RoomMembershipReasonTestCase(unittest.HomeserverTestCase): self._check_for_reason(reason) - def test_reject_invite_reason(self): + def test_reject_invite_reason(self) -> None: self.helper.invite( self.room_id, src=self.creator, @@ -1663,7 +1666,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): self._check_for_reason(reason) - def _check_for_reason(self, reason): + def _check_for_reason(self, reason: str) -> None: channel = self.make_request( "GET", "/_matrix/client/r0/rooms/{}/state/m.room.member/{}".format( @@ -1704,12 +1707,12 @@ class LabelsTestCase(unittest.HomeserverTestCase): "org.matrix.not_labels": ["#notfun"], } - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("test", "test") self.tok = self.login("test", "test") self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - def test_context_filter_labels(self): + def test_context_filter_labels(self) -> None: """Test that we can filter by a label on a /context request.""" event_id = self._send_labelled_messages_in_room() @@ -1739,7 +1742,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): events_after[0]["content"]["body"], "with right label", events_after[0] ) - def test_context_filter_not_labels(self): + def test_context_filter_not_labels(self) -> None: """Test that we can filter by the absence of a label on a /context request.""" event_id = self._send_labelled_messages_in_room() @@ -1772,7 +1775,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): events_after[1]["content"]["body"], "with two wrong labels", events_after[1] ) - def test_context_filter_labels_not_labels(self): + def test_context_filter_labels_not_labels(self) -> None: """Test that we can filter by both a label and the absence of another label on a /context request. """ @@ -1801,7 +1804,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): events_after[0]["content"]["body"], "with wrong label", events_after[0] ) - def test_messages_filter_labels(self): + def test_messages_filter_labels(self) -> None: """Test that we can filter by a label on a /messages request.""" self._send_labelled_messages_in_room() @@ -1818,7 +1821,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): self.assertEqual(events[0]["content"]["body"], "with right label", events[0]) self.assertEqual(events[1]["content"]["body"], "with right label", events[1]) - def test_messages_filter_not_labels(self): + def test_messages_filter_not_labels(self) -> None: """Test that we can filter by the absence of a label on a /messages request.""" self._send_labelled_messages_in_room() @@ -1839,7 +1842,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): events[3]["content"]["body"], "with two wrong labels", events[3] ) - def test_messages_filter_labels_not_labels(self): + def test_messages_filter_labels_not_labels(self) -> None: """Test that we can filter by both a label and the absence of another label on a /messages request. 
""" @@ -1862,7 +1865,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): self.assertEqual(len(events), 1, [event["content"] for event in events]) self.assertEqual(events[0]["content"]["body"], "with wrong label", events[0]) - def test_search_filter_labels(self): + def test_search_filter_labels(self) -> None: """Test that we can filter by a label on a /search request.""" request_data = json.dumps( { @@ -1899,7 +1902,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): results[1]["result"]["content"]["body"], ) - def test_search_filter_not_labels(self): + def test_search_filter_not_labels(self) -> None: """Test that we can filter by the absence of a label on a /search request.""" request_data = json.dumps( { @@ -1946,7 +1949,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): results[3]["result"]["content"]["body"], ) - def test_search_filter_labels_not_labels(self): + def test_search_filter_labels_not_labels(self) -> None: """Test that we can filter by both a label and the absence of another label on a /search request. """ @@ -1980,7 +1983,7 @@ class LabelsTestCase(unittest.HomeserverTestCase): results[0]["result"]["content"]["body"], ) - def _send_labelled_messages_in_room(self): + def _send_labelled_messages_in_room(self) -> str: """Sends several messages to a room with different labels (or without any) to test filtering by label. Returns: @@ -2056,12 +2059,12 @@ class RelationsTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["experimental_features"] = {"msc3440_enabled": True} return config - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("test", "test") self.tok = self.login("test", "test") self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) @@ -2136,7 +2139,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): return channel.json_body["chunk"] - def test_filter_relation_senders(self): + def test_filter_relation_senders(self) -> None: # Messages which second user reacted to. filter = {"io.element.relation_senders": [self.second_user_id]} chunk = self._filter_messages(filter) @@ -2159,7 +2162,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): [c["event_id"] for c in chunk], [self.event_id_1, self.event_id_2] ) - def test_filter_relation_type(self): + def test_filter_relation_type(self) -> None: # Messages which have annotations. filter = {"io.element.relation_types": [RelationTypes.ANNOTATION]} chunk = self._filter_messages(filter) @@ -2185,7 +2188,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): [c["event_id"] for c in chunk], [self.event_id_1, self.event_id_2] ) - def test_filter_relation_senders_and_type(self): + def test_filter_relation_senders_and_type(self) -> None: # Messages which second user reacted to. 
filter = { "io.element.relation_senders": [self.second_user_id], @@ -2205,7 +2208,7 @@ class ContextTestCase(unittest.HomeserverTestCase): account.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("user", "password") self.tok = self.login("user", "password") self.room_id = self.helper.create_room_as( @@ -2218,7 +2221,7 @@ class ContextTestCase(unittest.HomeserverTestCase): self.helper.invite(self.room_id, self.user_id, self.other_user_id, tok=self.tok) self.helper.join(self.room_id, self.other_user_id, tok=self.other_tok) - def test_erased_sender(self): + def test_erased_sender(self) -> None: """Test that an erasure request results in the requester's events being hidden from any new member of the room. """ @@ -2332,7 +2335,7 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_owner = self.register_user("room_owner", "test") self.room_owner_tok = self.login("room_owner", "test") @@ -2340,17 +2343,17 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase): self.room_owner, tok=self.room_owner_tok ) - def test_no_aliases(self): + def test_no_aliases(self) -> None: res = self._get_aliases(self.room_owner_tok) self.assertEqual(res["aliases"], []) - def test_not_in_room(self): + def test_not_in_room(self) -> None: self.register_user("user", "test") user_tok = self.login("user", "test") res = self._get_aliases(user_tok, expected_code=403) self.assertEqual(res["errcode"], "M_FORBIDDEN") - def test_admin_user(self): + def test_admin_user(self) -> None: alias1 = self._random_alias() self._set_alias_via_directory(alias1) @@ -2360,7 +2363,7 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase): res = self._get_aliases(user_tok) self.assertEqual(res["aliases"], [alias1]) - def test_with_aliases(self): + def test_with_aliases(self) -> None: alias1 = self._random_alias() alias2 = self._random_alias() @@ -2370,7 +2373,7 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase): res = self._get_aliases(self.room_owner_tok) self.assertEqual(set(res["aliases"]), {alias1, alias2}) - def test_peekable_room(self): + def test_peekable_room(self) -> None: alias1 = self._random_alias() self._set_alias_via_directory(alias1) @@ -2404,7 +2407,7 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase): def _random_alias(self) -> str: return RoomAlias(random_string(5), self.hs.hostname).to_string() - def _set_alias_via_directory(self, alias: str, expected_code: int = 200): + def _set_alias_via_directory(self, alias: str, expected_code: int = 200) -> None: url = "/_matrix/client/r0/directory/room/" + alias data = {"room_id": self.room_id} request_data = json.dumps(data) @@ -2423,7 +2426,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_owner = self.register_user("room_owner", "test") self.room_owner_tok = self.login("room_owner", "test") @@ -2434,7 +2437,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): self.alias = "#alias:test" self._set_alias_via_directory(self.alias) - def _set_alias_via_directory(self, alias: str, expected_code: int = 200): + def _set_alias_via_directory(self, alias: str, 
expected_code: int = 200) -> None: url = "/_matrix/client/r0/directory/room/" + alias data = {"room_id": self.room_id} request_data = json.dumps(data) @@ -2456,7 +2459,9 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): self.assertIsInstance(res, dict) return res - def _set_canonical_alias(self, content: str, expected_code: int = 200) -> JsonDict: + def _set_canonical_alias( + self, content: JsonDict, expected_code: int = 200 + ) -> JsonDict: """Calls the endpoint under test. returns the json response object.""" channel = self.make_request( "PUT", @@ -2469,7 +2474,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): self.assertIsInstance(res, dict) return res - def test_canonical_alias(self): + def test_canonical_alias(self) -> None: """Test a basic alias message.""" # There is no canonical alias to start with. self._get_canonical_alias(expected_code=404) @@ -2488,7 +2493,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): res = self._get_canonical_alias() self.assertEqual(res, {}) - def test_alt_aliases(self): + def test_alt_aliases(self) -> None: """Test a canonical alias message with alt_aliases.""" # Create an alias. self._set_canonical_alias({"alt_aliases": [self.alias]}) @@ -2504,7 +2509,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): res = self._get_canonical_alias() self.assertEqual(res, {}) - def test_alias_alt_aliases(self): + def test_alias_alt_aliases(self) -> None: """Test a canonical alias message with an alias and alt_aliases.""" # Create an alias. self._set_canonical_alias({"alias": self.alias, "alt_aliases": [self.alias]}) @@ -2520,7 +2525,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): res = self._get_canonical_alias() self.assertEqual(res, {}) - def test_partial_modify(self): + def test_partial_modify(self) -> None: """Test removing only the alt_aliases.""" # Create an alias. self._set_canonical_alias({"alias": self.alias, "alt_aliases": [self.alias]}) @@ -2536,7 +2541,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): res = self._get_canonical_alias() self.assertEqual(res, {"alias": self.alias}) - def test_add_alias(self): + def test_add_alias(self) -> None: """Test adding an alias.""" # Create an additional alias.
second_alias = "#second:test" @@ -2556,7 +2561,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): res, {"alias": self.alias, "alt_aliases": [self.alias, second_alias]} ) - def test_bad_data(self): + def test_bad_data(self) -> None: """Invalid data for alt_aliases should cause errors.""" self._set_canonical_alias({"alt_aliases": "@bad:test"}, expected_code=400) self._set_canonical_alias({"alt_aliases": None}, expected_code=400) @@ -2566,7 +2571,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): self._set_canonical_alias({"alt_aliases": True}, expected_code=400) self._set_canonical_alias({"alt_aliases": {}}, expected_code=400) - def test_bad_alias(self): + def test_bad_alias(self) -> None: """An alias which does not point to the room raises a SynapseError.""" self._set_canonical_alias({"alias": "@unknown:test"}, expected_code=400) self._set_canonical_alias({"alt_aliases": ["@unknown:test"]}, expected_code=400) @@ -2580,13 +2585,13 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("thomas", "hackme") self.tok = self.login("thomas", "hackme") self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - def test_threepid_invite_spamcheck(self): + def test_threepid_invite_spamcheck(self) -> None: # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for _mock_make_and_store_3pid_invite around so we # can check its call_count later on during the test. diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index bfc04785b7..58f1ea11b7 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -12,16 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. import threading -from typing import TYPE_CHECKING, Dict, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EventTypes, LoginType, Membership from synapse.api.errors import SynapseError +from synapse.api.room_versions import RoomVersion from synapse.events import EventBase +from synapse.events.snapshot import EventContext from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.rest import admin from synapse.rest.client import account, login, profile, room +from synapse.server import HomeServer from synapse.types import JsonDict, Requester, StateMap +from synapse.util import Clock from synapse.util.frozenutils import unfreeze from tests import unittest @@ -34,7 +40,7 @@ thread_local = threading.local() class LegacyThirdPartyRulesTestModule: - def __init__(self, config: Dict, module_api: "ModuleApi"): + def __init__(self, config: Dict, module_api: "ModuleApi") -> None: # keep a record of the "current" rules module, so that the test can patch # it if desired. 
thread_local.rules_module = self @@ -42,32 +48,36 @@ class LegacyThirdPartyRulesTestModule: async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool - ): + ) -> bool: return True - async def check_event_allowed(self, event: EventBase, state: StateMap[EventBase]): + async def check_event_allowed( + self, event: EventBase, state: StateMap[EventBase] + ) -> Union[bool, dict]: return True @staticmethod - def parse_config(config): + def parse_config(config: Dict[str, Any]) -> Dict[str, Any]: return config class LegacyDenyNewRooms(LegacyThirdPartyRulesTestModule): - def __init__(self, config: Dict, module_api: "ModuleApi"): + def __init__(self, config: Dict, module_api: "ModuleApi") -> None: super().__init__(config, module_api) - def on_create_room( + async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool - ): + ) -> bool: return False class LegacyChangeEvents(LegacyThirdPartyRulesTestModule): - def __init__(self, config: Dict, module_api: "ModuleApi"): + def __init__(self, config: Dict, module_api: "ModuleApi") -> None: super().__init__(config, module_api) - async def check_event_allowed(self, event: EventBase, state: StateMap[EventBase]): + async def check_event_allowed( + self, event: EventBase, state: StateMap[EventBase] + ) -> JsonDict: d = event.get_dict() content = unfreeze(event.content) content["foo"] = "bar" @@ -84,7 +94,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): account.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver() load_legacy_third_party_event_rules(hs) @@ -94,22 +104,30 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # Note that these checks are not relevant to this test case. # Have this homeserver auto-approve all event signature checking. - async def approve_all_signature_checking(_, pdu): + async def approve_all_signature_checking( + _: RoomVersion, pdu: EventBase + ) -> EventBase: return pdu - hs.get_federation_server()._check_sigs_and_hash = approve_all_signature_checking + hs.get_federation_server()._check_sigs_and_hash = approve_all_signature_checking # type: ignore[assignment] # Have this homeserver skip event auth checks. This is necessary due to # event auth checks ensuring that events were signed by the sender's homeserver. - async def _check_event_auth(origin, event, context, *args, **kwargs): + async def _check_event_auth( + origin: str, + event: EventBase, + context: EventContext, + *args: Any, + **kwargs: Any, + ) -> EventContext: return context - hs.get_federation_event_handler()._check_event_auth = _check_event_auth + hs.get_federation_event_handler()._check_event_auth = _check_event_auth # type: ignore[assignment] return hs - def prepare(self, reactor, clock, homeserver): - super().prepare(reactor, clock, homeserver) + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) # Create some users and a room to play with during the tests self.user_id = self.register_user("kermit", "monkey") self.invitee = self.register_user("invitee", "hackme") @@ -121,13 +139,15 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): except Exception: pass - def test_third_party_rules(self): + def test_third_party_rules(self) -> None: """Tests that a forbidden event is forbidden from being sent, but an allowed one can be sent. 
""" # patch the rules module with a Mock which will return False for some event # types - async def check(ev, state): + async def check( + ev: EventBase, state: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: return ev.type != "foo.bar.forbidden", None callback = Mock(spec=[], side_effect=check) @@ -161,7 +181,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): ) self.assertEqual(channel.result["code"], b"403", channel.result) - def test_third_party_rules_workaround_synapse_errors_pass_through(self): + def test_third_party_rules_workaround_synapse_errors_pass_through(self) -> None: """ Tests that the workaround introduced by https://github.com/matrix-org/synapse/pull/11042 is functional: that SynapseErrors are passed through from check_event_allowed @@ -172,7 +192,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ class NastyHackException(SynapseError): - def error_dict(self): + def error_dict(self) -> JsonDict: """ This overrides SynapseError's `error_dict` to nastily inject JSON into the error response. @@ -182,7 +202,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): return result # add a callback that will raise our hacky exception - async def check(ev, state) -> Tuple[bool, Optional[JsonDict]]: + async def check( + ev: EventBase, state: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: raise NastyHackException(429, "message") self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check] @@ -202,11 +224,13 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): {"errcode": "M_UNKNOWN", "error": "message", "nasty": "very"}, ) - def test_cannot_modify_event(self): + def test_cannot_modify_event(self) -> None: """cannot accidentally modify an event before it is persisted""" # first patch the event checker so that it will try to modify the event - async def check(ev: EventBase, state): + async def check( + ev: EventBase, state: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: ev.content = {"x": "y"} return True, None @@ -223,10 +247,12 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # 500 Internal Server Error self.assertEqual(channel.code, 500, channel.result) - def test_modify_event(self): + def test_modify_event(self) -> None: """The module can return a modified version of the event""" # first patch the event checker so that it will modify the event - async def check(ev: EventBase, state): + async def check( + ev: EventBase, state: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: d = ev.get_dict() d["content"] = {"x": "y"} return True, d @@ -253,10 +279,12 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): ev = channel.json_body self.assertEqual(ev["content"]["x"], "y") - def test_message_edit(self): + def test_message_edit(self) -> None: """Ensure that the module doesn't cause issues with edited messages.""" # first patch the event checker so that it will modify the event - async def check(ev: EventBase, state): + async def check( + ev: EventBase, state: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: d = ev.get_dict() d["content"] = { "msgtype": "m.text", @@ -315,7 +343,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): ev = channel.json_body self.assertEqual(ev["content"]["body"], "EDITED BODY") - def test_send_event(self): + def test_send_event(self) -> None: """Tests that a module can send an event into a room via the module api""" content = { 
"msgtype": "m.text", @@ -344,7 +372,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): } } ) - def test_legacy_check_event_allowed(self): + def test_legacy_check_event_allowed(self) -> None: """Tests that the wrapper for legacy check_event_allowed callbacks works correctly. """ @@ -379,13 +407,13 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): } } ) - def test_legacy_on_create_room(self): + def test_legacy_on_create_room(self) -> None: """Tests that the wrapper for legacy on_create_room callbacks works correctly. """ self.helper.create_room_as(self.user_id, tok=self.tok, expect_code=403) - def test_sent_event_end_up_in_room_state(self): + def test_sent_event_end_up_in_room_state(self) -> None: """Tests that a state event sent by a module while processing another state event doesn't get dropped from the state of the room. This is to guard against a bug where Synapse has been observed doing so, see https://github.com/matrix-org/synapse/issues/10830 @@ -400,7 +428,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): api = self.hs.get_module_api() # Define a callback that sends a custom event on power levels update. - async def test_fn(event: EventBase, state_events): + async def test_fn( + event: EventBase, state_events: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: if event.is_state and event.type == EventTypes.PowerLevels: await api.create_and_send_event_into_room( { @@ -436,7 +466,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["i"], i) - def test_on_new_event(self): + def test_on_new_event(self) -> None: """Test that the on_new_event callback is called on new events""" on_new_event = Mock(make_awaitable(None)) self.hs.get_third_party_event_rules()._on_new_event_callbacks.append( @@ -501,7 +531,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): self.assertEqual(channel.code, 200, channel.result) - def _update_power_levels(self, event_default: int = 0): + def _update_power_levels(self, event_default: int = 0) -> None: """Updates the room's power levels. Args: @@ -533,7 +563,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): tok=self.tok, ) - def test_on_profile_update(self): + def test_on_profile_update(self) -> None: """Tests that the on_profile_update module callback is correctly called on profile updates. """ @@ -592,7 +622,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): self.assertEqual(profile_info.display_name, displayname) self.assertEqual(profile_info.avatar_url, avatar_url) - def test_on_profile_update_admin(self): + def test_on_profile_update_admin(self) -> None: """Tests that the on_profile_update module callback is correctly called on profile updates triggered by a server admin. """ @@ -634,7 +664,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): self.assertEqual(profile_info.display_name, displayname) self.assertEqual(profile_info.avatar_url, avatar_url) - def test_on_user_deactivation_status_changed(self): + def test_on_user_deactivation_status_changed(self) -> None: """Tests that the on_user_deactivation_status_changed module callback is called correctly when processing a user's deactivation. 
""" @@ -691,7 +721,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): args = profile_mock.call_args[0] self.assertTrue(args[3]) - def test_on_user_deactivation_status_changed_admin(self): + def test_on_user_deactivation_status_changed_admin(self) -> None: """Tests that the on_user_deactivation_status_changed module callback is called correctly when processing a user's deactivation triggered by a server admin as well as a reactivation. diff --git a/tests/rest/client/test_typing.py b/tests/rest/client/test_typing.py index 8b2da88e8a..43be711a64 100644 --- a/tests/rest/client/test_typing.py +++ b/tests/rest/client/test_typing.py @@ -14,11 +14,16 @@ # limitations under the License. """Tests REST events for /rooms paths.""" - +from typing import Any from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + from synapse.rest.client import room +from synapse.server import HomeServer +from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import UserID +from synapse.util import Clock from tests import unittest @@ -33,7 +38,7 @@ class RoomTypingTestCase(unittest.HomeserverTestCase): user = UserID.from_string(user_id) servlets = [room.register_servlets] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver( "red", @@ -43,30 +48,34 @@ class RoomTypingTestCase(unittest.HomeserverTestCase): self.event_source = hs.get_event_sources().sources.typing - hs.get_federation_handler = Mock() + hs.get_federation_handler = Mock() # type: ignore[assignment] - async def get_user_by_access_token(token=None, allow_guest=False): - return { - "user": UserID.from_string(self.auth_user_id), - "token_id": 1, - "is_guest": False, - } + async def get_user_by_access_token( + token: str, + rights: str = "access", + allow_expired: bool = False, + ) -> TokenLookupResult: + return TokenLookupResult( + user_id=self.user_id, + is_guest=False, + token_id=1, + ) - hs.get_auth().get_user_by_access_token = get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token # type: ignore[assignment] - async def _insert_client_ip(*args, **kwargs): + async def _insert_client_ip(*args: Any, **kwargs: Any) -> None: return None - hs.get_datastores().main.insert_client_ip = _insert_client_ip + hs.get_datastores().main.insert_client_ip = _insert_client_ip # type: ignore[assignment] return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_id = self.helper.create_room_as(self.user_id) # Need another user to make notifications actually work self.helper.join(self.room_id, user="@jim:red") - def test_set_typing(self): + def test_set_typing(self) -> None: channel = self.make_request( "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id), @@ -95,7 +104,7 @@ class RoomTypingTestCase(unittest.HomeserverTestCase): ], ) - def test_set_not_typing(self): + def test_set_not_typing(self) -> None: channel = self.make_request( "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id), @@ -103,7 +112,7 @@ class RoomTypingTestCase(unittest.HomeserverTestCase): ) self.assertEqual(200, channel.code) - def test_typing_timeout(self): + def test_typing_timeout(self) -> None: channel = self.make_request( "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id), -- cgit 1.4.1 From 106959b3cf1a59ab5469db639223b6a5b84fb7d7 Mon Sep 17 00:00:00 2001 From: 
David Robertson Date: Wed, 2 Mar 2022 17:24:52 +0000 Subject: Remove unused mocks from `test_typing` (#12136) * Remove unused mocks from `test_typing` It's not clear what these do. `get_user_by_access_token` has the wrong signature, including the return type. Tests all pass without these. I think we should nuke them. * Changelog * Fixup imports --- changelog.d/12136.misc | 1 + tests/rest/client/test_typing.py | 32 +------------------------------- 2 files changed, 2 insertions(+), 31 deletions(-) create mode 100644 changelog.d/12136.misc diff --git a/changelog.d/12136.misc b/changelog.d/12136.misc new file mode 100644 index 0000000000..98b1c1c9d8 --- /dev/null +++ b/changelog.d/12136.misc @@ -0,0 +1 @@ +Remove unused mocks from `test_typing`. \ No newline at end of file diff --git a/tests/rest/client/test_typing.py b/tests/rest/client/test_typing.py index 43be711a64..d6da510773 100644 --- a/tests/rest/client/test_typing.py +++ b/tests/rest/client/test_typing.py @@ -14,14 +14,11 @@ # limitations under the License. """Tests REST events for /rooms paths.""" -from typing import Any -from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor from synapse.rest.client import room from synapse.server import HomeServer -from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import UserID from synapse.util import Clock @@ -39,35 +36,8 @@ class RoomTypingTestCase(unittest.HomeserverTestCase): servlets = [room.register_servlets] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - - hs = self.setup_test_homeserver( - "red", - federation_http_client=None, - federation_client=Mock(), - ) - + hs = self.setup_test_homeserver("red") self.event_source = hs.get_event_sources().sources.typing - - hs.get_federation_handler = Mock() # type: ignore[assignment] - - async def get_user_by_access_token( - token: str, - rights: str = "access", - allow_expired: bool = False, - ) -> TokenLookupResult: - return TokenLookupResult( - user_id=self.user_id, - is_guest=False, - token_id=1, - ) - - hs.get_auth().get_user_by_access_token = get_user_by_access_token # type: ignore[assignment] - - async def _insert_client_ip(*args: Any, **kwargs: Any) -> None: - return None - - hs.get_datastores().main.insert_client_ip = _insert_client_ip # type: ignore[assignment] - return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: -- cgit 1.4.1
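Taken together, the patches above converge on one typed shape for Synapse's homeserver test fixtures: `make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer`, `prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None`, and every test method annotated `-> None`. The sketch below is illustrative only — the class name `TypedFixtureExampleTestCase` and the combination of methods are not part of any patch; it is assembled from signatures and helper calls that appear verbatim in the diffs — and shows what a test case looks like once it follows that convention:

from twisted.test.proto_helpers import MemoryReactor

from synapse.rest.client import room
from synapse.server import HomeServer
from synapse.util import Clock

from tests import unittest


class TypedFixtureExampleTestCase(unittest.HomeserverTestCase):
    """Illustrative sketch only: mirrors the annotated fixture signatures above."""

    user_id = "@sid1:red"
    servlets = [room.register_servlets]

    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
        # Build the homeserver under test; mock-free, as in #12136.
        return self.setup_test_homeserver("red")

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        # Per-test setup: receives the constructed HomeServer and returns None.
        self.room_id = self.helper.create_room_as(self.user_id)

    def test_initial_sync(self) -> None:
        channel = self.make_request("GET", "/rooms/%s/initialSync" % self.room_id)
        self.assertEqual(200, channel.code)

One payoff is visible in the last commit: once the fixture signatures are explicit, a mock whose signature has silently diverged from the real API (such as the `get_user_by_access_token` stub that #12136 deletes) stands out rather than lingering unnoticed.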