From 7fe3b908a583a6028a0a055792fe5c6e260196e4 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 20 Oct 2022 17:39:55 +0100
Subject: Update changelog entry for #10015.

Per https://github.com/matrix-org/synapse/pull/10015#issuecomment-1282773048
---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index da7213cb68..17e666709b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -21,7 +21,7 @@ Features
 Bugfixes
 --------
 
-- Prevent device names from appearing in device list updates when `allow_device_name_lookup_over_federation` is `false`. ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
+- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
 - Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
 - Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
 - Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
--
cgit 1.5.1


From cacda2d1f5cef3fcbb62e3db2a0b0a4a634c86dd Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 20 Oct 2022 23:01:08 +0100
Subject: Build wheels on macos 11, not 10.15 (#14249)

---
 .github/workflows/release-artifacts.yml | 6 +++---
 changelog.d/14249.misc                  | 1 +
 2 files changed, 4 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/14249.misc

diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 60db6ad7f0..0601a7dbaf 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -99,7 +99,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-20.04, macos-10.15]
+        os: [ubuntu-20.04, macos-11]
         arch: [x86_64, aarch64]
         # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
         # It is not read by the rest of the workflow.
@@ -109,9 +109,9 @@ jobs:
         exclude:
           # Don't build macos wheels on PR CI.
           - is_pr: true
-            os: "macos-10.15"
+            os: "macos-11"
          # Don't build aarch64 wheels on mac.
-          - os: "macos-10.15"
+          - os: "macos-11"
            arch: aarch64
          # Don't build aarch64 wheels on PR CI.
          - is_pr: true
diff --git a/changelog.d/14249.misc b/changelog.d/14249.misc
new file mode 100644
index 0000000000..3a2ce1e0c2
--- /dev/null
+++ b/changelog.d/14249.misc
@@ -0,0 +1 @@
+Build wheels on macos 11, not 10.15.
--
cgit 1.5.1

From fab495a9e1442d99e922367f65f41de5eaa488eb Mon Sep 17 00:00:00 2001
From: "DeepBlueV7.X"
Date: Fri, 21 Oct 2022 08:49:47 +0000
Subject: Fix event size checks (#13710)

---
 changelog.d/13710.bugfix |  1 +
 synapse/event_auth.py    | 10 +++++-----
 2 files changed, 6 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/13710.bugfix

diff --git a/changelog.d/13710.bugfix b/changelog.d/13710.bugfix
new file mode 100644
index 0000000000..4c318d15f5
--- /dev/null
+++ b/changelog.d/13710.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where Synapse would count codepoints instead of bytes when validating the size of some fields.
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index bab31e33c5..5036604036 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -342,15 +342,15 @@ def check_state_dependent_auth_rules(
 
 
 def _check_size_limits(event: "EventBase") -> None:
-    if len(event.user_id) > 255:
+    if len(event.user_id.encode("utf-8")) > 255:
         raise EventSizeError("'user_id' too large")
-    if len(event.room_id) > 255:
+    if len(event.room_id.encode("utf-8")) > 255:
         raise EventSizeError("'room_id' too large")
-    if event.is_state() and len(event.state_key) > 255:
+    if event.is_state() and len(event.state_key.encode("utf-8")) > 255:
         raise EventSizeError("'state_key' too large")
-    if len(event.type) > 255:
+    if len(event.type.encode("utf-8")) > 255:
         raise EventSizeError("'type' too large")
-    if len(event.event_id) > 255:
+    if len(event.event_id.encode("utf-8")) > 255:
         raise EventSizeError("'event_id' too large")
     if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE:
         raise EventSizeError("event too large")
--
cgit 1.5.1

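[Editor's aside — illustration, not part of the patch series.] The commit above works
because Python's len() on a str counts Unicode codepoints, while the Matrix
specification's 255 limit on these identifiers is a limit in bytes. A minimal sketch of
the difference, using a made-up user ID:

    # Hypothetical example: "ü" is one codepoint but two bytes in UTF-8.
    user_id = "@" + "\u00fc" * 200 + ":example.com"

    print(len(user_id))                  # 213 codepoints -> the old check passed
    print(len(user_id.encode("utf-8")))  # 413 bytes -> the new check rejects (> 255)
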
From 1433b5d5b64c3a6624e6e4ff4fef22127c49df86 Mon Sep 17 00:00:00 2001
From: Tadeusz Sośnierz
Date: Fri, 21 Oct 2022 14:52:44 +0200
Subject: Show erasure status when listing users in the Admin API (#14205)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Show erasure status when listing users in the Admin API

* Use USING when joining erased_users

* Add changelog entry

* Revert "Use USING when joining erased_users"

This reverts commit 30bd2bf106415caadcfdbdd1b234ef2b106cc394.

* Make the erased check work on postgres

* Add a testcase for showing erased user status

* Appease the style linter

* Explicitly convert `erased` to bool to make SQLite consistent with Postgres

This also adds us an easy way in to fix the other accidentally integered columns.

* Move erasure status test to UsersListTestCase

* Include user erased status when fetching user info via the admin API

* Document the erase status in user_admin_api

* Appease the linter and mypy

* Signpost comments in tests

Co-authored-by: Tadeusz Sośnierz
Co-authored-by: David Robertson
---
 changelog.d/14205.feature                  |  1 +
 docs/admin_api/user_admin_api.md           |  4 ++++
 synapse/handlers/admin.py                  |  1 +
 synapse/storage/databases/main/__init__.py | 13 +++++++++--
 tests/rest/admin/test_user.py              | 35 +++++++++++++++++++++++++++++-
 5 files changed, 51 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/14205.feature

diff --git a/changelog.d/14205.feature b/changelog.d/14205.feature
new file mode 100644
index 0000000000..6692063352
--- /dev/null
+++ b/changelog.d/14205.feature
@@ -0,0 +1 @@
+Show erasure status when listing users in the Admin API.
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 3625c7b6c5..c95d6c9b05 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -37,6 +37,7 @@ It returns a JSON body like the following:
     "is_guest": 0,
     "admin": 0,
     "deactivated": 0,
+    "erased": false,
     "shadow_banned": 0,
     "creation_ts": 1560432506,
     "appservice_id": null,
@@ -167,6 +168,7 @@ A response body like the following is returned:
             "admin": 0,
             "user_type": null,
             "deactivated": 0,
+            "erased": false,
             "shadow_banned": 0,
             "displayname": "",
             "avatar_url": null,
@@ -177,6 +179,7 @@
             "admin": 1,
             "user_type": null,
             "deactivated": 0,
+            "erased": false,
             "shadow_banned": 0,
             "displayname": "",
             "avatar_url": "",
@@ -247,6 +250,7 @@ The following fields are returned in the JSON response body:
 - `user_type` - string - Type of the user. Normal users are type `None`. This allows user type specific behaviour. There are also types `support` and `bot`.
 - `deactivated` - bool - Status if that user has been marked as deactivated.
+- `erased` - bool - Status if that user has been marked as erased.
 - `shadow_banned` - bool - Status if that user has been marked as shadow banned.
 - `displayname` - string - The user's display name if they have set one.
 - `avatar_url` - string - The user's avatar URL if they have set one.
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index f2989cc4a2..5bf8e86387 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -100,6 +100,7 @@ class AdminHandler:
         user_info_dict["avatar_url"] = profile.avatar_url
         user_info_dict["threepids"] = threepids
         user_info_dict["external_ids"] = external_ids
+        user_info_dict["erased"] = await self.store.is_user_erased(user.to_string())
 
         return user_info_dict
 
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index a62b4abd4e..cfaedf5e0c 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -201,7 +201,7 @@ class DataStore(
         name: Optional[str] = None,
         guests: bool = True,
         deactivated: bool = False,
-        order_by: str = UserSortOrder.USER_ID.value,
+        order_by: str = UserSortOrder.NAME.value,
         direction: str = "f",
         approved: bool = True,
     ) -> Tuple[List[JsonDict], int]:
@@ -261,6 +261,7 @@ class DataStore(
             sql_base = f"""
                 FROM users as u
                 LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ?
+                LEFT JOIN erased_users AS eu ON u.name = eu.user_id
                 {where_clause}
                 """
             sql = "SELECT COUNT(*) as total_users " + sql_base
@@ -269,7 +270,8 @@ class DataStore(
 
             sql = f"""
                 SELECT name, user_type, is_guest, admin, deactivated, shadow_banned,
-                displayname, avatar_url, creation_ts * 1000 as creation_ts, approved
+                displayname, avatar_url, creation_ts * 1000 as creation_ts, approved,
+                eu.user_id is not null as erased
                 {sql_base}
                 ORDER BY {order_by_column} {order}, u.name ASC
                 LIMIT ? OFFSET ?
@@ -277,6 +279,13 @@ class DataStore(
             args += [limit, start]
             txn.execute(sql, args)
             users = self.db_pool.cursor_to_dict(txn)
+
+            # some of those boolean values are returned as integers when we're on SQLite
+            columns_to_boolify = ["erased"]
+            for user in users:
+                for column in columns_to_boolify:
+                    user[column] = bool(user[column])
+
             return users, count
 
         return await self.db_pool.runInteraction(
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 4c1ce33463..63410ffdf1 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -31,7 +31,7 @@ from synapse.api.room_versions import RoomVersions
 from synapse.rest.client import devices, login, logout, profile, register, room, sync
 from synapse.rest.media.v1.filepath import MediaFilePaths
 from synapse.server import HomeServer
-from synapse.types import JsonDict, UserID
+from synapse.types import JsonDict, UserID, create_requester
 from synapse.util import Clock
 
 from tests import unittest
@@ -924,6 +924,36 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         self.assertEqual(1, len(non_admin_user_ids), non_admin_user_ids)
         self.assertEqual(not_approved_user, non_admin_user_ids[0])
 
+    def test_erasure_status(self) -> None:
+        # Create a new user.
+        user_id = self.register_user("eraseme", "eraseme")
+
+        # They should appear in the list users API, marked as not erased.
+        channel = self.make_request(
+            "GET",
+            self.url + "?deactivated=true",
+            access_token=self.admin_user_tok,
+        )
+        users = {user["name"]: user for user in channel.json_body["users"]}
+        self.assertIs(users[user_id]["erased"], False)
+
+        # Deactivate that user, requesting erasure.
+        deactivate_account_handler = self.hs.get_deactivate_account_handler()
+        self.get_success(
+            deactivate_account_handler.deactivate_account(
+                user_id, erase_data=True, requester=create_requester(user_id)
+            )
+        )
+
+        # Repeat the list users query. They should now be marked as erased.
+        channel = self.make_request(
+            "GET",
+            self.url + "?deactivated=true",
+            access_token=self.admin_user_tok,
+        )
+        users = {user["name"]: user for user in channel.json_body["users"]}
+        self.assertIs(users[user_id]["erased"], True)
+
     def _order_test(
         self,
         expected_user_list: List[str],
@@ -1195,6 +1225,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
         self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
         self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"])
         self.assertEqual("User1", channel.json_body["displayname"])
+        self.assertFalse(channel.json_body["erased"])
 
         # Deactivate and erase user
         channel = self.make_request(
@@ -1219,6 +1250,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
         self.assertEqual(0, len(channel.json_body["threepids"]))
         self.assertIsNone(channel.json_body["avatar_url"])
         self.assertIsNone(channel.json_body["displayname"])
+        self.assertTrue(channel.json_body["erased"])
 
         self._is_erased("@user:test", True)
 
@@ -2757,6 +2789,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertIn("avatar_url", content)
         self.assertIn("admin", content)
         self.assertIn("deactivated", content)
+        self.assertIn("erased", content)
         self.assertIn("shadow_banned", content)
         self.assertIn("creation_ts", content)
         self.assertIn("appservice_id", content)
--
cgit 1.5.1

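[Editor's aside — illustration, not part of the patch series.] The bool() coercion
added above exists because SQLite has no boolean column type: a boolean expression
such as the patch's `eu.user_id is not null` comes back from SQLite as the integer
0 or 1, whereas the same query under a Postgres driver yields a Python bool. A
minimal sketch of the mismatch and the fix:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    row = conn.execute("SELECT 1 IS NOT NULL").fetchone()
    print(row[0], type(row[0]))  # 1 <class 'int'> -- an int, not True
    print(bool(row[0]))          # True -- normalised, matching Postgres behaviour
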
From 4dd7aa371b6bc746fa4b0a9af220b2013b17a45d Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 21 Oct 2022 09:11:19 -0400
Subject: Properly update the threads table when thread events are redacted.
 (#14248)

When the last event in a thread is redacted we need to update the threads
table:

* Find the new latest event in the thread and store it into the table; or
* Remove the thread from the table if it is no longer a thread (i.e. all
  events in the thread were redacted).
---
 changelog.d/14248.bugfix                 |   1 +
 synapse/storage/databases/main/events.py |  61 ++++++++++++++---
 tests/rest/client/test_relations.py      | 110 +++++++++++++++++++++----------
 3 files changed, 129 insertions(+), 43 deletions(-)
 create mode 100644 changelog.d/14248.bugfix

diff --git a/changelog.d/14248.bugfix b/changelog.d/14248.bugfix
new file mode 100644
index 0000000000..203c52c16b
--- /dev/null
+++ b/changelog.d/14248.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted.
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 6698cbf664..00880bb37d 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -2028,25 +2028,37 @@ class PersistEventsStore:
             redacted_event_id: The event that was redacted.
         """
 
-        # Fetch the current relation of the event being redacted.
-        redacted_relates_to = self.db_pool.simple_select_one_onecol_txn(
+        # Fetch the relation of the event being redacted.
+        row = self.db_pool.simple_select_one_txn(
             txn,
             table="event_relations",
             keyvalues={"event_id": redacted_event_id},
-            retcol="relates_to_id",
+            retcols=("relates_to_id", "relation_type"),
             allow_none=True,
         )
+        # Nothing to do if no relation is found.
+        if row is None:
+            return
+
+        redacted_relates_to = row["relates_to_id"]
+        rel_type = row["relation_type"]
+        self.db_pool.simple_delete_txn(
+            txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
+        )
 
         # Any relation information for the related event must be cleared.
-        if redacted_relates_to is not None:
-            self.store._invalidate_cache_and_stream(
-                txn, self.store.get_relations_for_event, (redacted_relates_to,)
-            )
+        self.store._invalidate_cache_and_stream(
+            txn, self.store.get_relations_for_event, (redacted_relates_to,)
+        )
+        if rel_type == RelationTypes.ANNOTATION:
             self.store._invalidate_cache_and_stream(
                 txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,)
             )
+        if rel_type == RelationTypes.REPLACE:
             self.store._invalidate_cache_and_stream(
                 txn, self.store.get_applicable_edit, (redacted_relates_to,)
             )
+        if rel_type == RelationTypes.THREAD:
             self.store._invalidate_cache_and_stream(
                 txn, self.store.get_thread_summary, (redacted_relates_to,)
             )
@@ -2057,9 +2069,38 @@ class PersistEventsStore:
                 txn, self.store.get_threads, (room_id,)
             )
 
-        self.db_pool.simple_delete_txn(
-            txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
-        )
+            # Find the new latest event in the thread.
+            sql = """
+            SELECT event_id, topological_ordering, stream_ordering
+            FROM event_relations
+            INNER JOIN events USING (event_id)
+            WHERE relates_to_id = ? AND relation_type = ?
+            ORDER BY topological_ordering DESC, stream_ordering DESC
+            LIMIT 1
+            """
+            txn.execute(sql, (redacted_relates_to, RelationTypes.THREAD))
+
+            # If a latest event is found, update the threads table, this might
+            # be the same current latest event (if an earlier event in the thread
+            # was redacted).
+            latest_event_row = txn.fetchone()
+            if latest_event_row:
+                self.db_pool.simple_upsert_txn(
+                    txn,
+                    table="threads",
+                    keyvalues={"room_id": room_id, "thread_id": redacted_relates_to},
+                    values={
+                        "latest_event_id": latest_event_row[0],
+                        "topological_ordering": latest_event_row[1],
+                        "stream_ordering": latest_event_row[2],
+                    },
+                )
+
+            # Otherwise, delete the thread: it no longer exists.
+            else:
+                self.db_pool.simple_delete_one_txn(
+                    txn, table="threads", keyvalues={"thread_id": redacted_relates_to}
+                )
 
     def _store_room_topic_txn(self, txn: LoggingTransaction, event: EventBase) -> None:
         if isinstance(event.content.get("topic"), str):
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index ddf315b894..e3d801f7a8 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -1523,6 +1523,26 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
         )
         self.assertEqual(200, channel.code, channel.json_body)
 
+    def _get_threads(self) -> List[Tuple[str, str]]:
+        """Request the threads in the room and returns a list of thread ID and latest event ID."""
+        # Request the threads in the room.
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/rooms/{self.room}/threads",
+            access_token=self.user_token,
+        )
+        self.assertEquals(200, channel.code, channel.json_body)
+        threads = channel.json_body["chunk"]
+        return [
+            (
+                t["event_id"],
+                t["unsigned"]["m.relations"][RelationTypes.THREAD]["latest_event"][
+                    "event_id"
+                ],
+            )
+            for t in threads
+        ]
+
     def test_redact_relation_annotation(self) -> None:
         """
         Test that annotations of an event are properly handled after the
@@ -1567,58 +1587,82 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
         The redacted event should not be included in bundled aggregations
         or the response to relations.
         """
-        channel = self._send_relation(
-            RelationTypes.THREAD,
-            EventTypes.Message,
-            content={"body": "reply 1", "msgtype": "m.text"},
-        )
-        unredacted_event_id = channel.json_body["event_id"]
+        # Create a thread with a few events in it.
+        thread_replies = []
+        for i in range(3):
+            channel = self._send_relation(
+                RelationTypes.THREAD,
+                EventTypes.Message,
+                content={"body": f"reply {i}", "msgtype": "m.text"},
+            )
+            thread_replies.append(channel.json_body["event_id"])
 
-        # Note that the *last* event in the thread is redacted, as that gets
-        # included in the bundled aggregation.
-        channel = self._send_relation(
-            RelationTypes.THREAD,
-            EventTypes.Message,
-            content={"body": "reply 2", "msgtype": "m.text"},
+        ##################################################
+        # Check the test data is configured as expected. #
+        ##################################################
+        self.assertEquals(self._get_related_events(), list(reversed(thread_replies)))
+        relations = self._get_bundled_aggregations()
+        self.assertDictContainsSubset(
+            {"count": 3, "current_user_participated": True},
+            relations[RelationTypes.THREAD],
+        )
+        # The latest event is the last sent event.
+        self.assertEqual(
+            relations[RelationTypes.THREAD]["latest_event"]["event_id"],
+            thread_replies[-1],
         )
-        to_redact_event_id = channel.json_body["event_id"]
 
-        # Both relations exist.
-        event_ids = self._get_related_events()
+        # There should be one thread, the latest event is the event that will be redacted.
+        self.assertEqual(self._get_threads(), [(self.parent_id, thread_replies[-1])])
+
+        ##########################
+        # Redact the last event. #
+        ##########################
+        self._redact(thread_replies.pop())
+
+        # The thread should still exist, but the latest event should be updated.
+        self.assertEquals(self._get_related_events(), list(reversed(thread_replies)))
         relations = self._get_bundled_aggregations()
-        self.assertEquals(event_ids, [to_redact_event_id, unredacted_event_id])
         self.assertDictContainsSubset(
-            {
-                "count": 2,
-                "current_user_participated": True,
-            },
+            {"count": 2, "current_user_participated": True},
             relations[RelationTypes.THREAD],
         )
-        # And the latest event returned is the event that will be redacted.
+        # And the latest event is the last unredacted event.
         self.assertEqual(
             relations[RelationTypes.THREAD]["latest_event"]["event_id"],
-            to_redact_event_id,
+            thread_replies[-1],
         )
+        self.assertEqual(self._get_threads(), [(self.parent_id, thread_replies[-1])])
 
-        # Redact one of the reactions.
-        self._redact(to_redact_event_id)
+        ###########################################
+        # Redact the *first* event in the thread. #
+        ###########################################
+        self._redact(thread_replies.pop(0))
 
-        # The unredacted relation should still exist.
-        event_ids = self._get_related_events()
+        # Nothing should have changed (except the thread count).
+        self.assertEquals(self._get_related_events(), thread_replies)
         relations = self._get_bundled_aggregations()
-        self.assertEquals(event_ids, [unredacted_event_id])
         self.assertDictContainsSubset(
-            {
-                "count": 1,
-                "current_user_participated": True,
-            },
+            {"count": 1, "current_user_participated": True},
             relations[RelationTypes.THREAD],
         )
-        # And the latest event is now the unredacted event.
+        # And the latest event is the last unredacted event.
         self.assertEqual(
             relations[RelationTypes.THREAD]["latest_event"]["event_id"],
-            unredacted_event_id,
+            thread_replies[-1],
         )
+        self.assertEqual(self._get_threads(), [(self.parent_id, thread_replies[-1])])
+
+        ####################################
+        # Redact the last remaining event. #
+        ####################################
+        self._redact(thread_replies.pop(0))
+        self.assertEquals(thread_replies, [])
+
+        # The event should no longer be considered a thread.
+        self.assertEquals(self._get_related_events(), [])
+        self.assertEquals(self._get_bundled_aggregations(), {})
+        self.assertEqual(self._get_threads(), [])
 
     def test_redact_parent_edit(self) -> None:
         """Test that edits of an event are redacted when the original event
--
cgit 1.5.1

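[Editor's aside — illustration, not part of the patch series.] The heart of the commit
above is: on redaction, either promote the newest surviving reply to be the thread's
latest event, or drop the thread row entirely. A standalone sketch of that logic
against sqlite3; the table and column names mirror the patch, but the schema, the
literal "m.thread" (the value of RelationTypes.THREAD), and the function shape are
simplified assumptions:

    import sqlite3

    db = sqlite3.connect(":memory:")
    db.executescript("""
        CREATE TABLE events (event_id TEXT, topological_ordering INT, stream_ordering INT);
        CREATE TABLE event_relations (event_id TEXT, relates_to_id TEXT, relation_type TEXT);
        CREATE TABLE threads (thread_id TEXT PRIMARY KEY, latest_event_id TEXT);
    """)

    def on_redaction(thread_id: str, redacted_event_id: str) -> None:
        # The redacted event no longer relates to anything.
        db.execute("DELETE FROM event_relations WHERE event_id = ?", (redacted_event_id,))
        # Look for the newest surviving reply in the thread.
        row = db.execute(
            """
            SELECT event_id FROM event_relations
            INNER JOIN events USING (event_id)
            WHERE relates_to_id = ? AND relation_type = 'm.thread'
            ORDER BY topological_ordering DESC, stream_ordering DESC
            LIMIT 1
            """,
            (thread_id,),
        ).fetchone()
        if row:
            # Promote it: upsert the thread row with the new latest event.
            db.execute(
                "INSERT INTO threads (thread_id, latest_event_id) VALUES (?, ?) "
                "ON CONFLICT (thread_id) DO UPDATE SET latest_event_id = excluded.latest_event_id",
                (thread_id, row[0]),
            )
        else:
            # Every reply was redacted: the thread no longer exists.
            db.execute("DELETE FROM threads WHERE thread_id = ?", (thread_id,))
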
From 5f77b74215b1f1549d776f7ca7a27b907d51a08e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 21 Oct 2022 15:17:19 +0100
Subject: Try building ABI3 wheels for cpython (#14253)

---
 .ci/scripts/auditwheel_wrapper.py | 132 ++++++++++++++++++++++++++++++++++++++
 changelog.d/14253.misc            |   1 +
 pyproject.toml                    |   9 +++
 3 files changed, 142 insertions(+)
 create mode 100755 .ci/scripts/auditwheel_wrapper.py
 create mode 100644 changelog.d/14253.misc

diff --git a/.ci/scripts/auditwheel_wrapper.py b/.ci/scripts/auditwheel_wrapper.py
new file mode 100755
index 0000000000..c744644c25
--- /dev/null
+++ b/.ci/scripts/auditwheel_wrapper.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Wraps `auditwheel repair` to first check if we're repairing a potentially abi3
+# compatible wheel, if so rename the wheel before repairing it.
+
+import argparse
+import os
+import subprocess
+from typing import Optional
+from zipfile import ZipFile
+
+from packaging.tags import Tag
+from packaging.utils import parse_wheel_filename
+from packaging.version import Version
+
+
+def check_is_abi3_compatible(wheel_file: str) -> None:
+    """Check the contents of the built wheel for any `.so` files that are *not*
+    abi3 compatible.
+    """
+
+    with ZipFile(wheel_file, "r") as wheel:
+        for file in wheel.namelist():
+            if not file.endswith(".so"):
+                continue
+
+            if not file.endswith(".abi3.so"):
+                raise Exception(f"Found non-abi3 lib: {file}")
+
+
+def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
+    """Replaces the cpython wheel file with a ABI3 compatible wheel"""
+
+    if tag.abi == "abi3":
+        # Nothing to do.
+        return wheel_file
+
+    check_is_abi3_compatible(wheel_file)
+
+    abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
+
+    dirname = os.path.dirname(wheel_file)
+    new_wheel_file = os.path.join(
+        dirname,
+        f"{name}-{version}-{abi3_tag}.whl",
+    )
+
+    os.rename(wheel_file, new_wheel_file)
+
+    print("Renamed wheel to", new_wheel_file)
+
+    return new_wheel_file
+
+
+def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
+    """Entry point"""
+
+    # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
+    # normalizes the package name (i.e. it converts matrix_synapse ->
+    # matrix-synapse), which is not what we want.
+    _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
+    name = os.path.basename(wheel_file).split("-")[0]
+
+    if len(tags) != 1:
+        # We expect only a wheel file with only a single tag
+        raise Exception(f"Unexpectedly found multiple tags: {tags}")
+
+    tag = next(iter(tags))
+
+    if build:
+        # We don't use build tags in Synapse
+        raise Exception(f"Unexpected build tag: {build}")
+
+    # If the wheel is for cpython then convert it into an abi3 wheel.
+    if tag.interpreter.startswith("cp"):
+        wheel_file = cpython(wheel_file, name, version, tag)
+
+    # Finally, repair the wheel.
+    if archs is not None:
+        # If we are given archs then we are on macos and need to use
+        # `delocate-listdeps`.
+        subprocess.run(["delocate-listdeps", wheel_file], check=True)
+        subprocess.run(
+            ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
+            check=True,
+        )
+    else:
+        subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")
+
+    parser.add_argument(
+        "--wheel-dir",
+        "-w",
+        metavar="WHEEL_DIR",
+        help="Directory to store delocated wheels",
+        required=True,
+    )
+
+    parser.add_argument(
+        "--require-archs",
+        metavar="archs",
+        default=None,
+    )
+
+    parser.add_argument(
+        "wheel_file",
+        metavar="WHEEL_FILE",
+    )
+
+    args = parser.parse_args()
+
+    wheel_file = args.wheel_file
+    wheel_dir = args.wheel_dir
+    archs = args.require_archs
+
+    main(wheel_file, wheel_dir, archs)
diff --git a/changelog.d/14253.misc b/changelog.d/14253.misc
new file mode 100644
index 0000000000..c1382ddafa
--- /dev/null
+++ b/changelog.d/14253.misc
@@ -0,0 +1 @@
+Build ABI3 wheels for cpython.
diff --git a/pyproject.toml b/pyproject.toml
index 6ebac41ed1..78011be490 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -330,3 +330,12 @@ environment= { PATH = "$PATH:$HOME/.cargo/bin" }
 before-build = "rm -rf {project}/build"
 build-frontend = "build"
 test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"
+
+
+[tool.cibuildwheel.linux]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}"
+
+[tool.cibuildwheel.macos]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}"
--
cgit 1.5.1

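[Editor's aside — illustration, not part of the patch series.] What the wrapper's
renaming step boils down to, using the same real `packaging` APIs the script imports;
the wheel filename here is a made-up example:

    from packaging.tags import Tag
    from packaging.utils import parse_wheel_filename

    filename = "matrix_synapse-1.70.0-cp37-cp37m-manylinux_2_17_x86_64.whl"
    _, version, build, tags = parse_wheel_filename(filename)
    tag = next(iter(tags))

    # A cp37-abi3 wheel installs on CPython 3.7 and every later 3.x, so one
    # wheel per platform covers all supported CPython versions.
    abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
    print(f"matrix_synapse-{version}-{abi3_tag}.whl")
    # matrix_synapse-1.70.0-cp37-abi3-manylinux_2_17_x86_64.whl
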
From 1c642156d7aa1b0ed1a0b83a64d86002ec510afa Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 21 Oct 2022 15:22:26 +0100
Subject: Only build aarch64 wheels for cpython manylinux (#14259)

---
 changelog.d/14259.misc | 1 +
 pyproject.toml         | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/14259.misc

diff --git a/changelog.d/14259.misc b/changelog.d/14259.misc
new file mode 100644
index 0000000000..7425498964
--- /dev/null
+++ b/changelog.d/14259.misc
@@ -0,0 +1 @@
+Only build aarch64 wheels for cpython manylinux.
diff --git a/pyproject.toml b/pyproject.toml
index 78011be490..ec0fbd1eb9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -318,7 +318,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.cibuildwheel]
 # Skip unsupported platforms (by us or by Rust).
-skip = "cp36* *-musllinux_i686"
+skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
 
 # We need a rust compiler
 before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
--
cgit 1.5.1


From d24346f53055eae7fb8e9038ef35fa843790742b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 21 Oct 2022 16:03:44 +0100
Subject: Fix logging error on SIGHUP (#14258)

---
 changelog.d/14258.bugfix | 2 ++
 synapse/app/_base.py     | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/14258.bugfix

diff --git a/changelog.d/14258.bugfix b/changelog.d/14258.bugfix
new file mode 100644
index 0000000000..de97945844
--- /dev/null
+++ b/changelog.d/14258.bugfix
@@ -0,0 +1,2 @@
+Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal, and debug logging was enabled.
+
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 000912e86e..a683ebf4cb 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -558,7 +558,7 @@ def reload_cache_config(config: HomeServerConfig) -> None:
             logger.warning(f)
     else:
         logger.debug(
-            "New cache config. Was:\n %s\nNow:\n",
+            "New cache config. Was:\n %s\nNow:\n %s",
             previous_cache_config.__dict__,
             config.caches.__dict__,
         )
--
cgit 1.5.1


From 1d45ad8b2ab1c41dd489ccd581d027077bc917e5 Mon Sep 17 00:00:00 2001
From: Germain
Date: Fri, 21 Oct 2022 18:44:00 +0100
Subject: Improve aesthetics and reusability of HTML templates. (#13652)

Use a base template to create a cohesive feel across the HTML templates
provided by Synapse.

Adds basic styling to the base template for a more user-friendly look and
feel.
---
 changelog.d/13652.feature                          |   1 +
 synapse/res/templates/_base.html                   |  29 ++
 .../res/templates/account_previously_renewed.html  |  18 +-
 synapse/res/templates/account_renewed.html         |  18 +-
 synapse/res/templates/add_threepid.html            |  22 +-
 synapse/res/templates/add_threepid_failure.html    |  20 +-
 synapse/res/templates/add_threepid_success.html    |  18 +-
 synapse/res/templates/auth_success.html            |  28 +-
 synapse/res/templates/invalid_token.html           |  17 +-
 synapse/res/templates/notice_expiry.html           |  93 +++---
 synapse/res/templates/notif_mail.html              | 116 ++++---
 synapse/res/templates/password_reset.html          |  19 +-
 .../res/templates/password_reset_confirmation.html |  14 +-
 synapse/res/templates/password_reset_failure.html  |  14 +-
 synapse/res/templates/password_reset_success.html  |  12 +-
 synapse/res/templates/recaptcha.html               |  19 +-
 synapse/res/templates/registration.html            |  21 +-
 synapse/res/templates/registration_failure.html    |  12 +-
 synapse/res/templates/registration_success.html    |  13 +-
 synapse/res/templates/registration_token.html      |  16 +-
 synapse/res/templates/sso_account_deactivated.html |  49 ++-
 .../res/templates/sso_auth_account_details.html    | 372 ++++++++++-----------
 synapse/res/templates/sso_auth_bad_user.html       |  52 ++-
 synapse/res/templates/sso_auth_confirm.html        |  56 ++--
 synapse/res/templates/sso_auth_success.html        |  54 ++-
 synapse/res/templates/sso_error.html               |  34 +-
 synapse/res/templates/sso_login_idp_picker.html    | 114 +++----
 synapse/res/templates/sso_new_user_consent.html    |  60 ++--
 synapse/res/templates/sso_redirect_confirm.html    |  75 ++---
 synapse/res/templates/style.css                    |  29 ++
 synapse/res/templates/terms.html                   |  16 +-
 31 files changed, 691 insertions(+), 740 deletions(-)
 create mode 100644 changelog.d/13652.feature
 create mode 100644 synapse/res/templates/_base.html
 create mode 100644
synapse/res/templates/style.css diff --git a/changelog.d/13652.feature b/changelog.d/13652.feature new file mode 100644 index 0000000000..bc7f2926dc --- /dev/null +++ b/changelog.d/13652.feature @@ -0,0 +1 @@ +Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. \ No newline at end of file diff --git a/synapse/res/templates/_base.html b/synapse/res/templates/_base.html new file mode 100644 index 0000000000..46439fce6a --- /dev/null +++ b/synapse/res/templates/_base.html @@ -0,0 +1,29 @@ + + + + + + + {% block title %}{% endblock %} + + {% block header %}{% endblock %} + + +
+ {% if app_name == "Riot" %} + [Riot] + {% elif app_name == "Vector" %} + [Vector] + {% elif app_name == "Element" %} + [Element] + {% else %} + [matrix] + {% endif %} +
+ +{% block body %}{% endblock %} + + + diff --git a/synapse/res/templates/account_previously_renewed.html b/synapse/res/templates/account_previously_renewed.html index bd4f7cea97..91582a8af0 100644 --- a/synapse/res/templates/account_previously_renewed.html +++ b/synapse/res/templates/account_previously_renewed.html @@ -1,12 +1,6 @@ - - - - - - - Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. - - - Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. - - \ No newline at end of file +{% extends "_base.html" %} +{% block title %}Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.{% endblock %} + +{% block body %} +

Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.

+{% endblock %} diff --git a/synapse/res/templates/account_renewed.html b/synapse/res/templates/account_renewed.html index 57b319f375..18a57833f1 100644 --- a/synapse/res/templates/account_renewed.html +++ b/synapse/res/templates/account_renewed.html @@ -1,12 +1,6 @@ - - - - - - - Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. - - - Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. - - \ No newline at end of file +{% extends "_base.html" %} +{% block title %}Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.{% endblock %} + +{% block body %} +

Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.

+{% endblock %} diff --git a/synapse/res/templates/add_threepid.html b/synapse/res/templates/add_threepid.html index 71f2215b7a..33c883936a 100644 --- a/synapse/res/templates/add_threepid.html +++ b/synapse/res/templates/add_threepid.html @@ -1,14 +1,8 @@ - - - - - - - Request to add an email address to your Matrix account - - -

A request to add an email address to your Matrix account has been received. If this was you, please click the link below to confirm adding this email:

- {{ link }} -

If this was not you, you can safely ignore this email. Thank you.

- - +{% extends "_base.html" %} +{% block title %}Request to add an email address to your Matrix account{% endblock %} + +{% block body %} +

A request to add an email address to your Matrix account has been received. If this was you, please click the link below to confirm adding this email:

+{{ link }} +

If this was not you, you can safely ignore this email. Thank you.

+{% endblock %} diff --git a/synapse/res/templates/add_threepid_failure.html b/synapse/res/templates/add_threepid_failure.html index bd627ee9ce..f6d7e33825 100644 --- a/synapse/res/templates/add_threepid_failure.html +++ b/synapse/res/templates/add_threepid_failure.html @@ -1,13 +1,7 @@ - - - - - - - Request failed - - -

The request failed for the following reason: {{ failure_reason }}.

-

No changes have been made to your account.

- - +{% extends "_base.html" %} +{% block title %}Request failed{% endblock %} + +{% block body %} +

The request failed for the following reason: {{ failure_reason }}.

+

No changes have been made to your account.

+{% endblock %} diff --git a/synapse/res/templates/add_threepid_success.html b/synapse/res/templates/add_threepid_success.html index 49170c138e..6d45111796 100644 --- a/synapse/res/templates/add_threepid_success.html +++ b/synapse/res/templates/add_threepid_success.html @@ -1,12 +1,6 @@ - - - - - - - Your email has now been validated - - -

Your email has now been validated, please return to your client. You may now close this window.

- - \ No newline at end of file +{% extends "_base.html" %} +{% block title %}Your email has now been validated{% endblock %} + +{% block body %} +

Your email has now been validated, please return to your client. You may now close this window.

+{% endblock %} diff --git a/synapse/res/templates/auth_success.html b/synapse/res/templates/auth_success.html index 2d6ac44a0e..9178332f59 100644 --- a/synapse/res/templates/auth_success.html +++ b/synapse/res/templates/auth_success.html @@ -1,21 +1,21 @@ - - -Success! - - +{% extends "_base.html" %} +{% block title %}Success!{% endblock %} + +{% block header %} - - -
-

Thank you

-

You may now close this window and return to the application

-
- - +{% endblock %} + +{% block body %} +
+

Thank you

+

You may now close this window and return to the application

+
+ +{% endblock %} diff --git a/synapse/res/templates/invalid_token.html b/synapse/res/templates/invalid_token.html index 2c7c384fe3..d0b1dae669 100644 --- a/synapse/res/templates/invalid_token.html +++ b/synapse/res/templates/invalid_token.html @@ -1,12 +1,5 @@ - - - - - - - Invalid renewal token. - - - Invalid renewal token. - - +{% block title %}Invalid renewal token.{% endblock %} + +{% block body %} +

Invalid renewal token.

+{% endblock %} diff --git a/synapse/res/templates/notice_expiry.html b/synapse/res/templates/notice_expiry.html index 865f9f7ada..406397aaca 100644 --- a/synapse/res/templates/notice_expiry.html +++ b/synapse/res/templates/notice_expiry.html @@ -1,47 +1,46 @@ - - - - - - - - - - - - - - -
- - - - - - - - -
-
Hi {{ display_name }},
-
-
Your account will expire on {{ expiration_ts|format_ts("%d-%m-%Y") }}. This means that you will lose access to your account after this date.
-
To extend the validity of your account, please click on the link below (or copy and paste it into a new browser tab):
- -
-
- - +{% extends "_base.html" %} +{% block title %}Notice of expiry{% endblock %} + +{% block header %} + +{% endblock %} + +{% block body %} + + + + + + +
+ + + + + + + + +
+
Hi {{ display_name }},
+
+
Your account will expire on {{ expiration_ts|format_ts("%d-%m-%Y") }}. This means that you will lose access to your account after this date.
+
To extend the validity of your account, please click on the link below (or copy and paste it into a new browser tab):
+ +
+
+{% endblock %} diff --git a/synapse/res/templates/notif_mail.html b/synapse/res/templates/notif_mail.html index 9dba0c0253..939d40315f 100644 --- a/synapse/res/templates/notif_mail.html +++ b/synapse/res/templates/notif_mail.html @@ -1,59 +1,57 @@ - - - - - - - - - - - - - - -
- - - - - -
-
Hi {{ user_display_name }},
-
{{ summary_text }}
-
- {%- for room in rooms %} - {%- include 'room.html' with context %} - {%- endfor %} - -
- - +{% block title %}New activity in room{% endblock %} + +{% block header %} + +{% endblock %} + +{% block body %} + + + + + + +
+ + + + + +
+
Hi {{ user_display_name }},
+
{{ summary_text }}
+
+ {%- for room in rooms %} + {%- include 'room.html' with context %} + {%- endfor %} + +
+{% endblock %} diff --git a/synapse/res/templates/password_reset.html b/synapse/res/templates/password_reset.html index a8bdce357b..de5a9ec68f 100644 --- a/synapse/res/templates/password_reset.html +++ b/synapse/res/templates/password_reset.html @@ -1,14 +1,9 @@ - - - Password reset - - - - -

A password reset request has been received for your Matrix account. If this was you, please click the link below to confirm resetting your password:

+{% block title %}Password reset{% endblock %} - {{ link }} +{% block body %} +

A password reset request has been received for your Matrix account. If this was you, please click the link below to confirm resetting your password:

-

If this was not you, do not click the link above and instead contact your server administrator. Thank you.

- - +{{ link }} + +

If this was not you, do not click the link above and instead contact your server administrator. Thank you.

+{% endblock %} diff --git a/synapse/res/templates/password_reset_confirmation.html b/synapse/res/templates/password_reset_confirmation.html index 2e3fd2ec1e..0eac64b6a8 100644 --- a/synapse/res/templates/password_reset_confirmation.html +++ b/synapse/res/templates/password_reset_confirmation.html @@ -1,10 +1,6 @@ - - - Password reset confirmation - - - - +{% block title %}Password reset confirmation{% endblock %} + +{% block body %}
@@ -15,6 +11,4 @@ If you did not mean to do this, please close this page and your password will not be changed.

- - - +{% endblock %} diff --git a/synapse/res/templates/password_reset_failure.html b/synapse/res/templates/password_reset_failure.html index 2d59c463f0..977babdb40 100644 --- a/synapse/res/templates/password_reset_failure.html +++ b/synapse/res/templates/password_reset_failure.html @@ -1,12 +1,6 @@ - - - Password reset failure - - - - -

The request failed for the following reason: {{ failure_reason }}.

+{% block title %}Password reset failure{% endblock %} +{% block body %} +

The request failed for the following reason: {{ failure_reason }}.

Your password has not been reset.

- - +{% endblock %} diff --git a/synapse/res/templates/password_reset_success.html b/synapse/res/templates/password_reset_success.html index 5165bd1fa2..0e99fad7ff 100644 --- a/synapse/res/templates/password_reset_success.html +++ b/synapse/res/templates/password_reset_success.html @@ -1,9 +1,5 @@ - - - - - - +{% block title %}Password reset success{% endblock %} + +{% block body %}

Your email has now been validated, please return to your client to reset your password. You may now close this window.

- - +{% endblock %} diff --git a/synapse/res/templates/recaptcha.html b/synapse/res/templates/recaptcha.html index 615d3239c6..feaf3f6aed 100644 --- a/synapse/res/templates/recaptcha.html +++ b/synapse/res/templates/recaptcha.html @@ -1,10 +1,7 @@ - - -Authentication - - - +{% block title %}Authentication{% endblock %} + +{% block header %} + - - +{% endblock %} + +{% block body %}
{% if error is defined %} @@ -37,5 +35,4 @@ function captchaDone() {
- - +{% endblock %} \ No newline at end of file diff --git a/synapse/res/templates/registration.html b/synapse/res/templates/registration.html index 20e831ff4a..189960a832 100644 --- a/synapse/res/templates/registration.html +++ b/synapse/res/templates/registration.html @@ -1,16 +1,11 @@ - - - Registration - - - - -

You have asked us to register this email with a new Matrix account. If this was you, please click the link below to confirm your email address:

+{% block title %}Registration{% endblock %} - Verify Your Email Address +{% block body %} +

You have asked us to register this email with a new Matrix account. If this was you, please click the link below to confirm your email address:

-

If this was not you, you can safely disregard this email.

+Verify Your Email Address -

Thank you.

- - +

If this was not you, you can safely disregard this email.

+ +

Thank you.

+{% endblock %} diff --git a/synapse/res/templates/registration_failure.html b/synapse/res/templates/registration_failure.html index a6ed22bc90..3debe9301d 100644 --- a/synapse/res/templates/registration_failure.html +++ b/synapse/res/templates/registration_failure.html @@ -1,9 +1,5 @@ - - - - - - +{% block title %}Registration failure{% endblock %} + +{% block body %}

Validation failed for the following reason: {{ failure_reason }}.

- - +{% endblock %} diff --git a/synapse/res/templates/registration_success.html b/synapse/res/templates/registration_success.html index d51d5549d8..e2dd020a9e 100644 --- a/synapse/res/templates/registration_success.html +++ b/synapse/res/templates/registration_success.html @@ -1,10 +1,5 @@ - - - Your email has now been validated - - - - +{% block title %}Your email has now been validated{% endblock %} + +{% block body %}

Your email has now been validated, please return to your client. You may now close this window.

- - +{% endblock %} diff --git a/synapse/res/templates/registration_token.html b/synapse/res/templates/registration_token.html index 59a98f564c..2ee5866ba5 100644 --- a/synapse/res/templates/registration_token.html +++ b/synapse/res/templates/registration_token.html @@ -1,11 +1,10 @@ - - -Authentication - - +{% block title %}Authentication{% endblock %} + +{% block header %} - - +{% endblock %} + +{% block body %}
{% if error is defined %} @@ -19,5 +18,4 @@
- - +{% endblock %} diff --git a/synapse/res/templates/sso_account_deactivated.html b/synapse/res/templates/sso_account_deactivated.html index 075f801cec..c634229840 100644 --- a/synapse/res/templates/sso_account_deactivated.html +++ b/synapse/res/templates/sso_account_deactivated.html @@ -1,25 +1,24 @@ - - - - - SSO account deactivated - - - - -
-

Your account has been deactivated

-

- No account found -

-

- Your account might have been deactivated by the server administrator. - You can either try to create a new account or contact the server’s - administrator. -

-
- {% include "sso_footer.html" without context %} - - +{% block title %}SSO account deactivated{% endblock %} + +{% block header %} + +{% endblock %} + +{% block body %} +
+
+

Your account has been deactivated

+

+ No account found +

+

+ Your account might have been deactivated by the server administrator. + You can either try to create a new account or contact the server’s + administrator. +

+
+
+{% include "sso_footer.html" without context %} +{% endblock %} diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html index 2d1db386e1..b516333373 100644 --- a/synapse/res/templates/sso_auth_account_details.html +++ b/synapse/res/templates/sso_auth_account_details.html @@ -1,189 +1,185 @@ - - - - Create your account - - - - - - - -
-

Create your account

-

This is required. Continue to create your account on {{ server_name }}. You can't change this later.

-
-
-
-
- -
@
- -
:{{ server_name }}
+{% block title %}Create your account{% endblock %} + +{% block header %} + + +{% endblock %} + +{% block body %} +
+

Create your account

+

This is required. Continue to create your account on {{ server_name }}. You can't change this later.

+
+
+ +
+ +
@
+ +
:{{ server_name }}
+
+ + + {% if user_attributes.avatar_url or user_attributes.display_name or user_attributes.emails %} +
+

{% if idp.idp_icon %}{% endif %}Optional data from {{ idp.idp_name }}

+ {% if user_attributes.avatar_url %} +
- {% include "sso_footer.html" without context %} - - - + + + {% endif %} + {% if user_attributes.display_name %} + + {% endif %} + {% for email in user_attributes.emails %} + + {% endfor %} + + {% endif %} + +
+{% include "sso_footer.html" without context %} + +{% endblock %} diff --git a/synapse/res/templates/sso_auth_bad_user.html b/synapse/res/templates/sso_auth_bad_user.html index 94403fc3ce..69fdcc9ef0 100644 --- a/synapse/res/templates/sso_auth_bad_user.html +++ b/synapse/res/templates/sso_auth_bad_user.html @@ -1,27 +1,25 @@ - - - - - Authentication failed - - - - - -
-

That doesn't look right

-

- We were unable to validate your {{ server_name }} account - via single sign‑on (SSO), because the SSO Identity - Provider returned different details than when you logged in. -

-

- Try the operation again, and ensure that you use the same details on - the Identity Provider as when you log into your account. -

-
- {% include "sso_footer.html" without context %} - - +{% block title %}Authentication failed{% endblock %} + +{% block header %} + +{% endblock %} + +{% block body %} +
+
+

That doesn't look right

+

+ We were unable to validate your {{ server_name }} account + via single sign‑on (SSO), because the SSO Identity + Provider returned different details than when you logged in. +

+

+ Try the operation again, and ensure that you use the same details on + the Identity Provider as when you log into your account. +

+
+
+{% include "sso_footer.html" without context %} +{% endblock %} diff --git a/synapse/res/templates/sso_auth_confirm.html b/synapse/res/templates/sso_auth_confirm.html index aa1c974a6b..2d106e0ae4 100644 --- a/synapse/res/templates/sso_auth_confirm.html +++ b/synapse/res/templates/sso_auth_confirm.html @@ -1,30 +1,26 @@ - - - - - Confirm it's you - - - - - -
-

Confirm it's you to continue

-

- A client is trying to {{ description }}. To confirm this action - re-authorize your account with single sign-on. -

-

- If you did not expect this, your account may be compromised. -

-
-
- - Continue with {{ idp.idp_name }} - -
- {% include "sso_footer.html" without context %} - - +{% block title %}Confirm it's you{% endblock %} + +{% block header %} + +{% endblock %} + +{% block body %} +
+

Confirm it's you to continue

+

+ A client is trying to {{ description }}. To confirm this action + re-authorize your account with single sign-on. +

+

+ If you did not expect this, your account may be compromised. +

+
+
+ + Continue with {{ idp.idp_name }} + +
+{% include "sso_footer.html" without context %} +{% endblock %} diff --git a/synapse/res/templates/sso_auth_success.html b/synapse/res/templates/sso_auth_success.html index 4898af6011..56150eaefe 100644 --- a/synapse/res/templates/sso_auth_success.html +++ b/synapse/res/templates/sso_auth_success.html @@ -1,29 +1,25 @@ - - - - - Authentication successful - - - - - - -
-

Thank you

-

- Now we know it’s you, you can close this window and return to the - application. -

-
- {% include "sso_footer.html" without context %} - - +{% block title %}Authentication successful{% endblock %} + +{% block header %} + + +{% endblock %} + +{% block body %} +
+

Thank you

+

+ Now we know it’s you, you can close this window and return to the + application. +

+
+{% include "sso_footer.html" without context %} +{% endblock %} diff --git a/synapse/res/templates/sso_error.html b/synapse/res/templates/sso_error.html index 19992ff2ad..e394a92623 100644 --- a/synapse/res/templates/sso_error.html +++ b/synapse/res/templates/sso_error.html @@ -1,19 +1,19 @@ - - - - - Authentication failed - - - - - +{% block header %} +{% if error == "unauthorised" %} + +{% endif %} +{% endblock %} + +{% block body %} +
{# If an error of unauthorised is returned it means we have actively rejected their login #} {% if error == "unauthorised" %}
@@ -66,5 +66,5 @@ } {% endif %} - - +
+{% endblock %} diff --git a/synapse/res/templates/sso_login_idp_picker.html b/synapse/res/templates/sso_login_idp_picker.html index 56fabfa3d2..a2772ca9ef 100644 --- a/synapse/res/templates/sso_login_idp_picker.html +++ b/synapse/res/templates/sso_login_idp_picker.html @@ -1,63 +1,59 @@ - - - - - - - Choose identity provider - - - -
-

Log in to {{ server_name }}

-

Choose an identity provider to log in

-
-
- -
- {% include "sso_footer.html" without context %} - - + .providers a { + display: block; + border-radius: 4px; + border: 1px solid #17191C; + padding: 8px; + text-align: center; + text-decoration: none; + color: #17191C; + display: flex; + align-items: center; + font-weight: bold; + } + + .providers a img { + width: 24px; + height: 24px; + } + .providers a span { + flex: 1; + } + +{% endblock %} + +{% block body %} +
+

Log in to {{ server_name }}

+

Choose an identity provider to log in

+
+
+ +
+{% include "sso_footer.html" without context %} +{% endblock %} diff --git a/synapse/res/templates/sso_new_user_consent.html b/synapse/res/templates/sso_new_user_consent.html index 523f64c4fc..126887d26c 100644 --- a/synapse/res/templates/sso_new_user_consent.html +++ b/synapse/res/templates/sso_new_user_consent.html @@ -1,33 +1,29 @@ - - - - - Agree to terms and conditions - - - - - -
-

Your account is nearly ready

-

Agree to the terms to create your account.

-
-
- {% include "sso_partial_profile.html" %} - -
- {% include "sso_footer.html" without context %} - - +{% block header %} + +{% endblock %} + +{% block body %} +
+

Your account is nearly ready

+

Agree to the terms to create your account.

+
+
+ {% include "sso_partial_profile.html" %} + +
+{% include "sso_footer.html" without context %} +{% endblock %} diff --git a/synapse/res/templates/sso_redirect_confirm.html b/synapse/res/templates/sso_redirect_confirm.html index 1049a9bd92..887ee0d294 100644 --- a/synapse/res/templates/sso_redirect_confirm.html +++ b/synapse/res/templates/sso_redirect_confirm.html @@ -1,41 +1,38 @@ - - - - - Continue to your account - - - - - -
-

Continue to your account

-
-
- {% include "sso_partial_profile.html" %} -

Continuing will grant {{ display_url }} access to your account.

- Continue -
- {% include "sso_footer.html" without context %} - - + .confirm-trust { + margin: 34px 0; + color: #8D99A5; + } + .confirm-trust strong { + color: #17191C; + } + + .confirm-trust::before { + content: ""; + background-image: url('data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTgiIGhlaWdodD0iMTgiIHZpZXdCb3g9IjAgMCAxOCAxOCIgZmlsbD0ibm9uZSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KPHBhdGggZmlsbC1ydWxlPSJldmVub2RkIiBjbGlwLXJ1bGU9ImV2ZW5vZGQiIGQ9Ik0xNi41IDlDMTYuNSAxMy4xNDIxIDEzLjE0MjEgMTYuNSA5IDE2LjVDNC44NTc4NiAxNi41IDEuNSAxMy4xNDIxIDEuNSA5QzEuNSA0Ljg1Nzg2IDQuODU3ODYgMS41IDkgMS41QzEzLjE0MjEgMS41IDE2LjUgNC44NTc4NiAxNi41IDlaTTcuMjUgOUM3LjI1IDkuNDY1OTYgNy41Njg2OSA5Ljg1NzQ4IDggOS45Njg1VjEyLjM3NUM4IDEyLjkyNzMgOC40NDc3MiAxMy4zNzUgOSAxMy4zNzVIMTAuMTI1QzEwLjY3NzMgMTMuMzc1IDExLjEyNSAxMi45MjczIDExLjEyNSAxMi4zNzVDMTEuMTI1IDExLjgyMjcgMTAuNjc3MyAxMS4zNzUgMTAuMTI1IDExLjM3NUgxMFY5QzEwIDguOTY1NDggOS45OTgyNSA4LjkzMTM3IDkuOTk0ODQgOC44OTc3NkM5Ljk0MzYzIDguMzkzNSA5LjUxNzc3IDggOSA4SDguMjVDNy42OTc3MiA4IDcuMjUgOC40NDc3MiA3LjI1IDlaTTkgNy41QzkuNjIxMzIgNy41IDEwLjEyNSA2Ljk5NjMyIDEwLjEyNSA2LjM3NUMxMC4xMjUgNS43NTM2OCA5LjYyMTMyIDUuMjUgOSA1LjI1QzguMzc4NjggNS4yNSA3Ljg3NSA1Ljc1MzY4IDcuODc1IDYuMzc1QzcuODc1IDYuOTk2MzIgOC4zNzg2OCA3LjUgOSA3LjVaIiBmaWxsPSIjQzFDNkNEIi8+Cjwvc3ZnPgoK'); + background-repeat: no-repeat; + width: 24px; + height: 24px; + display: block; + float: left; + } + +{% endblock %} + +{% block body %} +
+

Continue to your account

+
+
+ {% include "sso_partial_profile.html" %} +

Continuing will grant {{ display_url }} access to your account.

+ Continue +
+{% include "sso_footer.html" without context %} + +{% endblock %} diff --git a/synapse/res/templates/style.css b/synapse/res/templates/style.css new file mode 100644 index 0000000000..097b235ae5 --- /dev/null +++ b/synapse/res/templates/style.css @@ -0,0 +1,29 @@ +html { + height: 100%; +} + +body { + background: #f9fafb; + max-width: 680px; + margin: auto; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; +} + +.mx_Header { + border-bottom: 3px solid #ddd; + margin-bottom: 1rem; + padding-top: 1rem; + padding-bottom: 1rem; + text-align: center; +} + +@media screen and (max-width: 1120px) { + body { + font-size: 20px; + } + + h1 { font-size: 1rem; } + h2 { font-size: .9rem; } + h3 { font-size: .85rem; } + h4 { font-size: .8rem; } +} diff --git a/synapse/res/templates/terms.html b/synapse/res/templates/terms.html index 2081d990ab..977c3d0bc7 100644 --- a/synapse/res/templates/terms.html +++ b/synapse/res/templates/terms.html @@ -1,11 +1,10 @@ - - -Authentication - - +{% block title %}Authentication{% endblock %} + +{% block header %} - - +{% endblock %} + +{% block body %}
{% if error is defined %} @@ -19,5 +18,4 @@
- - +{% endblock %} -- cgit 1.5.1 From b7a7ff6ee39da4981dcfdce61bf8ac4735e3d047 Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 21 Oct 2022 10:46:22 -0700 Subject: Add initial power level event to batch of bulk persisted events when creating a new room. (#14228) --- changelog.d/14228.misc | 1 + synapse/handlers/federation.py | 4 +- synapse/handlers/federation_event.py | 4 +- synapse/handlers/message.py | 14 ++---- synapse/handlers/room.py | 39 ++++----------- synapse/push/bulk_push_rule_evaluator.py | 74 ++++++++++++++++++++++++----- tests/push/test_bulk_push_rule_evaluator.py | 2 +- tests/replication/_base.py | 2 +- 8 files changed, 82 insertions(+), 58 deletions(-) create mode 100644 changelog.d/14228.misc diff --git a/changelog.d/14228.misc b/changelog.d/14228.misc new file mode 100644 index 0000000000..14fe31a8bc --- /dev/null +++ b/changelog.d/14228.misc @@ -0,0 +1 @@ +Add initial power level event to batch of bulk persisted events when creating a new room. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 275a37a575..4fbc79a6cb 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1017,7 +1017,9 @@ class FederationHandler: context = EventContext.for_outlier(self._storage_controllers) - await self._bulk_push_rule_evaluator.action_for_event_by_user(event, context) + await self._bulk_push_rule_evaluator.action_for_events_by_user( + [(event, context)] + ) try: await self._federation_event_handler.persist_events_and_notify( event.room_id, [(event, context)] diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 06e41b5cc0..7da6316a82 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -2171,8 +2171,8 @@ class FederationEventHandler: min_depth, ) else: - await self._bulk_push_rule_evaluator.action_for_event_by_user( - event, context + await self._bulk_push_rule_evaluator.action_for_events_by_user( + [(event, context)] ) try: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 15b828dd74..468900a07f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1433,17 +1433,9 @@ class EventCreationHandler: a room that has been un-partial stated. """ - for event, context in events_and_context: - # Skip push notification actions for historical messages - # because we don't want to notify people about old history back in time. - # The historical messages also do not have the proper `context.current_state_ids` - # and `state_groups` because they have `prev_events` that aren't persisted yet - # (historical messages persisted in reverse-chronological order). - if not event.internal_metadata.is_historical(): - with opentracing.start_active_span("calculate_push_actions"): - await self._bulk_push_rule_evaluator.action_for_event_by_user( - event, context - ) + await self._bulk_push_rule_evaluator.action_for_events_by_user( + events_and_context + ) try: # If we're a worker we need to hit out to the master. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 638f54051a..cc1e5c8f97 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1055,9 +1055,6 @@ class RoomCreationHandler: event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""} depth = 1 - # the last event sent/persisted to the db - last_sent_event_id: Optional[str] = None - # the most recently created event prev_event: List[str] = [] # a map of event types, state keys -> event_ids. 
We collect these mappings as events are @@ -1102,26 +1099,6 @@ class RoomCreationHandler: return new_event, new_context - async def send( - event: EventBase, - context: synapse.events.snapshot.EventContext, - creator: Requester, - ) -> int: - nonlocal last_sent_event_id - - ev = await self.event_creation_handler.handle_new_client_event( - requester=creator, - events_and_context=[(event, context)], - ratelimit=False, - ignore_shadow_ban=True, - ) - - last_sent_event_id = ev.event_id - - # we know it was persisted, so must have a stream ordering - assert ev.internal_metadata.stream_ordering - return ev.internal_metadata.stream_ordering - try: config = self._presets_dict[preset_config] except KeyError: @@ -1135,10 +1112,14 @@ class RoomCreationHandler: ) logger.debug("Sending %s in new room", EventTypes.Member) - await send(creation_event, creation_context, creator) + ev = await self.event_creation_handler.handle_new_client_event( + requester=creator, + events_and_context=[(creation_event, creation_context)], + ratelimit=False, + ignore_shadow_ban=True, + ) + last_sent_event_id = ev.event_id - # Room create event must exist at this point - assert last_sent_event_id is not None member_event_id, _ = await self.room_member_handler.update_membership( creator, creator.user, @@ -1157,6 +1138,7 @@ class RoomCreationHandler: depth += 1 state_map[(EventTypes.Member, creator.user.to_string())] = member_event_id + events_to_send = [] # We treat the power levels override specially as this needs to be one # of the first events that get sent into a room. pl_content = initial_state.pop((EventTypes.PowerLevels, ""), None) @@ -1165,7 +1147,7 @@ EventTypes.PowerLevels, pl_content, False ) current_state_group = power_context._state_group - await send(power_event, power_context, creator) + events_to_send.append((power_event, power_context)) else: power_level_content: JsonDict = { "users": {creator_id: 100}, @@ -1214,9 +1196,8 @@ False, ) current_state_group = pl_context._state_group - await send(pl_event, pl_context, creator) + events_to_send.append((pl_event, pl_context)) - events_to_send = [] if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state: room_alias_event, room_alias_context = await create_event( EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index a75386f6a0..d7795a9080 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -165,8 +165,21 @@ class BulkPushRuleEvaluator: return rules_by_user async def _get_power_levels_and_sender_level( - self, event: EventBase, context: EventContext + self, + event: EventBase, + context: EventContext, + event_id_to_event: Mapping[str, EventBase], ) -> Tuple[dict, Optional[int]]: + """ + Given an event and an event context, get the power level event relevant to the event + and the power level of the sender of the event. + Args: + event: event to check + context: context of event to check + event_id_to_event: a mapping of event_id to event for a set of events being + batch persisted.
This is needed as the sought-after power level event may + be in this batch rather than the DB + """ # There are no power levels and sender levels possible to get from outlier if event.internal_metadata.is_outlier(): return {}, None @@ -177,15 +190,26 @@ class BulkPushRuleEvaluator: ) pl_event_id = prev_state_ids.get(POWER_KEY) + # fastpath: if there's a power level event, that's all we need, and + # not having a power level event is an extreme edge case if pl_event_id: - # fastpath: if there's a power level event, that's all we need, and - # not having a power level event is an extreme edge case - auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)} + # Get the power level event from the batch, or fall back to the database. + pl_event = event_id_to_event.get(pl_event_id) + if pl_event: + auth_events = {POWER_KEY: pl_event} + else: + auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)} else: auth_events_ids = self._event_auth_handler.compute_auth_events( event, prev_state_ids, for_verification=False ) auth_events_dict = await self.store.get_events(auth_events_ids) + # Some needed auth events might be in the batch, combine them with those + # fetched from the database. + for auth_event_id in auth_events_ids: + auth_event = event_id_to_event.get(auth_event_id) + if auth_event: + auth_events_dict[auth_event_id] = auth_event auth_events = {(e.type, e.state_key): e for e in auth_events_dict.values()} sender_level = get_user_power_level(event.sender, auth_events) @@ -194,16 +218,38 @@ class BulkPushRuleEvaluator: return pl_event.content if pl_event else {}, sender_level - @measure_func("action_for_event_by_user") - async def action_for_event_by_user( - self, event: EventBase, context: EventContext + async def action_for_events_by_user( + self, events_and_context: List[Tuple[EventBase, EventContext]] ) -> None: - """Given an event and context, evaluate the push rules, check if the message - should increment the unread count, and insert the results into the - event_push_actions_staging table. + """Given a list of events and their associated contexts, evaluate the push rules + for each event, check if the message should increment the unread count, and + insert the results into the event_push_actions_staging table. """ - if not event.internal_metadata.is_notifiable(): - # Push rules for events that aren't notifiable can't be processed by this + # For batched events the power level events may not have been persisted yet, + # so we pass in the batched events. Thus if the event cannot be found in the + # database we can check in the batch. + event_id_to_event = {e.event_id: e for e, _ in events_and_context} + for event, context in events_and_context: + await self._action_for_event_by_user(event, context, event_id_to_event) + + @measure_func("action_for_event_by_user") + async def _action_for_event_by_user( + self, + event: EventBase, + context: EventContext, + event_id_to_event: Mapping[str, EventBase], + ) -> None: + + if ( + not event.internal_metadata.is_notifiable() + or event.internal_metadata.is_historical() + ): + # Push rules for events that aren't notifiable can't be processed by this and + # we want to skip push notification actions for historical messages + # because we don't want to notify people about old history back in time. + # The historical messages also do not have the proper `context.current_state_ids` + # and `state_groups` because they have `prev_events` that aren't persisted yet + # (historical messages persisted in reverse-chronological order). 
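The batch-aware lookup this commit threads through the evaluator boils down to a few lines: events in the in-flight batch are not yet in the database, so the batch must be consulted before the store. A minimal sketch with illustrative names, not the actual Synapse helper:

    # Sketch: resolve an event id against the in-flight batch before the DB.
    async def get_event_from_batch_or_db(store, event_id, event_id_to_event):
        event = event_id_to_event.get(event_id)  # check the batch first
        if event is not None:
            return event
        return await store.get_event(event_id)  # fall back to the database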
return # Disable counting as unread unless the experimental configuration is @@ -223,7 +269,9 @@ class BulkPushRuleEvaluator: ( power_levels, sender_power_level, - ) = await self._get_power_levels_and_sender_level(event, context) + ) = await self._get_power_levels_and_sender_level( + event, context, event_id_to_event + ) # Find the event's thread ID. relation = relation_from_event(event) diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 675d7df2ac..594e7937a8 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -71,4 +71,4 @@ class TestBulkPushRuleEvaluator(unittest.HomeserverTestCase): bulk_evaluator = BulkPushRuleEvaluator(self.hs) # should not raise - self.get_success(bulk_evaluator.action_for_event_by_user(event, context)) + self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)])) diff --git a/tests/replication/_base.py b/tests/replication/_base.py index ce53f808db..121f3d8d65 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -371,7 +371,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): config=worker_hs.config.server.listeners[0], resource=resource, server_version_string="1", - max_request_body_size=4096, + max_request_body_size=8192, reactor=self.reactor, ) -- cgit 1.5.1 From 278b530875ff11ecdb0a7242a7a7bda457bca69e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Oct 2022 10:19:55 +0100 Subject: Bump serde from 1.0.145 to 1.0.147 (#14277) --- Cargo.lock | 8 ++++---- changelog.d/14277.misc | 1 + rust/Cargo.toml | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/14277.misc diff --git a/Cargo.lock b/Cargo.lock index b3090ad726..c1d29c3cac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.145" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" +checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.145" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" +checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" dependencies = [ "proc-macro2", "quote", diff --git a/changelog.d/14277.misc b/changelog.d/14277.misc new file mode 100644 index 0000000000..0eebf3f961 --- /dev/null +++ b/changelog.d/14277.misc @@ -0,0 +1 @@ +Bump serde from 1.0.145 to 1.0.147. 
diff --git a/rust/Cargo.toml b/rust/Cargo.toml index cffaa5b51b..2cac123102 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -27,7 +27,7 @@ pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", pyo3-log = "0.7.0" pythonize = "0.17.0" regex = "1.6.0" -serde = { version = "1.0.144", features = ["derive"] } +serde = { version = "1.0.147", features = ["derive"] } serde_json = "1.0.85" [build-dependencies] -- cgit 1.5.1 From 5f0648841810b19499a6e03e05fe1ca404aa608e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Oct 2022 10:20:13 +0100 Subject: Bump anyhow from 1.0.65 to 1.0.66 (#14278) --- Cargo.lock | 4 ++-- changelog.d/14278.misc | 1 + rust/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14278.misc diff --git a/Cargo.lock b/Cargo.lock index c1d29c3cac..221ca19b7d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.65" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602" +checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" [[package]] name = "arc-swap" diff --git a/changelog.d/14278.misc b/changelog.d/14278.misc new file mode 100644 index 0000000000..4b9dd89957 --- /dev/null +++ b/changelog.d/14278.misc @@ -0,0 +1 @@ +Bump anyhow from 1.0.65 to 1.0.66. diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 2cac123102..3c1fb7a8e7 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -20,7 +20,7 @@ crate-type = ["lib", "cdylib"] name = "synapse.synapse_rust" [dependencies] -anyhow = "1.0.63" +anyhow = "1.0.66" lazy_static = "1.4.0" log = "0.4.17" pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] } -- cgit 1.5.1 From cd02bfc026193128f6dd6a3ce851445f0898dab2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Oct 2022 10:38:40 +0100 Subject: Bump prometheus-client from 0.14.0 to 0.15.0 (#14274) --- changelog.d/14274.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14274.misc diff --git a/changelog.d/14274.misc b/changelog.d/14274.misc new file mode 100644 index 0000000000..e2077dc0ad --- /dev/null +++ b/changelog.d/14274.misc @@ -0,0 +1 @@ +Bump prometheus-client from 0.14.0 to 0.15.0. diff --git a/poetry.lock b/poetry.lock index 2713cf2d69..4b76d170c8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -710,7 +710,7 @@ test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock [[package]] name = "prometheus-client" -version = "0.14.0" +version = "0.15.0" description = "Python client for the Prometheus monitoring system." 
category = "main" optional = false @@ -2301,8 +2301,8 @@ platformdirs = [ {file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"}, ] prometheus-client = [ - {file = "prometheus_client-0.14.0-py3-none-any.whl", hash = "sha256:f4aba3fdd1735852049f537c1f0ab177159b7ab76f271ecc4d2f45aa2a1d01f2"}, - {file = "prometheus_client-0.14.0.tar.gz", hash = "sha256:8f7a922dd5455ad524b6ba212ce8eb2b4b05e073f4ec7218287f88b1cac34750"}, + {file = "prometheus_client-0.15.0-py3-none-any.whl", hash = "sha256:db7c05cbd13a0f79975592d112320f2605a325969b270a94b71dcabc47b931d2"}, + {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"}, ] psycopg2 = [ {file = "psycopg2-2.9.4-cp310-cp310-win32.whl", hash = "sha256:8de6a9fc5f42fa52f559e65120dcd7502394692490c98fed1221acf0819d7797"}, -- cgit 1.5.1 From cb76892c7d25bc7ea85c298fb1884c7ff56a9998 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Oct 2022 10:39:00 +0100 Subject: Bump setuptools-rust from 1.5.1 to 1.5.2 (#14273) --- changelog.d/14273.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14273.misc diff --git a/changelog.d/14273.misc b/changelog.d/14273.misc new file mode 100644 index 0000000000..d29dc8510d --- /dev/null +++ b/changelog.d/14273.misc @@ -0,0 +1 @@ +Bump setuptools-rust from 1.5.1 to 1.5.2. diff --git a/poetry.lock b/poetry.lock index 4b76d170c8..1e7440ae0a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1120,7 +1120,7 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( [[package]] name = "setuptools-rust" -version = "1.5.1" +version = "1.5.2" description = "Setuptools Rust extension plugin" category = "main" optional = false @@ -2539,8 +2539,8 @@ setuptools = [ {file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"}, ] setuptools-rust = [ - {file = "setuptools-rust-1.5.1.tar.gz", hash = "sha256:0e05e456645d59429cb1021370aede73c0760e9360bbfdaaefb5bced530eb9d7"}, - {file = "setuptools_rust-1.5.1-py3-none-any.whl", hash = "sha256:306b236ff3aa5229180e58292610d0c2c51bb488191122d2fc559ae4caeb7d5e"}, + {file = "setuptools-rust-1.5.2.tar.gz", hash = "sha256:d8daccb14dc0eae1b6b6eb3ecef79675bd37b4065369f79c35393dd5c55652c7"}, + {file = "setuptools_rust-1.5.2-py3-none-any.whl", hash = "sha256:8eb45851e34288f2296cd5ab9e924535ac1757318b730a13fe6836867843f206"}, ] signedjson = [ {file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"}, -- cgit 1.5.1 From 673970bb5ae75898e2ab8480daa7d55d4577f757 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Oct 2022 10:39:16 +0100 Subject: Bump types-requests from 2.28.11 to 2.28.11.2 (#14272) --- changelog.d/14272.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14272.misc diff --git a/changelog.d/14272.misc b/changelog.d/14272.misc new file mode 100644 index 0000000000..2ae88cc50c --- /dev/null +++ b/changelog.d/14272.misc @@ -0,0 +1 @@ +Bump types-requests from 2.28.11 to 2.28.11.2. 
diff --git a/poetry.lock b/poetry.lock index 1e7440ae0a..8e698b2274 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1469,7 +1469,7 @@ python-versions = "*" [[package]] name = "types-requests" -version = "2.28.11" +version = "2.28.11.2" description = "Typing stubs for requests" category = "dev" optional = false @@ -2787,8 +2787,8 @@ types-PyYAML = [ {file = "types_PyYAML-6.0.12-py3-none-any.whl", hash = "sha256:29228db9f82df4f1b7febee06bbfb601677882e98a3da98132e31c6874163e15"}, ] types-requests = [ - {file = "types-requests-2.28.11.tar.gz", hash = "sha256:7ee827eb8ce611b02b5117cfec5da6455365b6a575f5e3ff19f655ba603e6b4e"}, - {file = "types_requests-2.28.11-py3-none-any.whl", hash = "sha256:af5f55e803cabcfb836dad752bd6d8a0fc8ef1cd84243061c0e27dee04ccf4fd"}, + {file = "types-requests-2.28.11.2.tar.gz", hash = "sha256:fdcd7bd148139fb8eef72cf4a41ac7273872cad9e6ada14b11ff5dfdeee60ed3"}, + {file = "types_requests-2.28.11.2-py3-none-any.whl", hash = "sha256:14941f8023a80b16441b3b46caffcbfce5265fd14555844d6029697824b5a2ef"}, ] types-setuptools = [ {file = "types-setuptools-65.5.0.1.tar.gz", hash = "sha256:5b297081c8f1fbd992cd8b305a97ed96ee6ffc765e9115124029597dd10b8a71"}, -- cgit 1.5.1 From 94f239d911990cb73e39d4607cb3aa06015a7bdf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Oct 2022 10:40:08 +0100 Subject: Bump jinja2 from 3.0.3 to 3.1.2 (#14271) --- changelog.d/14271.misc | 1 + poetry.lock | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14271.misc diff --git a/changelog.d/14271.misc b/changelog.d/14271.misc new file mode 100644 index 0000000000..09a011bc8c --- /dev/null +++ b/changelog.d/14271.misc @@ -0,0 +1 @@ +Bump jinja2 from 3.0.3 to 3.1.2. diff --git a/poetry.lock b/poetry.lock index 8e698b2274..e7f66a5b35 100644 --- a/poetry.lock +++ b/poetry.lock @@ -438,11 +438,11 @@ trio = ["async_generator", "trio"] [[package]] name = "jinja2" -version = "3.0.3" +version = "3.1.2" description = "A very fast and expressive template engine." 
category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] MarkupSafe = ">=2.0" @@ -1999,8 +1999,8 @@ jeepney = [ {file = "jeepney-0.7.1.tar.gz", hash = "sha256:fa9e232dfa0c498bd0b8a3a73b8d8a31978304dcef0515adc859d4e096f96f4f"}, ] jinja2 = [ - {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"}, - {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"}, + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, ] jsonschema = [ {file = "jsonschema-4.16.0-py3-none-any.whl", hash = "sha256:9e74b8f9738d6a946d70705dc692b74b5429cd0960d58e79ffecfc43b2221eb9"}, -- cgit 1.5.1 From 6c82b3759f24a14b7a321df27300a29f94317a5a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Oct 2022 10:40:30 +0100 Subject: Bump pysaml2 from 7.1.2 to 7.2.1 (#14270) --- changelog.d/14270.misc | 1 + poetry.lock | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14270.misc diff --git a/changelog.d/14270.misc b/changelog.d/14270.misc new file mode 100644 index 0000000000..526c8ac85a --- /dev/null +++ b/changelog.d/14270.misc @@ -0,0 +1 @@ +Bump pysaml2 from 7.1.2 to 7.2.1. diff --git a/poetry.lock b/poetry.lock index e7f66a5b35..3270b90a71 100644 --- a/poetry.lock +++ b/poetry.lock @@ -918,14 +918,14 @@ python-versions = ">=3.7" [[package]] name = "pysaml2" -version = "7.1.2" +version = "7.2.1" description = "Python implementation of SAML Version 2 Standard" category = "main" optional = true python-versions = "<4,>=3.6" [package.dependencies] -cryptography = ">=1.4" +cryptography = ">=3.1" defusedxml = "*" importlib-resources = {version = "*", markers = "python_version < \"3.9\""} pyOpenSSL = "*" @@ -2445,8 +2445,8 @@ pyrsistent = [ {file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"}, ] pysaml2 = [ - {file = "pysaml2-7.1.2-py2.py3-none-any.whl", hash = "sha256:d915961aaa4d4d97d952b30fe5d18d64cf053465acf3e38d8090b36c5ff08325"}, - {file = "pysaml2-7.1.2.tar.gz", hash = "sha256:1ec94442306511b93fe7a5710f224e05e0aba948682d506614d1e04f3232f827"}, + {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = "sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"}, + {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"}, ] python-dateutil = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, -- cgit 1.5.1 From 1469fed0e39d31a063e8a54c2ea027774eec6acb Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 24 Oct 2022 10:45:10 +0100 Subject: Add debugging to help diagnose lost device-list-update (#14268) --- changelog.d/14268.misc | 1 + synapse/storage/databases/main/devices.py | 54 +++++++++++++++++++++---------- 2 files changed, 38 insertions(+), 17 deletions(-) create mode 100644 changelog.d/14268.misc diff --git a/changelog.d/14268.misc b/changelog.d/14268.misc new file mode 100644 index 0000000000..894b1e1d4c --- /dev/null +++ b/changelog.d/14268.misc @@ -0,0 +1 @@ +Add debugging to help diagnose lost 
device-list-update. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 830b076a32..979dd4e17e 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -274,6 +274,13 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): destination, int(from_stream_id) ) if not has_changed: + # debugging for https://github.com/matrix-org/synapse/issues/14251 + issue_8631_logger.debug( + "%s: no change between %i and %i", + destination, + from_stream_id, + now_stream_id, + ) return now_stream_id, [] updates = await self.db_pool.runInteraction( @@ -1848,7 +1855,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): self, txn: LoggingTransaction, user_id: str, - device_ids: Iterable[str], + device_id: str, hosts: Collection[str], stream_ids: List[int], context: Optional[Dict[str, str]], @@ -1864,6 +1871,21 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): stream_id_iterator = iter(stream_ids) encoded_context = json_encoder.encode(context) + mark_sent = not self.hs.is_mine_id(user_id) + + values = [ + ( + destination, + next(stream_id_iterator), + user_id, + device_id, + mark_sent, + now, + encoded_context if whitelisted_homeserver(destination) else "{}", + ) + for destination in hosts + ] + self.db_pool.simple_insert_many_txn( txn, table="device_lists_outbound_pokes", @@ -1876,23 +1898,21 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): "ts", "opentracing_context", ), - values=[ - ( - destination, - next(stream_id_iterator), - user_id, - device_id, - not self.hs.is_mine_id( - user_id - ), # We only need to send out update for *our* users - now, - encoded_context if whitelisted_homeserver(destination) else "{}", - ) - for destination in hosts - for device_id in device_ids - ], + values=values, ) + # debugging for https://github.com/matrix-org/synapse/issues/14251 + if issue_8631_logger.isEnabledFor(logging.DEBUG): + issue_8631_logger.debug( + "Recorded outbound pokes for %s:%s with device stream ids %s", + user_id, + device_id, + { + stream_id: destination + for (destination, stream_id, _, _, _, _, _) in values + }, + ) + def _add_device_outbound_room_poke_txn( self, txn: LoggingTransaction, @@ -1997,7 +2017,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): self._add_device_outbound_poke_to_stream_txn( txn, user_id=user_id, - device_ids=[device_id], + device_id=device_id, hosts=hosts, stream_ids=stream_ids, context=context, -- cgit 1.5.1 From c6987f65fe14c05050b2d63659384c447f5d039a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Oct 2022 10:13:29 +0000 Subject: Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0 (#14275) --- .github/workflows/docs.yaml | 2 +- changelog.d/14275.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14275.misc diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 798e308f20..7db8c0c3ea 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@v3 - name: Setup mdbook - uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14 + uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0 with: mdbook-version: '0.4.17' diff --git a/changelog.d/14275.misc b/changelog.d/14275.misc new file mode 100644 index 0000000000..bcda89ff92 --- /dev/null +++ 
b/changelog.d/14275.misc @@ -0,0 +1 @@ +Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. -- cgit 1.5.1 From 386e72a22d687002f9a43b2275b1308b6a80d48c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Oct 2022 10:16:33 +0000 Subject: Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0 (#14276) --- .github/workflows/docs.yaml | 2 +- changelog.d/14276.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14276.misc diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 7db8c0c3ea..575412d965 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -58,7 +58,7 @@ jobs: # Deploy to the target directory. - name: Deploy to gh pages - uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0 + uses: peaceiris/actions-gh-pages@de7ea6f8efb354206b205ef54722213d99067935 # v3.9.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: ./book diff --git a/changelog.d/14276.misc b/changelog.d/14276.misc new file mode 100644 index 0000000000..689e9f3728 --- /dev/null +++ b/changelog.d/14276.misc @@ -0,0 +1 @@ +Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. -- cgit 1.5.1 From 09b588854e3a6abc4ea2eaa68bb0345f23be5ce8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 24 Oct 2022 13:05:14 +0100 Subject: Fix `TypeError: 'dict_keys' object is not reversible` (#14280) --- changelog.d/14280.bugfix | 1 + synapse/federation/sender/__init__.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14280.bugfix diff --git a/changelog.d/14280.bugfix b/changelog.d/14280.bugfix new file mode 100644 index 0000000000..c546d2be48 --- /dev/null +++ b/changelog.d/14280.bugfix @@ -0,0 +1 @@ +Fix broken outbound federation when using Python 3.7. Broke in v1.70.0rc1. diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 774ecd81b6..3ad483efe0 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -536,8 +536,7 @@ class FederationSender(AbstractFederationSender): if event_entries: now = self.clock.time_msec() - last_id = next(reversed(event_ids)) - ts = event_to_received_ts[last_id] + ts = max(t for t in event_to_received_ts.values() if t) assert ts is not None synapse.metrics.event_processing_lag.labels( -- cgit 1.5.1 From 1e73effebf4f307f499c8cd4be58f805959beb38 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 24 Oct 2022 13:56:18 +0100 Subject: Fix typo in cibuildwheel conf introduced in #14253 --- .ci/scripts/auditwheel_wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/scripts/auditwheel_wrapper.py b/.ci/scripts/auditwheel_wrapper.py index c744644c25..a33b39314f 100755 --- a/.ci/scripts/auditwheel_wrapper.py +++ b/.ci/scripts/auditwheel_wrapper.py @@ -94,7 +94,7 @@ def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None: # `delocate-listdeps`. 
subprocess.run(["delocate-listdeps", wheel_file], check=True) subprocess.run( - ["delocate-wheel" "--require-archs", archs, "-w", dest_dir, wheel_file], + ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file], check=True, ) else: -- cgit 1.5.1 From 872ea2f4de15419210808fe2cade7421e4a4b55d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Oct 2022 14:08:22 +0100 Subject: Bump serde_json from 1.0.86 to 1.0.87 (#14279) --- Cargo.lock | 4 ++-- changelog.d/14279.misc | 1 + rust/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14279.misc diff --git a/Cargo.lock b/Cargo.lock index 221ca19b7d..1107578a20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -343,9 +343,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41feea4228a6f1cd09ec7a3593a682276702cd67b5273544757dae23c096f074" +checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" dependencies = [ "itoa", "ryu", diff --git a/changelog.d/14279.misc b/changelog.d/14279.misc new file mode 100644 index 0000000000..af7e3265c0 --- /dev/null +++ b/changelog.d/14279.misc @@ -0,0 +1 @@ +Bump serde_json from 1.0.86 to 1.0.87. diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 3c1fb7a8e7..e26201884c 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -28,7 +28,7 @@ pyo3-log = "0.7.0" pythonize = "0.17.0" regex = "1.6.0" serde = { version = "1.0.147", features = ["derive"] } -serde_json = "1.0.85" +serde_json = "1.0.87" [build-dependencies] blake2 = "0.10.4" -- cgit 1.5.1 From 19c0e55ef7742d67cff1cb6fb7c3e862b86ea788 Mon Sep 17 00:00:00 2001 From: Ryan Miguel <1818590+renegaderyu@users.noreply.github.com> Date: Mon, 24 Oct 2022 08:55:06 -0700 Subject: Return NOT_JSON if decode fails and defer set_timeline_upper_limit ca… (#14262) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Return NOT_JSON if decode fails and defer set_timeline_upper_limit call until after check_valid_filter. Fixes #13661. Signed-off-by: Ryan Miguel . * Reword changelog --- changelog.d/14262.misc | 1 + synapse/rest/client/sync.py | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14262.misc diff --git a/changelog.d/14262.misc b/changelog.d/14262.misc new file mode 100644 index 0000000000..c1d23bc67d --- /dev/null +++ b/changelog.d/14262.misc @@ -0,0 +1 @@ +Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. 
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 8a16459105..f2013faeb2 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -146,12 +146,12 @@ class SyncRestServlet(RestServlet): elif filter_id.startswith("{"): try: filter_object = json_decoder.decode(filter_id) - set_timeline_upper_limit( - filter_object, self.hs.config.server.filter_timeline_limit - ) except Exception: - raise SynapseError(400, "Invalid filter JSON") + raise SynapseError(400, "Invalid filter JSON", errcode=Codes.NOT_JSON) self.filtering.check_valid_filter(filter_object) + set_timeline_upper_limit( + filter_object, self.hs.config.server.filter_timeline_limit + ) filter_collection = FilterCollection(self.hs, filter_object) else: try: -- cgit 1.5.1 From 581b37b5d6c1c9430108930a4fe409cf3f86332f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 24 Oct 2022 12:07:16 -0400 Subject: Revert behavior change for bundling edits of non-message events (#14283) --- changelog.d/14283.bugfix | 1 + synapse/storage/databases/main/relations.py | 11 +++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14283.bugfix diff --git a/changelog.d/14283.bugfix b/changelog.d/14283.bugfix new file mode 100644 index 0000000000..a80a8c0361 --- /dev/null +++ b/changelog.d/14283.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 1de62ee9df..c022510e76 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -484,11 +484,12 @@ class RelationsWorkerStore(SQLBaseStore): the event will map to None. """ - # We only allow edits for events that have the same sender and event type. - # We can't assert these things during regular event auth so we have to do - # the checks post hoc. + # We only allow edits for `m.room.message` events that have the same sender + # and event type. We can't assert these things during regular event auth so + # we have to do the checks post hoc. - # Fetches latest edit that has the same type and sender as the original. + # Fetches latest edit that has the same type and sender as the + # original, and is an `m.room.message`. if isinstance(self.database_engine, PostgresEngine): # The `DISTINCT ON` clause will pick the *first* row it encounters, # so ordering by origin server ts + event ID desc will ensure we get @@ -504,6 +505,7 @@ class RelationsWorkerStore(SQLBaseStore): WHERE %s AND relation_type = ? + AND edit.type = 'm.room.message' ORDER by original.event_id DESC, edit.origin_server_ts DESC, edit.event_id DESC """ else: @@ -522,6 +524,7 @@ class RelationsWorkerStore(SQLBaseStore): WHERE %s AND relation_type = ? + AND edit.type = 'm.room.message' ORDER by edit.origin_server_ts, edit.event_id """ -- cgit 1.5.1 From 8c94dd3a277d4e11192f98a9ca32cb6638606b66 Mon Sep 17 00:00:00 2001 From: asymmetric Date: Tue, 25 Oct 2022 11:22:55 +0200 Subject: Enable WAL for SQLite (#13897) Signed-off-by: Lorenzo Manacorda --- changelog.d/13897.feature | 1 + synapse/storage/engines/sqlite.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/13897.feature diff --git a/changelog.d/13897.feature b/changelog.d/13897.feature new file mode 100644 index 0000000000..d46fdf9fa5 --- /dev/null +++ b/changelog.d/13897.feature @@ -0,0 +1 @@ +Enable Write-Ahead Logging for SQLite installs. 
Contributed by [asymmetric](https://github.com/asymmetric). diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index faa574dbfd..14260442b6 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -88,6 +88,10 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]): db_conn.create_function("rank", 1, _rank) db_conn.execute("PRAGMA foreign_keys = ON;") + + # Enable WAL. + # see https://www.sqlite.org/wal.html + db_conn.execute("PRAGMA journal_mode = WAL;") db_conn.commit() def is_deadlock(self, error: Exception) -> bool: -- cgit 1.5.1 From 8c8fcdb87dd8568bbcbc43417fd19609df175957 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 25 Oct 2022 11:02:55 +0100 Subject: 1.70.0rc2 --- CHANGES.md | 18 ++++++++++++++++++ changelog.d/14248.bugfix | 1 - changelog.d/14253.misc | 1 - changelog.d/14259.misc | 1 - changelog.d/14280.bugfix | 1 - changelog.d/14283.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 8 files changed, 25 insertions(+), 6 deletions(-) delete mode 100644 changelog.d/14248.bugfix delete mode 100644 changelog.d/14253.misc delete mode 100644 changelog.d/14259.misc delete mode 100644 changelog.d/14280.bugfix delete mode 100644 changelog.d/14283.bugfix diff --git a/CHANGES.md b/CHANGES.md index 17e666709b..8052dbe466 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,21 @@ +Synapse 1.70.0rc2 (2022-10-25) +============================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248)) +- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280)) +- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283)) + + +Internal Changes +---------------- + +- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253)) +- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259)) + + Synapse 1.70.0rc1 (2022-10-19) ============================== diff --git a/changelog.d/14248.bugfix b/changelog.d/14248.bugfix deleted file mode 100644 index 203c52c16b..0000000000 --- a/changelog.d/14248.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. diff --git a/changelog.d/14253.misc b/changelog.d/14253.misc deleted file mode 100644 index c1382ddafa..0000000000 --- a/changelog.d/14253.misc +++ /dev/null @@ -1 +0,0 @@ -Build ABI3 wheels for cpython. diff --git a/changelog.d/14259.misc b/changelog.d/14259.misc deleted file mode 100644 index 7425498964..0000000000 --- a/changelog.d/14259.misc +++ /dev/null @@ -1 +0,0 @@ -Only build aarch64 wheels for cpython manylinux. diff --git a/changelog.d/14280.bugfix b/changelog.d/14280.bugfix deleted file mode 100644 index c546d2be48..0000000000 --- a/changelog.d/14280.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix broken outbound federation when using Python 3.7. Broke in v1.70.0rc1. 
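For context on the #14280 fix above: dict views only became reversible in Python 3.8, so `next(reversed(event_ids))` raises `TypeError: 'dict_keys' object is not reversible` on 3.7. A small illustration of the failure and of the version-independent replacement used in the fix:

    event_to_received_ts = {"$a": 100, "$b": None, "$c": 300}

    # Raises TypeError on Python 3.7 (dict views are reversible only from 3.8):
    #     next(reversed(event_to_received_ts.keys()))

    # Version-independent, and skips unset timestamps, as in the fix:
    ts = max(t for t in event_to_received_ts.values() if t)
    assert ts == 300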
diff --git a/changelog.d/14283.bugfix b/changelog.d/14283.bugfix deleted file mode 100644 index a80a8c0361..0000000000 --- a/changelog.d/14283.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. diff --git a/debian/changelog b/debian/changelog index 60ec92b47d..c3139b7271 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium + + * New Synapse release 1.70.0rc2. + + -- Synapse Packaging team Tue, 25 Oct 2022 10:59:47 +0100 + matrix-synapse-py3 (1.70.0~rc1) stable; urgency=medium * New Synapse release 1.70.0rc1. diff --git a/pyproject.toml b/pyproject.toml index ec0fbd1eb9..7cd4a15435 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.70.0rc1" +version = "1.70.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From d12591996308cda35cb28cec839d5aacce32f52c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 25 Oct 2022 11:27:56 +0100 Subject: Cache rust build deps in trial CI (#14287) --- .github/workflows/tests.yml | 8 ++++++++ changelog.d/14287.misc | 1 + 2 files changed, 9 insertions(+) create mode 100644 changelog.d/14287.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1115dcc5f3..ff5cf0c534 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -167,6 +167,14 @@ jobs: -e POSTGRES_PASSWORD=postgres \ -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \ postgres:${{ matrix.job.postgres-version }} + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.58.1 + override: true + - uses: Swatinem/rust-cache@v2 + - uses: matrix-org/setup-python-poetry@v1 with: python-version: ${{ matrix.job.python-version }} diff --git a/changelog.d/14287.misc b/changelog.d/14287.misc new file mode 100644 index 0000000000..85c6dc2765 --- /dev/null +++ b/changelog.d/14287.misc @@ -0,0 +1 @@ +Add Rust cache to CI for `trial` runs. -- cgit 1.5.1 From c9dffd5b330553c5803784be5bc0e2479fab79b0 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Tue, 25 Oct 2022 11:39:25 +0100 Subject: Remove unused `@lru_cache` decorator (#13595) * Remove unused `@lru_cache` decorator Spotted this working on something else. Co-authored-by: David Robertson --- changelog.d/13595.misc | 1 + synapse/util/caches/descriptors.py | 104 ---------------------------------- tests/util/caches/test_descriptors.py | 40 ++----------- 3 files changed, 5 insertions(+), 140 deletions(-) create mode 100644 changelog.d/13595.misc diff --git a/changelog.d/13595.misc b/changelog.d/13595.misc new file mode 100644 index 0000000000..71959a6ee7 --- /dev/null +++ b/changelog.d/13595.misc @@ -0,0 +1 @@ +Remove unused `@lru_cache` decorator. diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index b3c748ef44..75428d19ba 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import enum import functools import inspect import logging @@ -146,109 +145,6 @@ class _CacheDescriptorBase: ) -class _LruCachedFunction(Generic[F]): - cache: LruCache[CacheKey, Any] - __call__: F - - -def lru_cache( - *, max_entries: int = 1000, cache_context: bool = False -) -> Callable[[F], _LruCachedFunction[F]]: - """A method decorator that applies a memoizing cache around the function. - - This is more-or-less a drop-in equivalent to functools.lru_cache, although note - that the signature is slightly different. - - The main differences with functools.lru_cache are: - (a) the size of the cache can be controlled via the cache_factor mechanism - (b) the wrapped function can request a "cache_context" which provides a - callback mechanism to indicate that the result is no longer valid - (c) prometheus metrics are exposed automatically. - - The function should take zero or more arguments, which are used as the key for the - cache. Single-argument functions use that argument as the cache key; otherwise the - arguments are built into a tuple. - - Cached functions can be "chained" (i.e. a cached function can call other cached - functions and get appropriately invalidated when they called caches are - invalidated) by adding a special "cache_context" argument to the function - and passing that as a kwarg to all caches called. For example: - - @lru_cache(cache_context=True) - def foo(self, key, cache_context): - r1 = self.bar1(key, on_invalidate=cache_context.invalidate) - r2 = self.bar2(key, on_invalidate=cache_context.invalidate) - return r1 + r2 - - The wrapped function also has a 'cache' property which offers direct access to the - underlying LruCache. - """ - - def func(orig: F) -> _LruCachedFunction[F]: - desc = LruCacheDescriptor( - orig, - max_entries=max_entries, - cache_context=cache_context, - ) - return cast(_LruCachedFunction[F], desc) - - return func - - -class LruCacheDescriptor(_CacheDescriptorBase): - """Helper for @lru_cache""" - - class _Sentinel(enum.Enum): - sentinel = object() - - def __init__( - self, - orig: Callable[..., Any], - max_entries: int = 1000, - cache_context: bool = False, - ): - super().__init__( - orig, num_args=None, uncached_args=None, cache_context=cache_context - ) - self.max_entries = max_entries - - def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]: - cache: LruCache[CacheKey, Any] = LruCache( - cache_name=self.name, - max_size=self.max_entries, - ) - - get_cache_key = self.cache_key_builder - sentinel = LruCacheDescriptor._Sentinel.sentinel - - @functools.wraps(self.orig) - def _wrapped(*args: Any, **kwargs: Any) -> Any: - invalidate_callback = kwargs.pop("on_invalidate", None) - callbacks = (invalidate_callback,) if invalidate_callback else () - - cache_key = get_cache_key(args, kwargs) - - ret = cache.get(cache_key, default=sentinel, callbacks=callbacks) - if ret != sentinel: - return ret - - # Add our own `cache_context` to argument list if the wrapped function - # has asked for one - if self.add_cache_context: - kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key) - - ret2 = self.orig(obj, *args, **kwargs) - cache.set(cache_key, ret2, callbacks=callbacks) - - return ret2 - - wrapped = cast(CachedFunction, _wrapped) - wrapped.cache = cache - obj.__dict__[self.name] = wrapped - - return wrapped - - class DeferredCacheDescriptor(_CacheDescriptorBase): """A method decorator that applies a memoizing cache around the function. 
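For code that needs plain memoization and none of the invalidation-context or metrics machinery the removed decorator provided, the stdlib equivalent named in its docstring suffices. A sketch with an illustrative helper, not taken from Synapse:

    from functools import lru_cache

    @lru_cache(maxsize=1000)
    def get_localpart(user_id: str) -> str:
        # A pure function of its argument, so safe to memoize.
        return user_id[1:].split(":", 1)[0]

    assert get_localpart("@alice:example.com") == "alice"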
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 78fd7b6961..43475a307f 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -28,7 +28,7 @@ from synapse.logging.context import ( make_deferred_yieldable, ) from synapse.util.caches import descriptors -from synapse.util.caches.descriptors import cached, cachedList, lru_cache +from synapse.util.caches.descriptors import cached, cachedList from tests import unittest from tests.test_utils import get_awaitable_result @@ -36,38 +36,6 @@ from tests.test_utils import get_awaitable_result logger = logging.getLogger(__name__) -class LruCacheDecoratorTestCase(unittest.TestCase): - def test_base(self): - class Cls: - def __init__(self): - self.mock = mock.Mock() - - @lru_cache() - def fn(self, arg1, arg2): - return self.mock(arg1, arg2) - - obj = Cls() - obj.mock.return_value = "fish" - r = obj.fn(1, 2) - self.assertEqual(r, "fish") - obj.mock.assert_called_once_with(1, 2) - obj.mock.reset_mock() - - # a call with different params should call the mock again - obj.mock.return_value = "chips" - r = obj.fn(1, 3) - self.assertEqual(r, "chips") - obj.mock.assert_called_once_with(1, 3) - obj.mock.reset_mock() - - # the two values should now be cached - r = obj.fn(1, 2) - self.assertEqual(r, "fish") - r = obj.fn(1, 3) - self.assertEqual(r, "chips") - obj.mock.assert_not_called() - - def run_on_reactor(): d = defer.Deferred() reactor.callLater(0, d.callback, 0) @@ -478,10 +446,10 @@ class DescriptorTestCase(unittest.TestCase): @cached(cache_context=True) async def func2(self, key, cache_context): - return self.func3(key, on_invalidate=cache_context.invalidate) + return await self.func3(key, on_invalidate=cache_context.invalidate) - @lru_cache(cache_context=True) - def func3(self, key, cache_context): + @cached(cache_context=True) + async def func3(self, key, cache_context): self.invalidate = cache_context.invalidate return 42 -- cgit 1.5.1 From 0f1befd0b1e85da18109ec0870c04156e4da038c Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 25 Oct 2022 14:16:05 +0100 Subject: Tweak changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 8052dbe466..c2a0a2102f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -31,7 +31,7 @@ Features - The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028)) - Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184)) - Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehyrdation](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054)) -- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874). ([\#14148](https://github.com/matrix-org/synapse/issues/14148)) +- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. 
([\#14148](https://github.com/matrix-org/synapse/issues/14148)) - Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179)) - Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212)) -- cgit 1.5.1 From 2d0ba3f89aaf9545d81c4027500e543ec70b68a6 Mon Sep 17 00:00:00 2001 From: "DeepBlueV7.X" Date: Tue, 25 Oct 2022 13:38:01 +0000 Subject: Implementation for MSC3664: Pushrules for relations (#11804) --- changelog.d/11804.feature | 1 + rust/src/push/base_rules.rs | 17 +++ rust/src/push/evaluator.rs | 99 ++++++++++++- rust/src/push/mod.rs | 61 ++++++-- stubs/synapse/synapse_rust/push.pyi | 6 +- synapse/config/experimental.py | 3 + synapse/push/bulk_push_rule_evaluator.py | 49 ++++++- synapse/rest/client/capabilities.py | 5 + synapse/storage/databases/main/push_rule.py | 15 +- tests/push/test_push_rule_evaluator.py | 215 +++++++++++++++++++++++++++- 10 files changed, 454 insertions(+), 17 deletions(-) create mode 100644 changelog.d/11804.feature diff --git a/changelog.d/11804.feature b/changelog.d/11804.feature new file mode 100644 index 0000000000..6420393541 --- /dev/null +++ b/changelog.d/11804.feature @@ -0,0 +1 @@ +Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664). Contributed by Nico. diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 63240cacfc..49802fa4eb 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -25,6 +25,7 @@ use crate::push::Action; use crate::push::Condition; use crate::push::EventMatchCondition; use crate::push::PushRule; +use crate::push::RelatedEventMatchCondition; use crate::push::SetTweak; use crate::push::TweakValue; @@ -114,6 +115,22 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default: true, default_enabled: true, }, + PushRule { + rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"), + priority_class: 5, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch( + RelatedEventMatchCondition { + key: Some(Cow::Borrowed("sender")), + pattern: None, + pattern_type: Some(Cow::Borrowed("user_id")), + rel_type: Cow::Borrowed("m.in_reply_to"), + include_fallbacks: None, + }, + ))]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]), + default: true, + default_enabled: true, + }, PushRule { rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"), priority_class: 5, diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 0365dd01dc..cedd42c54d 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -23,6 +23,7 @@ use regex::Regex; use super::{ utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType}, Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition, + RelatedEventMatchCondition, }; lazy_static! { @@ -49,6 +50,13 @@ pub struct PushRuleEvaluator { /// The power level of the sender of the event, or None if event is an /// outlier. sender_power_level: Option, + + /// The related events, indexed by relation type. 
Flattened in the same manner as + /// `flattened_keys`. + related_events_flattened: BTreeMap>, + + /// If msc3664, push rules for related events, is enabled. + related_event_match_enabled: bool, } #[pymethods] @@ -60,6 +68,8 @@ impl PushRuleEvaluator { room_member_count: u64, sender_power_level: Option, notification_power_levels: BTreeMap, + related_events_flattened: BTreeMap>, + related_event_match_enabled: bool, ) -> Result { let body = flattened_keys .get("content.body") @@ -72,6 +82,8 @@ impl PushRuleEvaluator { room_member_count, notification_power_levels, sender_power_level, + related_events_flattened, + related_event_match_enabled, }) } @@ -156,6 +168,9 @@ impl PushRuleEvaluator { KnownCondition::EventMatch(event_match) => { self.match_event_match(event_match, user_id)? } + KnownCondition::RelatedEventMatch(event_match) => { + self.match_related_event_match(event_match, user_id)? + } KnownCondition::ContainsDisplayName => { if let Some(dn) = display_name { if !dn.is_empty() { @@ -239,6 +254,79 @@ impl PushRuleEvaluator { compiled_pattern.is_match(haystack) } + /// Evaluates a `related_event_match` condition. (MSC3664) + fn match_related_event_match( + &self, + event_match: &RelatedEventMatchCondition, + user_id: Option<&str>, + ) -> Result { + // First check if related event matching is enabled... + if !self.related_event_match_enabled { + return Ok(false); + } + + // get the related event, fail if there is none. + let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) { + event + } else { + return Ok(false); + }; + + // If we are not matching fallbacks, don't match if our special key indicating this is a + // fallback relation is not present. + if !event_match.include_fallbacks.unwrap_or(false) + && event.contains_key("im.vector.is_falling_back") + { + return Ok(false); + } + + // if we have no key, accept the event as matching, if it existed without matching any + // fields. + let key = if let Some(key) = &event_match.key { + key + } else { + return Ok(true); + }; + + let pattern = if let Some(pattern) = &event_match.pattern { + pattern + } else if let Some(pattern_type) = &event_match.pattern_type { + // The `pattern_type` can either be "user_id" or "user_localpart", + // either way if we don't have a `user_id` then the condition can't + // match. + let user_id = if let Some(user_id) = user_id { + user_id + } else { + return Ok(false); + }; + + match &**pattern_type { + "user_id" => user_id, + "user_localpart" => get_localpart_from_id(user_id)?, + _ => return Ok(false), + } + } else { + return Ok(false); + }; + + let haystack = if let Some(haystack) = event.get(&**key) { + haystack + } else { + return Ok(false); + }; + + // For the content.body we match against "words", but for everything + // else we match against the entire value. + let match_type = if key == "content.body" { + GlobMatchType::Word + } else { + GlobMatchType::Whole + }; + + let mut compiled_pattern = get_glob_matcher(pattern, match_type)?; + compiled_pattern.is_match(haystack) + } + /// Match the member count against an 'is' condition /// The `is` condition can be things like '>2', '==3' or even just '4'. 
fn match_member_count(&self, is: &str) -> Result { @@ -267,8 +355,15 @@ impl PushRuleEvaluator { fn push_rule_evaluator() { let mut flattened_keys = BTreeMap::new(); flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string()); - let evaluator = - PushRuleEvaluator::py_new(flattened_keys, 10, Some(0), BTreeMap::new()).unwrap(); + let evaluator = PushRuleEvaluator::py_new( + flattened_keys, + 10, + Some(0), + BTreeMap::new(), + BTreeMap::new(), + true, + ) + .unwrap(); let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob")); assert_eq!(result.len(), 3); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 0dabfab8b8..d57800aa4a 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -267,6 +267,8 @@ pub enum Condition { #[serde(tag = "kind")] pub enum KnownCondition { EventMatch(EventMatchCondition), + #[serde(rename = "im.nheko.msc3664.related_event_match")] + RelatedEventMatch(RelatedEventMatchCondition), ContainsDisplayName, RoomMemberCount { #[serde(skip_serializing_if = "Option::is_none")] @@ -299,6 +301,20 @@ pub struct EventMatchCondition { pub pattern_type: Option>, } +/// The body of a [`Condition::RelatedEventMatch`] +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct RelatedEventMatchCondition { + #[serde(skip_serializing_if = "Option::is_none")] + pub key: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub pattern: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub pattern_type: Option>, + pub rel_type: Cow<'static, str>, + #[serde(skip_serializing_if = "Option::is_none")] + pub include_fallbacks: Option, +} + /// The collection of push rules for a user. #[derive(Debug, Clone, Default)] #[pyclass(frozen)] @@ -391,15 +407,21 @@ impl PushRules { pub struct FilteredPushRules { push_rules: PushRules, enabled_map: BTreeMap, + msc3664_enabled: bool, } #[pymethods] impl FilteredPushRules { #[new] - pub fn py_new(push_rules: PushRules, enabled_map: BTreeMap) -> Self { + pub fn py_new( + push_rules: PushRules, + enabled_map: BTreeMap, + msc3664_enabled: bool, + ) -> Self { Self { push_rules, enabled_map, + msc3664_enabled, } } @@ -414,13 +436,25 @@ impl FilteredPushRules { /// Iterates over all the rules and their enabled state, including base /// rules, in the order they should be executed in. 
fn iter(&self) -> impl Iterator<Item = (&PushRule, bool)> {
-        self.push_rules.iter().map(|r| {
-            let enabled = *self
-                .enabled_map
-                .get(&*r.rule_id)
-                .unwrap_or(&r.default_enabled);
-            (r, enabled)
-        })
+        self.push_rules
+            .iter()
+            .filter(|rule| {
+                // Ignore disabled experimental push rules
+                if !self.msc3664_enabled
+                    && rule.rule_id == "global/override/.im.nheko.msc3664.reply"
+                {
+                    return false;
+                }
+
+                true
+            })
+            .map(|r| {
+                let enabled = *self
+                    .enabled_map
+                    .get(&*r.rule_id)
+                    .unwrap_or(&r.default_enabled);
+                (r, enabled)
+            })
     }
 }
 
@@ -446,6 +480,17 @@ fn test_deserialize_condition() {
     let _: Condition = serde_json::from_str(json).unwrap();
 }
 
+#[test]
+fn test_deserialize_unstable_msc3664_condition() {
+    let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern":"coffee","rel_type":"m.in_reply_to"}"#;
+
+    let condition: Condition = serde_json::from_str(json).unwrap();
+    assert!(matches!(
+        condition,
+        Condition::Known(KnownCondition::RelatedEventMatch(_))
+    ));
+}
+
 #[test]
 fn test_deserialize_custom_condition() {
     let json = r#"{"kind":"custom_tag"}"#;
diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi
index f2a61df660..f3b6d6c933 100644
--- a/stubs/synapse/synapse_rust/push.pyi
+++ b/stubs/synapse/synapse_rust/push.pyi
@@ -25,7 +25,9 @@ class PushRules:
     def rules(self) -> Collection[PushRule]: ...
 
 class FilteredPushRules:
-    def __init__(self, push_rules: PushRules, enabled_map: Dict[str, bool]): ...
+    def __init__(
+        self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool
+    ): ...
     def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
 
 def get_base_rule_ids() -> Collection[str]: ...
@@ -37,6 +39,8 @@ class PushRuleEvaluator:
         room_member_count: int,
         sender_power_level: Optional[int],
         notification_power_levels: Mapping[str, int],
+        related_events_flattened: Mapping[str, Mapping[str, str]],
+        related_event_match_enabled: bool,
     ): ...
     def run(
         self,
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 4009add01d..d9bdd66d55 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -98,6 +98,9 @@ class ExperimentalConfig(Config):
         # MSC3773: Thread notifications
         self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
 
+        # MSC3664: Push rules to match on related events
+        self.msc3664_enabled: bool = experimental.get("msc3664_enabled", False)
+
         # MSC3848: Introduce errcodes for specific event sending failures
         self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False)
 
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index d7795a9080..75b7e126ca 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -45,7 +45,6 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger(__name__)
 
-
 push_rules_invalidation_counter = Counter(
     "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", ""
 )
@@ -107,6 +106,8 @@ class BulkPushRuleEvaluator:
         self.clock = hs.get_clock()
         self._event_auth_handler = hs.get_event_auth_handler()
 
+        self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled
+
         self.room_push_rule_cache_metrics = register_cache(
             "cache",
             "room_push_rule_cache",
@@ -218,6 +219,48 @@ class BulkPushRuleEvaluator:
 
         return pl_event.content if pl_event else {}, sender_level
 
+    async def _related_events(self, event: EventBase) -> Dict[str, Dict[str, str]]:
+        """Fetches the related events for 'event'.
+        Sets the im.vector.is_falling_back key if the event is from a fallback relation.
+
+        Returns:
+            Mapping of relation type to flattened events.
+        """
+        related_events: Dict[str, Dict[str, str]] = {}
+        if self._related_event_match_enabled:
+            related_event_id = event.content.get("m.relates_to", {}).get("event_id")
+            relation_type = event.content.get("m.relates_to", {}).get("rel_type")
+            if related_event_id is not None and relation_type is not None:
+                related_event = await self.store.get_event(
+                    related_event_id, allow_none=True
+                )
+                if related_event is not None:
+                    related_events[relation_type] = _flatten_dict(related_event)
+
+            reply_event_id = (
+                event.content.get("m.relates_to", {})
+                .get("m.in_reply_to", {})
+                .get("event_id")
+            )
+
+            # Convert replies to pseudo-relations.
+            if reply_event_id is not None:
+                related_event = await self.store.get_event(
+                    reply_event_id, allow_none=True
+                )
+
+                if related_event is not None:
+                    related_events["m.in_reply_to"] = _flatten_dict(related_event)
+
+                    # Indicate that this is from a fallback relation.
+                    if relation_type == "m.thread" and event.content.get(
+                        "m.relates_to", {}
+                    ).get("is_falling_back", False):
+                        related_events["m.in_reply_to"][
+                            "im.vector.is_falling_back"
+                        ] = ""
+
+        return related_events
+
     async def action_for_events_by_user(
         self, events_and_context: List[Tuple[EventBase, EventContext]]
     ) -> None:
@@ -286,6 +329,8 @@ class BulkPushRuleEvaluator:
             # the parent is part of a thread.
             thread_id = await self.store.get_thread_id(relation.parent_id)
 
+        related_events = await self._related_events(event)
+
         # It's possible that old room versions have non-integer power levels (floats or
         # strings). Workaround this by explicitly converting to int.
         notification_levels = power_levels.get("notifications", {})
@@ -298,6 +343,8 @@ class BulkPushRuleEvaluator:
             room_member_count,
             sender_power_level,
             notification_levels,
+            related_events,
+            self._related_event_match_enabled,
         )
 
         users = rules_by_user.keys()
diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py
index 4237071c61..e84dde31b1 100644
--- a/synapse/rest/client/capabilities.py
+++ b/synapse/rest/client/capabilities.py
@@ -77,6 +77,11 @@ class CapabilitiesRestServlet(RestServlet):
                 "enabled": True,
             }
 
+        if self.config.experimental.msc3664_enabled:
+            response["capabilities"]["im.nheko.msc3664.related_event_match"] = {
+                "enabled": self.config.experimental.msc3664_enabled,
+            }
+
         return HTTPStatus.OK, response
 
 
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index 51416b2236..b6c15f29f8 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -29,6 +29,7 @@ from typing import (
 )
 
 from synapse.api.errors import StoreError
+from synapse.config.homeserver import ExperimentalConfig
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.database import (
@@ -62,7 +63,9 @@ logger = logging.getLogger(__name__)
 
 
 def _load_rules(
-    rawrules: List[JsonDict], enabled_map: Dict[str, bool]
+    rawrules: List[JsonDict],
+    enabled_map: Dict[str, bool],
+    experimental_config: ExperimentalConfig,
 ) -> FilteredPushRules:
     """Take the push rule rows returned from the DB and convert them into a full `FilteredPushRules` object.
@@ -80,7 +83,9 @@ def _load_rules( push_rules = PushRules(ruleslist) - filtered_rules = FilteredPushRules(push_rules, enabled_map) + filtered_rules = FilteredPushRules( + push_rules, enabled_map, msc3664_enabled=experimental_config.msc3664_enabled + ) return filtered_rules @@ -160,7 +165,7 @@ class PushRulesWorkerStore( enabled_map = await self.get_push_rules_enabled_for_user(user_id) - return _load_rules(rows, enabled_map) + return _load_rules(rows, enabled_map, self.hs.config.experimental) async def get_push_rules_enabled_for_user(self, user_id: str) -> Dict[str, bool]: results = await self.db_pool.simple_select_list( @@ -219,7 +224,9 @@ class PushRulesWorkerStore( results: Dict[str, FilteredPushRules] = {} for user_id, rules in raw_rules.items(): - results[user_id] = _load_rules(rules, enabled_map_by_user.get(user_id, {})) + results[user_id] = _load_rules( + rules, enabled_map_by_user.get(user_id, {}), self.hs.config.experimental + ) return results diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index decf619466..fe7c145840 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -38,7 +38,9 @@ from tests.test_utils.event_injection import create_event, inject_member_event class PushRuleEvaluatorTestCase(unittest.TestCase): - def _get_evaluator(self, content: JsonDict) -> PushRuleEvaluator: + def _get_evaluator( + self, content: JsonDict, related_events=None + ) -> PushRuleEvaluator: event = FrozenEvent( { "event_id": "$event_id", @@ -58,6 +60,8 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): room_member_count, sender_power_level, power_levels.get("notifications", {}), + {} if related_events is None else related_events, + True, ) def test_display_name(self) -> None: @@ -292,6 +296,215 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): {"sound": "default", "highlight": True}, ) + def test_related_event_match(self): + evaluator = self._get_evaluator( + { + "m.relates_to": { + "event_id": "$parent_event_id", + "key": "😀", + "rel_type": "m.annotation", + "m.in_reply_to": { + "event_id": "$parent_event_id", + }, + } + }, + { + "m.in_reply_to": { + "event_id": "$parent_event_id", + "type": "m.room.message", + "sender": "@other_user:test", + "room_id": "!room:test", + "content.msgtype": "m.text", + "content.body": "Original message", + }, + "m.annotation": { + "event_id": "$parent_event_id", + "type": "m.room.message", + "sender": "@other_user:test", + "room_id": "!room:test", + "content.msgtype": "m.text", + "content.body": "Original message", + }, + }, + ) + self.assertTrue( + evaluator.matches( + { + "kind": "im.nheko.msc3664.related_event_match", + "key": "sender", + "rel_type": "m.in_reply_to", + "pattern": "@other_user:test", + }, + "@user:test", + "display_name", + ) + ) + self.assertFalse( + evaluator.matches( + { + "kind": "im.nheko.msc3664.related_event_match", + "key": "sender", + "rel_type": "m.in_reply_to", + "pattern": "@user:test", + }, + "@other_user:test", + "display_name", + ) + ) + self.assertTrue( + evaluator.matches( + { + "kind": "im.nheko.msc3664.related_event_match", + "key": "sender", + "rel_type": "m.annotation", + "pattern": "@other_user:test", + }, + "@other_user:test", + "display_name", + ) + ) + self.assertFalse( + evaluator.matches( + { + "kind": "im.nheko.msc3664.related_event_match", + "key": "sender", + "rel_type": "m.in_reply_to", + }, + "@user:test", + "display_name", + ) + ) + self.assertTrue( + evaluator.matches( + { + "kind": 
"im.nheko.msc3664.related_event_match", + "rel_type": "m.in_reply_to", + }, + "@user:test", + "display_name", + ) + ) + self.assertFalse( + evaluator.matches( + { + "kind": "im.nheko.msc3664.related_event_match", + "rel_type": "m.replace", + }, + "@other_user:test", + "display_name", + ) + ) + + def test_related_event_match_with_fallback(self): + evaluator = self._get_evaluator( + { + "m.relates_to": { + "event_id": "$parent_event_id", + "key": "😀", + "rel_type": "m.thread", + "is_falling_back": True, + "m.in_reply_to": { + "event_id": "$parent_event_id", + }, + } + }, + { + "m.in_reply_to": { + "event_id": "$parent_event_id", + "type": "m.room.message", + "sender": "@other_user:test", + "room_id": "!room:test", + "content.msgtype": "m.text", + "content.body": "Original message", + "im.vector.is_falling_back": "", + }, + "m.thread": { + "event_id": "$parent_event_id", + "type": "m.room.message", + "sender": "@other_user:test", + "room_id": "!room:test", + "content.msgtype": "m.text", + "content.body": "Original message", + }, + }, + ) + self.assertTrue( + evaluator.matches( + { + "kind": "im.nheko.msc3664.related_event_match", + "key": "sender", + "rel_type": "m.in_reply_to", + "pattern": "@other_user:test", + "include_fallbacks": True, + }, + "@user:test", + "display_name", + ) + ) + self.assertFalse( + evaluator.matches( + { + "kind": "im.nheko.msc3664.related_event_match", + "key": "sender", + "rel_type": "m.in_reply_to", + "pattern": "@other_user:test", + "include_fallbacks": False, + }, + "@user:test", + "display_name", + ) + ) + self.assertFalse( + evaluator.matches( + { + "kind": "im.nheko.msc3664.related_event_match", + "key": "sender", + "rel_type": "m.in_reply_to", + "pattern": "@other_user:test", + }, + "@user:test", + "display_name", + ) + ) + + def test_related_event_match_no_related_event(self): + evaluator = self._get_evaluator( + {"msgtype": "m.text", "body": "Message without related event"} + ) + self.assertFalse( + evaluator.matches( + { + "kind": "im.nheko.msc3664.related_event_match", + "key": "sender", + "rel_type": "m.in_reply_to", + "pattern": "@other_user:test", + }, + "@user:test", + "display_name", + ) + ) + self.assertFalse( + evaluator.matches( + { + "kind": "im.nheko.msc3664.related_event_match", + "key": "sender", + "rel_type": "m.in_reply_to", + }, + "@user:test", + "display_name", + ) + ) + self.assertFalse( + evaluator.matches( + { + "kind": "im.nheko.msc3664.related_event_match", + "rel_type": "m.in_reply_to", + }, + "@user:test", + "display_name", + ) + ) + class TestBulkPushRuleEvaluator(unittest.HomeserverTestCase): """Tests for the bulk push rule evaluator""" -- cgit 1.5.1 From 9192d74b0bf2f87b00d3e106a18baa9ce27acda1 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 25 Oct 2022 16:25:02 +0200 Subject: Refactor OIDC tests to better mimic an actual OIDC provider. (#13910) This implements a fake OIDC server, which intercepts calls to the HTTP client. Improves accuracy of tests by covering more internal methods. One particular example was the ID token validation, which previously mocked. This uncovered an incorrect dependency: Synapse actually requires at least authlib 0.15.1, not 0.14.0. 
---
 changelog.d/13910.misc                     |   1 +
 pyproject.toml                             |   2 +-
 synapse/handlers/oidc.py                   |  15 +-
 tests/federation/test_federation_client.py |  36 +-
 tests/handlers/test_oidc.py                | 580 +++++++++++++----------------
 tests/rest/client/test_auth.py             |  32 +-
 tests/rest/client/test_login.py            |  40 +-
 tests/rest/client/utils.py                 | 136 +++----
 tests/test_utils/__init__.py               |  40 +-
 tests/test_utils/oidc.py                   | 325 ++++++++++++++++
 10 files changed, 747 insertions(+), 460 deletions(-)
 create mode 100644 changelog.d/13910.misc
 create mode 100644 tests/test_utils/oidc.py

diff --git a/changelog.d/13910.misc b/changelog.d/13910.misc
new file mode 100644
index 0000000000..e906952aab
--- /dev/null
+++ b/changelog.d/13910.misc
@@ -0,0 +1 @@
+Refactor OIDC tests to better mimic an actual OIDC provider.
diff --git a/pyproject.toml b/pyproject.toml
index 6ebac41ed1..7e0feb75aa 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -192,7 +192,7 @@ psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'Py
 psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
 psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
 pysaml2 = { version = ">=4.5.0", optional = true }
-authlib = { version = ">=0.14.0", optional = true }
+authlib = { version = ">=0.15.1", optional = true }
 # systemd-python is necessary for logging to the systemd journal via
 # `systemd.journal.JournalHandler`, as is documented in
 # `contrib/systemd/log_config.yaml`.
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index d7a8226900..9759daf043 100644
--- a/synapse/handlers/oidc.py
+++ b/synapse/handlers/oidc.py
@@ -275,6 +275,7 @@ class OidcProvider:
         provider: OidcProviderConfig,
     ):
         self._store = hs.get_datastores().main
+        self._clock = hs.get_clock()
 
         self._macaroon_generaton = macaroon_generator
 
@@ -673,6 +674,13 @@ class OidcProvider:
         Returns:
             The decoded claims in the ID token.
         """
+        id_token = token.get("id_token")
+        logger.debug("Attempting to decode JWT id_token %r", id_token)
+
+        # That has theoretically been checked by the caller, so even though
+        # assertions are not enabled in production, it is mainly here to appease mypy
+        assert id_token is not None
+
         metadata = await self.load_metadata()
         claims_params = {
             "nonce": nonce,
@@ -688,9 +696,6 @@ class OidcProvider:
 
         claim_options = {"iss": {"values": [metadata["issuer"]]}}
 
-        id_token = token["id_token"]
-        logger.debug("Attempting to decode JWT id_token %r", id_token)
-
         # Try to decode the keys in cache first, then retry by forcing the keys
         # to be reloaded
         jwk_set = await self.load_jwks()
@@ -715,7 +720,9 @@ class OidcProvider:
 
         logger.debug("Decoded id_token JWT %r; validating", claims)
 
-        claims.validate(leeway=120)  # allows 2 min of clock skew
+        claims.validate(
+            now=self._clock.time(), leeway=120
+        )  # allows 2 min of clock skew
 
         return claims
 
diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py
index a538215931..51d3bb8fff 100644
--- a/tests/federation/test_federation_client.py
+++ b/tests/federation/test_federation_client.py
@@ -12,13 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import json from unittest import mock import twisted.web.client from twisted.internet import defer -from twisted.internet.protocol import Protocol -from twisted.python.failure import Failure from twisted.test.proto_helpers import MemoryReactor from synapse.api.room_versions import RoomVersions @@ -26,10 +23,9 @@ from synapse.events import EventBase from synapse.rest import admin from synapse.rest.client import login, room from synapse.server import HomeServer -from synapse.types import JsonDict from synapse.util import Clock -from tests.test_utils import event_injection +from tests.test_utils import FakeResponse, event_injection from tests.unittest import FederatingHomeserverTestCase @@ -98,8 +94,8 @@ class FederationClientTest(FederatingHomeserverTestCase): # mock up the response, and have the agent return it self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed( - _mock_response( - { + FakeResponse.json( + payload={ "pdus": [ create_event_dict, member_event_dict, @@ -208,8 +204,8 @@ class FederationClientTest(FederatingHomeserverTestCase): # mock up the response, and have the agent return it self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed( - _mock_response( - { + FakeResponse.json( + payload={ "origin": "yet.another.server", "origin_server_ts": 900, "pdus": [ @@ -269,8 +265,8 @@ class FederationClientTest(FederatingHomeserverTestCase): # We expect an outbound request to /backfill, so stub that out self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed( - _mock_response( - { + FakeResponse.json( + payload={ "origin": "yet.another.server", "origin_server_ts": 900, # Mimic the other server returning our new `pulled_event` @@ -305,21 +301,3 @@ class FederationClientTest(FederatingHomeserverTestCase): # This is 2 because it failed once from `self.OTHER_SERVER_NAME` and the # other from "yet.another.server" self.assertEqual(backfill_num_attempts, 2) - - -def _mock_response(resp: JsonDict): - body = json.dumps(resp).encode("utf-8") - - def deliver_body(p: Protocol): - p.dataReceived(body) - p.connectionLost(Failure(twisted.web.client.ResponseDone())) - - response = mock.Mock( - code=200, - phrase=b"OK", - headers=twisted.web.client.Headers({"content-Type": ["application/json"]}), - length=len(body), - deliverBody=deliver_body, - ) - mock.seal(response) - return response diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index e6cd3af7b7..5955410524 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -11,9 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import json import os -from typing import Any, Dict +from typing import Any, Dict, Tuple from unittest.mock import ANY, Mock, patch from urllib.parse import parse_qs, urlparse @@ -22,12 +21,15 @@ import pymacaroons from twisted.test.proto_helpers import MemoryReactor from synapse.handlers.sso import MappingException +from synapse.http.site import SynapseRequest from synapse.server import HomeServer -from synapse.types import JsonDict, UserID +from synapse.types import UserID from synapse.util import Clock -from synapse.util.macaroons import OidcSessionData, get_value_from_macaroon +from synapse.util.macaroons import get_value_from_macaroon +from synapse.util.stringutils import random_string from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock +from tests.test_utils.oidc import FakeAuthorizationGrant, FakeOidcServer from tests.unittest import HomeserverTestCase, override_config try: @@ -46,12 +48,6 @@ BASE_URL = "https://synapse/" CALLBACK_URL = BASE_URL + "_synapse/client/oidc/callback" SCOPES = ["openid"] -AUTHORIZATION_ENDPOINT = ISSUER + "authorize" -TOKEN_ENDPOINT = ISSUER + "token" -USERINFO_ENDPOINT = ISSUER + "userinfo" -WELL_KNOWN = ISSUER + ".well-known/openid-configuration" -JWKS_URI = ISSUER + ".well-known/jwks.json" - # config for common cases DEFAULT_CONFIG = { "enabled": True, @@ -66,9 +62,9 @@ DEFAULT_CONFIG = { EXPLICIT_ENDPOINT_CONFIG = { **DEFAULT_CONFIG, "discover": False, - "authorization_endpoint": AUTHORIZATION_ENDPOINT, - "token_endpoint": TOKEN_ENDPOINT, - "jwks_uri": JWKS_URI, + "authorization_endpoint": ISSUER + "authorize", + "token_endpoint": ISSUER + "token", + "jwks_uri": ISSUER + "jwks", } @@ -102,27 +98,6 @@ class TestMappingProviderFailures(TestMappingProvider): } -async def get_json(url: str) -> JsonDict: - # Mock get_json calls to handle jwks & oidc discovery endpoints - if url == WELL_KNOWN: - # Minimal discovery document, as defined in OpenID.Discovery - # https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata - return { - "issuer": ISSUER, - "authorization_endpoint": AUTHORIZATION_ENDPOINT, - "token_endpoint": TOKEN_ENDPOINT, - "jwks_uri": JWKS_URI, - "userinfo_endpoint": USERINFO_ENDPOINT, - "response_types_supported": ["code"], - "subject_types_supported": ["public"], - "id_token_signing_alg_values_supported": ["RS256"], - } - elif url == JWKS_URI: - return {"keys": []} - - return {} - - def _key_file_path() -> str: """path to a file containing the private half of a test key""" @@ -159,11 +134,11 @@ class OidcHandlerTestCase(HomeserverTestCase): return config def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.http_client = Mock(spec=["get_json"]) - self.http_client.get_json.side_effect = get_json - self.http_client.user_agent = b"Synapse Test" + self.fake_server = FakeOidcServer(clock=clock, issuer=ISSUER) - hs = self.setup_test_homeserver(proxied_http_client=self.http_client) + hs = self.setup_test_homeserver() + self.hs_patcher = self.fake_server.patch_homeserver(hs=hs) + self.hs_patcher.start() self.handler = hs.get_oidc_handler() self.provider = self.handler._providers["oidc"] @@ -175,18 +150,51 @@ class OidcHandlerTestCase(HomeserverTestCase): # Reduce the number of attempts when generating MXIDs. sso_handler._MAP_USERNAME_RETRIES = 3 + auth_handler = hs.get_auth_handler() + # Mock the complete SSO login method. 
+ self.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = self.complete_sso_login # type: ignore[assignment] + return hs + def tearDown(self) -> None: + self.hs_patcher.stop() + return super().tearDown() + + def reset_mocks(self): + """Reset all the Mocks.""" + self.fake_server.reset_mocks() + self.render_error.reset_mock() + self.complete_sso_login.reset_mock() + def metadata_edit(self, values): """Modify the result that will be returned by the well-known query""" - async def patched_get_json(uri): - res = await get_json(uri) - if uri == WELL_KNOWN: - res.update(values) - return res + metadata = self.fake_server.get_metadata() + metadata.update(values) + return patch.object(self.fake_server, "get_metadata", return_value=metadata) - return patch.object(self.http_client, "get_json", patched_get_json) + def start_authorization( + self, + userinfo: dict, + client_redirect_url: str = "http://client/redirect", + scope: str = "openid", + with_sid: bool = False, + ) -> Tuple[SynapseRequest, FakeAuthorizationGrant]: + """Start an authorization request, and get the callback request back.""" + nonce = random_string(10) + state = random_string(10) + + code, grant = self.fake_server.start_authorization( + userinfo=userinfo, + scope=scope, + client_id=self.provider._client_auth.client_id, + redirect_uri=self.provider._callback_url, + nonce=nonce, + with_sid=with_sid, + ) + session = self._generate_oidc_session_token(state, nonce, client_redirect_url) + return _build_callback_request(code, state, session), grant def assertRenderedError(self, error, error_description=None): self.render_error.assert_called_once() @@ -210,52 +218,54 @@ class OidcHandlerTestCase(HomeserverTestCase): """The handler should discover the endpoints from OIDC discovery document.""" # This would throw if some metadata were invalid metadata = self.get_success(self.provider.load_metadata()) - self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.fake_server.get_metadata_handler.assert_called_once() - self.assertEqual(metadata.issuer, ISSUER) - self.assertEqual(metadata.authorization_endpoint, AUTHORIZATION_ENDPOINT) - self.assertEqual(metadata.token_endpoint, TOKEN_ENDPOINT) - self.assertEqual(metadata.jwks_uri, JWKS_URI) - # FIXME: it seems like authlib does not have that defined in its metadata models - # self.assertEqual(metadata.userinfo_endpoint, USERINFO_ENDPOINT) + self.assertEqual(metadata.issuer, self.fake_server.issuer) + self.assertEqual( + metadata.authorization_endpoint, + self.fake_server.authorization_endpoint, + ) + self.assertEqual(metadata.token_endpoint, self.fake_server.token_endpoint) + self.assertEqual(metadata.jwks_uri, self.fake_server.jwks_uri) + # It seems like authlib does not have that defined in its metadata models + self.assertEqual( + metadata.get("userinfo_endpoint"), + self.fake_server.userinfo_endpoint, + ) # subsequent calls should be cached - self.http_client.reset_mock() + self.reset_mocks() self.get_success(self.provider.load_metadata()) - self.http_client.get_json.assert_not_called() + self.fake_server.get_metadata_handler.assert_not_called() @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG}) def test_no_discovery(self) -> None: """When discovery is disabled, it should not try to load from discovery document.""" self.get_success(self.provider.load_metadata()) - self.http_client.get_json.assert_not_called() + self.fake_server.get_metadata_handler.assert_not_called() - @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG}) + 
@override_config({"oidc_config": DEFAULT_CONFIG}) def test_load_jwks(self) -> None: """JWKS loading is done once (then cached) if used.""" jwks = self.get_success(self.provider.load_jwks()) - self.http_client.get_json.assert_called_once_with(JWKS_URI) - self.assertEqual(jwks, {"keys": []}) + self.fake_server.get_jwks_handler.assert_called_once() + self.assertEqual(jwks, self.fake_server.get_jwks()) # subsequent calls should be cached… - self.http_client.reset_mock() + self.reset_mocks() self.get_success(self.provider.load_jwks()) - self.http_client.get_json.assert_not_called() + self.fake_server.get_jwks_handler.assert_not_called() # …unless forced - self.http_client.reset_mock() + self.reset_mocks() self.get_success(self.provider.load_jwks(force=True)) - self.http_client.get_json.assert_called_once_with(JWKS_URI) + self.fake_server.get_jwks_handler.assert_called_once() - # Throw if the JWKS uri is missing - original = self.provider.load_metadata - - async def patched_load_metadata(): - m = (await original()).copy() - m.update({"jwks_uri": None}) - return m - - with patch.object(self.provider, "load_metadata", patched_load_metadata): + with self.metadata_edit({"jwks_uri": None}): + # If we don't do this, the load_metadata call will throw because of the + # missing jwks_uri + self.provider._user_profile_method = "userinfo_endpoint" + self.get_success(self.provider.load_metadata(force=True)) self.get_failure(self.provider.load_jwks(force=True), RuntimeError) @override_config({"oidc_config": DEFAULT_CONFIG}) @@ -359,7 +369,7 @@ class OidcHandlerTestCase(HomeserverTestCase): self.provider.handle_redirect_request(req, b"http://client/redirect") ) ) - auth_endpoint = urlparse(AUTHORIZATION_ENDPOINT) + auth_endpoint = urlparse(self.fake_server.authorization_endpoint) self.assertEqual(url.scheme, auth_endpoint.scheme) self.assertEqual(url.netloc, auth_endpoint.netloc) @@ -424,48 +434,34 @@ class OidcHandlerTestCase(HomeserverTestCase): with self.assertRaises(AttributeError): _ = mapping_provider.get_extra_attributes - token = { - "type": "bearer", - "id_token": "id_token", - "access_token": "access_token", - } username = "bar" userinfo = { "sub": "foo", "username": username, } expected_user_id = "@%s:%s" % (username, self.hs.hostname) - self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] - self.provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] - self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment] - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() - code = "code" - state = "state" - nonce = "nonce" client_redirect_url = "http://client/redirect" - ip_address = "10.0.0.1" - session = self._generate_oidc_session_token(state, nonce, client_redirect_url) - request = _build_callback_request(code, state, session, ip_address=ip_address) - + request, _ = self.start_authorization( + userinfo, client_redirect_url=client_redirect_url + ) self.get_success(self.handler.handle_oidc_callback(request)) - auth_handler.complete_sso_login.assert_called_once_with( + self.complete_sso_login.assert_called_once_with( expected_user_id, - "oidc", + self.provider.idp_id, request, client_redirect_url, None, new_user=True, auth_provider_session_id=None, ) - self.provider._exchange_code.assert_called_once_with(code) - self.provider._parse_id_token.assert_called_once_with(token, nonce=nonce) - self.provider._fetch_userinfo.assert_not_called() + 
self.fake_server.post_token_handler.assert_called_once() + self.fake_server.get_userinfo_handler.assert_not_called() self.render_error.assert_not_called() # Handle mapping errors + request, _ = self.start_authorization(userinfo) with patch.object( self.provider, "_remote_id_from_userinfo", @@ -475,81 +471,63 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertRenderedError("mapping_error") # Handle ID token errors - self.provider._parse_id_token = simple_async_mock(raises=Exception()) # type: ignore[assignment] - self.get_success(self.handler.handle_oidc_callback(request)) + request, _ = self.start_authorization(userinfo) + with self.fake_server.id_token_override({"iss": "https://bad.issuer/"}): + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_token") - auth_handler.complete_sso_login.reset_mock() - self.provider._exchange_code.reset_mock() - self.provider._parse_id_token.reset_mock() - self.provider._fetch_userinfo.reset_mock() + self.reset_mocks() # With userinfo fetching self.provider._user_profile_method = "userinfo_endpoint" - token = { - "type": "bearer", - "access_token": "access_token", - } - self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] + # Without the "openid" scope, the FakeProvider does not generate an id_token + request, _ = self.start_authorization(userinfo, scope="") self.get_success(self.handler.handle_oidc_callback(request)) - auth_handler.complete_sso_login.assert_called_once_with( + self.complete_sso_login.assert_called_once_with( expected_user_id, - "oidc", + self.provider.idp_id, request, - client_redirect_url, + ANY, None, new_user=False, auth_provider_session_id=None, ) - self.provider._exchange_code.assert_called_once_with(code) - self.provider._parse_id_token.assert_not_called() - self.provider._fetch_userinfo.assert_called_once_with(token) + self.fake_server.post_token_handler.assert_called_once() + self.fake_server.get_userinfo_handler.assert_called_once() self.render_error.assert_not_called() + self.reset_mocks() + # With an ID token, userinfo fetching and sid in the ID token self.provider._user_profile_method = "userinfo_endpoint" - token = { - "type": "bearer", - "access_token": "access_token", - "id_token": "id_token", - } - id_token = { - "sid": "abcdefgh", - } - self.provider._parse_id_token = simple_async_mock(return_value=id_token) # type: ignore[assignment] - self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] - auth_handler.complete_sso_login.reset_mock() - self.provider._fetch_userinfo.reset_mock() + request, grant = self.start_authorization(userinfo, with_sid=True) + self.assertIsNotNone(grant.sid) self.get_success(self.handler.handle_oidc_callback(request)) - auth_handler.complete_sso_login.assert_called_once_with( + self.complete_sso_login.assert_called_once_with( expected_user_id, - "oidc", + self.provider.idp_id, request, - client_redirect_url, + ANY, None, new_user=False, - auth_provider_session_id=id_token["sid"], + auth_provider_session_id=grant.sid, ) - self.provider._exchange_code.assert_called_once_with(code) - self.provider._parse_id_token.assert_called_once_with(token, nonce=nonce) - self.provider._fetch_userinfo.assert_called_once_with(token) + self.fake_server.post_token_handler.assert_called_once() + self.fake_server.get_userinfo_handler.assert_called_once() self.render_error.assert_not_called() # Handle userinfo fetching error - self.provider._fetch_userinfo = 
simple_async_mock(raises=Exception()) # type: ignore[assignment] - self.get_success(self.handler.handle_oidc_callback(request)) + request, _ = self.start_authorization(userinfo) + with self.fake_server.buggy_endpoint(userinfo=True): + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("fetch_error") - # Handle code exchange failure - from synapse.handlers.oidc import OidcError - - self.provider._exchange_code = simple_async_mock( # type: ignore[assignment] - raises=OidcError("invalid_request") - ) - self.get_success(self.handler.handle_oidc_callback(request)) - self.assertRenderedError("invalid_request") + request, _ = self.start_authorization(userinfo) + with self.fake_server.buggy_endpoint(token=True): + self.get_success(self.handler.handle_oidc_callback(request)) + self.assertRenderedError("server_error") @override_config({"oidc_config": DEFAULT_CONFIG}) def test_callback_session(self) -> None: @@ -599,18 +577,22 @@ class OidcHandlerTestCase(HomeserverTestCase): ) def test_exchange_code(self) -> None: """Code exchange behaves correctly and handles various error scenarios.""" - token = {"type": "bearer"} - token_json = json.dumps(token).encode("utf-8") - self.http_client.request = simple_async_mock( - return_value=FakeResponse(code=200, phrase=b"OK", body=token_json) + token = { + "type": "Bearer", + "access_token": "aabbcc", + } + + self.fake_server.post_token_handler.side_effect = None + self.fake_server.post_token_handler.return_value = FakeResponse.json( + payload=token ) code = "code" ret = self.get_success(self.provider._exchange_code(code)) - kwargs = self.http_client.request.call_args[1] + kwargs = self.fake_server.request.call_args[1] self.assertEqual(ret, token) self.assertEqual(kwargs["method"], "POST") - self.assertEqual(kwargs["uri"], TOKEN_ENDPOINT) + self.assertEqual(kwargs["uri"], self.fake_server.token_endpoint) args = parse_qs(kwargs["data"].decode("utf-8")) self.assertEqual(args["grant_type"], ["authorization_code"]) @@ -620,12 +602,8 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertEqual(args["redirect_uri"], [CALLBACK_URL]) # Test error handling - self.http_client.request = simple_async_mock( - return_value=FakeResponse( - code=400, - phrase=b"Bad Request", - body=b'{"error": "foo", "error_description": "bar"}', - ) + self.fake_server.post_token_handler.return_value = FakeResponse.json( + code=400, payload={"error": "foo", "error_description": "bar"} ) from synapse.handlers.oidc import OidcError @@ -634,46 +612,30 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertEqual(exc.value.error_description, "bar") # Internal server error with no JSON body - self.http_client.request = simple_async_mock( - return_value=FakeResponse( - code=500, - phrase=b"Internal Server Error", - body=b"Not JSON", - ) + self.fake_server.post_token_handler.return_value = FakeResponse( + code=500, body=b"Not JSON" ) exc = self.get_failure(self.provider._exchange_code(code), OidcError) self.assertEqual(exc.value.error, "server_error") # Internal server error with JSON body - self.http_client.request = simple_async_mock( - return_value=FakeResponse( - code=500, - phrase=b"Internal Server Error", - body=b'{"error": "internal_server_error"}', - ) + self.fake_server.post_token_handler.return_value = FakeResponse.json( + code=500, payload={"error": "internal_server_error"} ) exc = self.get_failure(self.provider._exchange_code(code), OidcError) self.assertEqual(exc.value.error, "internal_server_error") # 4xx error without "error" field - 
self.http_client.request = simple_async_mock( - return_value=FakeResponse( - code=400, - phrase=b"Bad request", - body=b"{}", - ) + self.fake_server.post_token_handler.return_value = FakeResponse.json( + code=400, payload={} ) exc = self.get_failure(self.provider._exchange_code(code), OidcError) self.assertEqual(exc.value.error, "server_error") # 2xx error with "error" field - self.http_client.request = simple_async_mock( - return_value=FakeResponse( - code=200, - phrase=b"OK", - body=b'{"error": "some_error"}', - ) + self.fake_server.post_token_handler.return_value = FakeResponse.json( + code=200, payload={"error": "some_error"} ) exc = self.get_failure(self.provider._exchange_code(code), OidcError) self.assertEqual(exc.value.error, "some_error") @@ -697,11 +659,14 @@ class OidcHandlerTestCase(HomeserverTestCase): """Test that code exchange works with a JWK client secret.""" from authlib.jose import jwt - token = {"type": "bearer"} - self.http_client.request = simple_async_mock( - return_value=FakeResponse( - code=200, phrase=b"OK", body=json.dumps(token).encode("utf-8") - ) + token = { + "type": "Bearer", + "access_token": "aabbcc", + } + + self.fake_server.post_token_handler.side_effect = None + self.fake_server.post_token_handler.return_value = FakeResponse.json( + payload=token ) code = "code" @@ -714,9 +679,9 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertEqual(ret, token) # the request should have hit the token endpoint - kwargs = self.http_client.request.call_args[1] + kwargs = self.fake_server.request.call_args[1] self.assertEqual(kwargs["method"], "POST") - self.assertEqual(kwargs["uri"], TOKEN_ENDPOINT) + self.assertEqual(kwargs["uri"], self.fake_server.token_endpoint) # the client secret provided to the should be a jwt which can be checked with # the public key @@ -750,11 +715,14 @@ class OidcHandlerTestCase(HomeserverTestCase): ) def test_exchange_code_no_auth(self) -> None: """Test that code exchange works with no client secret.""" - token = {"type": "bearer"} - self.http_client.request = simple_async_mock( - return_value=FakeResponse( - code=200, phrase=b"OK", body=json.dumps(token).encode("utf-8") - ) + token = { + "type": "Bearer", + "access_token": "aabbcc", + } + + self.fake_server.post_token_handler.side_effect = None + self.fake_server.post_token_handler.return_value = FakeResponse.json( + payload=token ) code = "code" ret = self.get_success(self.provider._exchange_code(code)) @@ -762,9 +730,9 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertEqual(ret, token) # the request should have hit the token endpoint - kwargs = self.http_client.request.call_args[1] + kwargs = self.fake_server.request.call_args[1] self.assertEqual(kwargs["method"], "POST") - self.assertEqual(kwargs["uri"], TOKEN_ENDPOINT) + self.assertEqual(kwargs["uri"], self.fake_server.token_endpoint) # check the POSTed data args = parse_qs(kwargs["data"].decode("utf-8")) @@ -787,37 +755,19 @@ class OidcHandlerTestCase(HomeserverTestCase): """ Login while using a mapping provider that implements get_extra_attributes. 
""" - token = { - "type": "bearer", - "id_token": "id_token", - "access_token": "access_token", - } userinfo = { "sub": "foo", "username": "foo", "phone": "1234567", } - self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] - self.provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() - - state = "state" - client_redirect_url = "http://client/redirect" - session = self._generate_oidc_session_token( - state=state, - nonce="nonce", - client_redirect_url=client_redirect_url, - ) - request = _build_callback_request("code", state, session) - + request, _ = self.start_authorization(userinfo) self.get_success(self.handler.handle_oidc_callback(request)) - auth_handler.complete_sso_login.assert_called_once_with( + self.complete_sso_login.assert_called_once_with( "@foo:test", - "oidc", + self.provider.idp_id, request, - client_redirect_url, + ANY, {"phone": "1234567"}, new_user=True, auth_provider_session_id=None, @@ -826,41 +776,40 @@ class OidcHandlerTestCase(HomeserverTestCase): @override_config({"oidc_config": DEFAULT_CONFIG}) def test_map_userinfo_to_user(self) -> None: """Ensure that mapping the userinfo returned from a provider to an MXID works properly.""" - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() - userinfo: dict = { "sub": "test_user", "username": "test_user", } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_called_once_with( + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_called_once_with( "@test_user:test", - "oidc", - ANY, + self.provider.idp_id, + request, ANY, None, new_user=True, auth_provider_session_id=None, ) - auth_handler.complete_sso_login.reset_mock() + self.reset_mocks() # Some providers return an integer ID. userinfo = { "sub": 1234, "username": "test_user_2", } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_called_once_with( + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_called_once_with( "@test_user_2:test", - "oidc", - ANY, + self.provider.idp_id, + request, ANY, None, new_user=True, auth_provider_session_id=None, ) - auth_handler.complete_sso_login.reset_mock() + self.reset_mocks() # Test if the mxid is already taken store = self.hs.get_datastores().main @@ -869,8 +818,9 @@ class OidcHandlerTestCase(HomeserverTestCase): store.register_user(user_id=user3.to_string(), password_hash=None) ) userinfo = {"sub": "test3", "username": "test_user_3"} - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_not_called() + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_not_called() self.assertRenderedError( "mapping_error", "Mapping provider does not support de-duplicating Matrix IDs", @@ -885,38 +835,37 @@ class OidcHandlerTestCase(HomeserverTestCase): store.register_user(user_id=user.to_string(), password_hash=None) ) - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() - # Map a user via SSO. 
userinfo = { "sub": "test", "username": "test_user", } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_called_once_with( + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_called_once_with( user.to_string(), - "oidc", - ANY, + self.provider.idp_id, + request, ANY, None, new_user=False, auth_provider_session_id=None, ) - auth_handler.complete_sso_login.reset_mock() + self.reset_mocks() # Subsequent calls should map to the same mxid. - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_called_once_with( + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_called_once_with( user.to_string(), - "oidc", - ANY, + self.provider.idp_id, + request, ANY, None, new_user=False, auth_provider_session_id=None, ) - auth_handler.complete_sso_login.reset_mock() + self.reset_mocks() # Note that a second SSO user can be mapped to the same Matrix ID. (This # requires a unique sub, but something that maps to the same matrix ID, @@ -927,17 +876,18 @@ class OidcHandlerTestCase(HomeserverTestCase): "sub": "test1", "username": "test_user", } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_called_once_with( + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_called_once_with( user.to_string(), - "oidc", - ANY, + self.provider.idp_id, + request, ANY, None, new_user=False, auth_provider_session_id=None, ) - auth_handler.complete_sso_login.reset_mock() + self.reset_mocks() # Register some non-exact matching cases. 
user2 = UserID.from_string("@TEST_user_2:test") @@ -954,8 +904,9 @@ class OidcHandlerTestCase(HomeserverTestCase): "sub": "test2", "username": "TEST_USER_2", } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_not_called() + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_not_called() args = self.assertRenderedError("mapping_error") self.assertTrue( args[2].startswith( @@ -969,11 +920,12 @@ class OidcHandlerTestCase(HomeserverTestCase): store.register_user(user_id=user2.to_string(), password_hash=None) ) - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_called_once_with( + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_called_once_with( "@TEST_USER_2:test", - "oidc", - ANY, + self.provider.idp_id, + request, ANY, None, new_user=False, @@ -983,9 +935,9 @@ class OidcHandlerTestCase(HomeserverTestCase): @override_config({"oidc_config": DEFAULT_CONFIG}) def test_map_userinfo_to_invalid_localpart(self) -> None: """If the mapping provider generates an invalid localpart it should be rejected.""" - self.get_success( - _make_callback_with_userinfo(self.hs, {"sub": "test2", "username": "föö"}) - ) + userinfo = {"sub": "test2", "username": "föö"} + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("mapping_error", "localpart is invalid: föö") @override_config( @@ -1000,9 +952,6 @@ class OidcHandlerTestCase(HomeserverTestCase): ) def test_map_userinfo_to_user_retries(self) -> None: """The mapping provider can retry generating an MXID if the MXID is already in use.""" - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() - store = self.hs.get_datastores().main self.get_success( store.register_user(user_id="@test_user:test", password_hash=None) @@ -1011,19 +960,20 @@ class OidcHandlerTestCase(HomeserverTestCase): "sub": "test", "username": "test_user", } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) # test_user is already taken, so test_user1 gets registered instead. - auth_handler.complete_sso_login.assert_called_once_with( + self.complete_sso_login.assert_called_once_with( "@test_user1:test", - "oidc", - ANY, + self.provider.idp_id, + request, ANY, None, new_user=True, auth_provider_session_id=None, ) - auth_handler.complete_sso_login.reset_mock() + self.reset_mocks() # Register all of the potential mxids for a particular OIDC username. 
self.get_success( @@ -1039,8 +989,9 @@ class OidcHandlerTestCase(HomeserverTestCase): "sub": "tester", "username": "tester", } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_not_called() + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_not_called() self.assertRenderedError( "mapping_error", "Unable to generate a Matrix ID from the SSO response" ) @@ -1052,7 +1003,8 @@ class OidcHandlerTestCase(HomeserverTestCase): "sub": "tester", "username": "", } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("mapping_error", "localpart is invalid: ") @override_config( @@ -1071,7 +1023,8 @@ class OidcHandlerTestCase(HomeserverTestCase): "sub": "tester", "username": None, } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("mapping_error", "localpart is invalid: ") @override_config( @@ -1084,16 +1037,14 @@ class OidcHandlerTestCase(HomeserverTestCase): ) def test_attribute_requirements(self) -> None: """The required attributes must be met from the OIDC userinfo response.""" - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() - # userinfo lacking "test": "foobar" attribute should fail. userinfo = { "sub": "tester", "username": "tester", } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_not_called() + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_not_called() # userinfo with "test": "foobar" attribute should succeed. userinfo = { @@ -1101,13 +1052,14 @@ class OidcHandlerTestCase(HomeserverTestCase): "username": "tester", "test": "foobar", } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) # check that the auth handler got called as expected - auth_handler.complete_sso_login.assert_called_once_with( + self.complete_sso_login.assert_called_once_with( "@tester:test", - "oidc", - ANY, + self.provider.idp_id, + request, ANY, None, new_user=True, @@ -1124,21 +1076,20 @@ class OidcHandlerTestCase(HomeserverTestCase): ) def test_attribute_requirements_contains(self) -> None: """Test that auth succeeds if userinfo attribute CONTAINS required value""" - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() # userinfo with "test": ["foobar", "foo", "bar"] attribute should succeed. 
userinfo = { "sub": "tester", "username": "tester", "test": ["foobar", "foo", "bar"], } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) # check that the auth handler got called as expected - auth_handler.complete_sso_login.assert_called_once_with( + self.complete_sso_login.assert_called_once_with( "@tester:test", - "oidc", - ANY, + self.provider.idp_id, + request, ANY, None, new_user=True, @@ -1158,16 +1109,15 @@ class OidcHandlerTestCase(HomeserverTestCase): Test that auth fails if attributes exist but don't match, or are non-string values. """ - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() # userinfo with "test": "not_foobar" attribute should fail userinfo: dict = { "sub": "tester", "username": "tester", "test": "not_foobar", } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_not_called() + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_not_called() # userinfo with "test": ["foo", "bar"] attribute should fail userinfo = { @@ -1175,8 +1125,9 @@ class OidcHandlerTestCase(HomeserverTestCase): "username": "tester", "test": ["foo", "bar"], } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_not_called() + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_not_called() # userinfo with "test": False attribute should fail # this is largely just to ensure we don't crash here @@ -1185,8 +1136,9 @@ class OidcHandlerTestCase(HomeserverTestCase): "username": "tester", "test": False, } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_not_called() + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_not_called() # userinfo with "test": None attribute should fail # a value of None breaks the OIDC spec, but it's important to not crash here @@ -1195,8 +1147,9 @@ class OidcHandlerTestCase(HomeserverTestCase): "username": "tester", "test": None, } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_not_called() + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_not_called() # userinfo with "test": 1 attribute should fail # this is largely just to ensure we don't crash here @@ -1205,8 +1158,9 @@ class OidcHandlerTestCase(HomeserverTestCase): "username": "tester", "test": 1, } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_not_called() + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_not_called() # userinfo with "test": 3.14 attribute should fail # this is largely just to ensure we don't crash here @@ -1215,8 +1169,9 @@ class OidcHandlerTestCase(HomeserverTestCase): "username": "tester", "test": 3.14, } - self.get_success(_make_callback_with_userinfo(self.hs, userinfo)) - auth_handler.complete_sso_login.assert_not_called() + request, _ = 
self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_not_called() def _generate_oidc_session_token( self, @@ -1230,7 +1185,7 @@ class OidcHandlerTestCase(HomeserverTestCase): return self.handler._macaroon_generator.generate_oidc_session_token( state=state, session_data=OidcSessionData( - idp_id="oidc", + idp_id=self.provider.idp_id, nonce=nonce, client_redirect_url=client_redirect_url, ui_auth_session_id=ui_auth_session_id, @@ -1238,41 +1193,6 @@ class OidcHandlerTestCase(HomeserverTestCase): ) -async def _make_callback_with_userinfo( - hs: HomeServer, userinfo: dict, client_redirect_url: str = "http://client/redirect" -) -> None: - """Mock up an OIDC callback with the given userinfo dict - - We'll pull out the OIDC handler from the homeserver, stub out a couple of methods, - and poke in the userinfo dict as if it were the response to an OIDC userinfo call. - - Args: - hs: the HomeServer impl to send the callback to. - userinfo: the OIDC userinfo dict - client_redirect_url: the URL to redirect to on success. - """ - - handler = hs.get_oidc_handler() - provider = handler._providers["oidc"] - provider._exchange_code = simple_async_mock(return_value={"id_token": ""}) # type: ignore[assignment] - provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] - provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment] - - state = "state" - session = handler._macaroon_generator.generate_oidc_session_token( - state=state, - session_data=OidcSessionData( - idp_id="oidc", - nonce="nonce", - client_redirect_url=client_redirect_url, - ui_auth_session_id="", - ), - ) - request = _build_callback_request("code", state, session) - - await handler.handle_oidc_callback(request) - - def _build_callback_request( code: str, state: str, diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py index 090cef5216..ebf653d018 100644 --- a/tests/rest/client/test_auth.py +++ b/tests/rest/client/test_auth.py @@ -465,9 +465,11 @@ class UIAuthTests(unittest.HomeserverTestCase): * checking that the original operation succeeds """ + fake_oidc_server = self.helper.fake_oidc_server() + # log the user in remote_user_id = UserID.from_string(self.user).localpart - login_resp = self.helper.login_via_oidc(remote_user_id) + login_resp, _ = self.helper.login_via_oidc(fake_oidc_server, remote_user_id) self.assertEqual(login_resp["user_id"], self.user) # initiate a UI Auth process by attempting to delete the device @@ -481,8 +483,8 @@ class UIAuthTests(unittest.HomeserverTestCase): # run the UIA-via-SSO flow session_id = channel.json_body["session"] - channel = self.helper.auth_via_oidc( - {"sub": remote_user_id}, ui_auth_session_id=session_id + channel, _ = self.helper.auth_via_oidc( + fake_oidc_server, {"sub": remote_user_id}, ui_auth_session_id=session_id ) # that should serve a confirmation page @@ -499,7 +501,8 @@ class UIAuthTests(unittest.HomeserverTestCase): @skip_unless(HAS_OIDC, "requires OIDC") @override_config({"oidc_config": TEST_OIDC_CONFIG}) def test_does_not_offer_password_for_sso_user(self) -> None: - login_resp = self.helper.login_via_oidc("username") + fake_oidc_server = self.helper.fake_oidc_server() + login_resp, _ = self.helper.login_via_oidc(fake_oidc_server, "username") user_tok = login_resp["access_token"] device_id = login_resp["device_id"] @@ -522,7 +525,10 @@ class UIAuthTests(unittest.HomeserverTestCase): @override_config({"oidc_config": 
TEST_OIDC_CONFIG}) def test_offers_both_flows_for_upgraded_user(self) -> None: """A user that had a password and then logged in with SSO should get both flows""" - login_resp = self.helper.login_via_oidc(UserID.from_string(self.user).localpart) + fake_oidc_server = self.helper.fake_oidc_server() + login_resp, _ = self.helper.login_via_oidc( + fake_oidc_server, UserID.from_string(self.user).localpart + ) self.assertEqual(login_resp["user_id"], self.user) channel = self.delete_device( @@ -539,8 +545,13 @@ class UIAuthTests(unittest.HomeserverTestCase): @override_config({"oidc_config": TEST_OIDC_CONFIG}) def test_ui_auth_fails_for_incorrect_sso_user(self) -> None: """If the user tries to authenticate with the wrong SSO user, they get an error""" + + fake_oidc_server = self.helper.fake_oidc_server() + # log the user in - login_resp = self.helper.login_via_oidc(UserID.from_string(self.user).localpart) + login_resp, _ = self.helper.login_via_oidc( + fake_oidc_server, UserID.from_string(self.user).localpart + ) self.assertEqual(login_resp["user_id"], self.user) # start a UI Auth flow by attempting to delete a device @@ -553,8 +564,8 @@ class UIAuthTests(unittest.HomeserverTestCase): session_id = channel.json_body["session"] # do the OIDC auth, but auth as the wrong user - channel = self.helper.auth_via_oidc( - {"sub": "wrong_user"}, ui_auth_session_id=session_id + channel, _ = self.helper.auth_via_oidc( + fake_oidc_server, {"sub": "wrong_user"}, ui_auth_session_id=session_id ) # that should return a failure message @@ -584,7 +595,10 @@ class UIAuthTests(unittest.HomeserverTestCase): """Tests that if we register a user via SSO while requiring approval for new accounts, we still raise the correct error before logging the user in. """ - login_resp = self.helper.login_via_oidc("username", expected_status=403) + fake_oidc_server = self.helper.fake_oidc_server() + login_resp, _ = self.helper.login_via_oidc( + fake_oidc_server, "username", expected_status=403 + ) self.assertEqual(login_resp["errcode"], Codes.USER_AWAITING_APPROVAL) self.assertEqual( diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index e801ba8c8b..ff5baa9f0a 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -36,7 +36,7 @@ from synapse.util import Clock from tests import unittest from tests.handlers.test_oidc import HAS_OIDC from tests.handlers.test_saml import has_saml2 -from tests.rest.client.utils import TEST_OIDC_AUTH_ENDPOINT, TEST_OIDC_CONFIG +from tests.rest.client.utils import TEST_OIDC_CONFIG from tests.server import FakeChannel from tests.test_utils.html_parsers import TestHtmlParser from tests.unittest import HomeserverTestCase, override_config, skip_unless @@ -612,13 +612,16 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): def test_login_via_oidc(self) -> None: """If OIDC is chosen, should redirect to the OIDC auth endpoint""" - # pick the default OIDC provider - channel = self.make_request( - "GET", - "/_synapse/client/pick_idp?redirectUrl=" - + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) - + "&idp=oidc", - ) + fake_oidc_server = self.helper.fake_oidc_server() + + with fake_oidc_server.patch_homeserver(hs=self.hs): + # pick the default OIDC provider + channel = self.make_request( + "GET", + "/_synapse/client/pick_idp?redirectUrl=" + + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) + + "&idp=oidc", + ) self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers @@ 
-626,7 +629,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1) # it should redirect us to the auth page of the OIDC server - self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT) + self.assertEqual(oidc_uri_path, fake_oidc_server.authorization_endpoint) # ... and should have set a cookie including the redirect url cookie_headers = channel.headers.getRawHeaders("Set-Cookie") @@ -643,7 +646,9 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): TEST_CLIENT_REDIRECT_URL, ) - channel = self.helper.complete_oidc_auth(oidc_uri, cookies, {"sub": "user1"}) + channel, _ = self.helper.complete_oidc_auth( + fake_oidc_server, oidc_uri, cookies, {"sub": "user1"} + ) # that should serve a confirmation page self.assertEqual(channel.code, 200, channel.result) @@ -693,7 +698,10 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): def test_client_idp_redirect_to_oidc(self) -> None: """If the client pick a known IdP, redirect to it""" - channel = self._make_sso_redirect_request("oidc") + fake_oidc_server = self.helper.fake_oidc_server() + + with fake_oidc_server.patch_homeserver(hs=self.hs): + channel = self._make_sso_redirect_request("oidc") self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers @@ -701,7 +709,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1) # it should redirect us to the auth page of the OIDC server - self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT) + self.assertEqual(oidc_uri_path, fake_oidc_server.authorization_endpoint) def _make_sso_redirect_request(self, idp_prov: Optional[str] = None) -> FakeChannel: """Send a request to /_matrix/client/r0/login/sso/redirect @@ -1280,9 +1288,13 @@ class UsernamePickerTestCase(HomeserverTestCase): def test_username_picker(self) -> None: """Test the happy path of a username picker flow.""" + fake_oidc_server = self.helper.fake_oidc_server() + # do the start of the login flow - channel = self.helper.auth_via_oidc( - {"sub": "tester", "displayname": "Jonny"}, TEST_CLIENT_REDIRECT_URL + channel, _ = self.helper.auth_via_oidc( + fake_oidc_server, + {"sub": "tester", "displayname": "Jonny"}, + TEST_CLIENT_REDIRECT_URL, ) # that should redirect to the username picker diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index c249a42bb6..967d229223 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -31,7 +31,6 @@ from typing import ( Tuple, overload, ) -from unittest.mock import patch from urllib.parse import urlencode import attr @@ -46,8 +45,19 @@ from synapse.server import HomeServer from synapse.types import JsonDict from tests.server import FakeChannel, FakeSite, make_request -from tests.test_utils import FakeResponse from tests.test_utils.html_parsers import TestHtmlParser +from tests.test_utils.oidc import FakeAuthorizationGrant, FakeOidcServer + +# an 'oidc_config' suitable for login_via_oidc. +TEST_OIDC_ISSUER = "https://issuer.test/" +TEST_OIDC_CONFIG = { + "enabled": True, + "issuer": TEST_OIDC_ISSUER, + "client_id": "test-client-id", + "client_secret": "test-client-secret", + "scopes": ["openid"], + "user_mapping_provider": {"config": {"localpart_template": "{{ user.sub }}"}}, +} @attr.s(auto_attribs=True) @@ -543,12 +553,28 @@ class RestHelper: return channel.json_body + def fake_oidc_server(self, issuer: str = TEST_OIDC_ISSUER) -> FakeOidcServer: + """Create a ``FakeOidcServer``. 
+ + This can be used in conjunction with ``login_via_oidc``:: + + fake_oidc_server = self.helper.fake_oidc_server() + login_data, _ = self.helper.login_via_oidc(fake_oidc_server, "user") + """ + + return FakeOidcServer( + clock=self.hs.get_clock(), + issuer=issuer, + ) + def login_via_oidc( self, + fake_server: FakeOidcServer, remote_user_id: str, + with_sid: bool = False, expected_status: int = 200, - ) -> JsonDict: - """Log in via OIDC + ) -> Tuple[JsonDict, FakeAuthorizationGrant]: + """Log in (as a new user) via OIDC Returns the result of the final token login. @@ -560,7 +586,10 @@ class RestHelper: the normal places. """ client_redirect_url = "https://x" - channel = self.auth_via_oidc({"sub": remote_user_id}, client_redirect_url) + userinfo = {"sub": remote_user_id} + channel, grant = self.auth_via_oidc( + fake_server, userinfo, client_redirect_url, with_sid=with_sid + ) # expect a confirmation page assert channel.code == HTTPStatus.OK, channel.result @@ -585,14 +614,16 @@ class RestHelper: assert ( channel.code == expected_status ), f"unexpected status in response: {channel.code}" - return channel.json_body + return channel.json_body, grant def auth_via_oidc( self, + fake_server: FakeOidcServer, user_info_dict: JsonDict, client_redirect_url: Optional[str] = None, ui_auth_session_id: Optional[str] = None, - ) -> FakeChannel: + with_sid: bool = False, + ) -> Tuple[FakeChannel, FakeAuthorizationGrant]: """Perform an OIDC authentication flow via a mock OIDC provider. This can be used for either login or user-interactive auth. @@ -616,6 +647,7 @@ the login redirect endpoint ui_auth_session_id: if set, we will perform a UI Auth flow. The session id of the UI auth. + with_sid: if True, generates a random `sid` (OIDC session ID) Returns: A FakeChannel containing the result of calling the OIDC callback endpoint. @@ -625,14 +657,15 @@ cookies: Dict[str, str] = {} - # if we're doing a ui auth, hit the ui auth redirect endpoint - if ui_auth_session_id: - # can't set the client redirect url for UI Auth - assert client_redirect_url is None - oauth_uri = self.initiate_sso_ui_auth(ui_auth_session_id, cookies) - else: - # otherwise, hit the login redirect endpoint - oauth_uri = self.initiate_sso_login(client_redirect_url, cookies) + with fake_server.patch_homeserver(hs=self.hs): + # if we're doing a ui auth, hit the ui auth redirect endpoint + if ui_auth_session_id: + # can't set the client redirect url for UI Auth + assert client_redirect_url is None + oauth_uri = self.initiate_sso_ui_auth(ui_auth_session_id, cookies) + else: + # otherwise, hit the login redirect endpoint + oauth_uri = self.initiate_sso_login(client_redirect_url, cookies) # we now have a URI for the OIDC IdP, but we skip that and go straight # back to synapse's OIDC callback resource. However, we do need the "state" # that synapse passes to the client.
oauth_uri_path, _ = oauth_uri.split("?", 1) - assert oauth_uri_path == TEST_OIDC_AUTH_ENDPOINT, ( + assert oauth_uri_path == fake_server.authorization_endpoint, ( "unexpected SSO URI " + oauth_uri_path ) - return self.complete_oidc_auth(oauth_uri, cookies, user_info_dict) + return self.complete_oidc_auth( + fake_server, oauth_uri, cookies, user_info_dict, with_sid=with_sid + ) def complete_oidc_auth( self, + fake_server: FakeOidcServer, oauth_uri: str, cookies: Mapping[str, str], user_info_dict: JsonDict, - ) -> FakeChannel: + with_sid: bool = False, + ) -> Tuple[FakeChannel, FakeAuthorizationGrant]: """Mock out an OIDC authentication flow Assumes that an OIDC auth has been initiated by one of initiate_sso_login or @@ -661,50 +698,37 @@ class RestHelper: Requires the OIDC callback resource to be mounted at the normal place. Args: + fake_server: the fake OIDC server with which the auth should be done oauth_uri: the OIDC URI returned by synapse's redirect endpoint (ie, from initiate_sso_login or initiate_sso_ui_auth). cookies: the cookies set by synapse's redirect endpoint, which will be sent back to the callback endpoint. user_info_dict: the remote userinfo that the OIDC provider should present. Typically this should be '{"sub": "<remote user id>"}'. + with_sid: if True, generates a random `sid` (OIDC session ID) Returns: A FakeChannel containing the result of calling the OIDC callback endpoint. """ _, oauth_uri_qs = oauth_uri.split("?", 1) params = urllib.parse.parse_qs(oauth_uri_qs) + + code, grant = fake_server.start_authorization( + scope=params["scope"][0], + userinfo=user_info_dict, + client_id=params["client_id"][0], + redirect_uri=params["redirect_uri"][0], + nonce=params["nonce"][0], + with_sid=with_sid, + ) + state = params["state"][0] + callback_uri = "%s?%s" % ( urllib.parse.urlparse(params["redirect_uri"][0]).path, - urllib.parse.urlencode({"state": params["state"][0], "code": "TEST_CODE"}), + urllib.parse.urlencode({"state": state, "code": code}), ) - # before we hit the callback uri, stub out some methods in the http client so - # that we don't have to handle full HTTPS requests. - # (expected url, json response) pairs, in the order we expect them. - expected_requests = [ - # first we get a hit to the token endpoint, which we tell to return - # a dummy OIDC access token - (TEST_OIDC_TOKEN_ENDPOINT, {"access_token": "TEST"}), - # and then one to the user_info endpoint, which returns our remote user id. - (TEST_OIDC_USERINFO_ENDPOINT, user_info_dict), - ] - - async def mock_req( - method: str, - uri: str, - data: Optional[dict] = None, - headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None, - ): - (expected_uri, resp_obj) = expected_requests.pop(0) - assert uri == expected_uri - resp = FakeResponse( - code=HTTPStatus.OK, - phrase=b"OK", - body=json.dumps(resp_obj).encode("utf-8"), - ) - return resp - - with patch.object(self.hs.get_proxied_http_client(), "request", mock_req): + with fake_server.patch_homeserver(hs=self.hs): # now hit the callback URI with the right params and a made-up code channel = make_request( self.hs.get_reactor(), @@ -715,7 +739,7 @@ class RestHelper: ("Cookie", "%s=%s" % (k, v)) for (k, v) in cookies.items() ], ) - return channel + return channel, grant def initiate_sso_login( self, client_redirect_url: Optional[str], cookies: MutableMapping[str, str] @@ -806,21 +830,3 @@ class RestHelper: assert len(p.links) == 1, "not exactly one link in confirmation page" oauth_uri = p.links[0] return oauth_uri - - -# an 'oidc_config' suitable for login_via_oidc.
-TEST_OIDC_AUTH_ENDPOINT = "https://issuer.test/auth" -TEST_OIDC_TOKEN_ENDPOINT = "https://issuer.test/token" -TEST_OIDC_USERINFO_ENDPOINT = "https://issuer.test/userinfo" -TEST_OIDC_CONFIG = { - "enabled": True, - "discover": False, - "issuer": "https://issuer.test", - "client_id": "test-client-id", - "client_secret": "test-client-secret", - "scopes": ["profile"], - "authorization_endpoint": TEST_OIDC_AUTH_ENDPOINT, - "token_endpoint": TEST_OIDC_TOKEN_ENDPOINT, - "userinfo_endpoint": TEST_OIDC_USERINFO_ENDPOINT, - "user_mapping_provider": {"config": {"localpart_template": "{{ user.sub }}"}}, -} diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index 0d0d6faf0d..e62ebcc6a5 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -15,17 +15,24 @@ """ Utilities for running the unit tests """ +import json import sys import warnings from asyncio import Future from binascii import unhexlify -from typing import Awaitable, Callable, TypeVar +from typing import Awaitable, Callable, Tuple, TypeVar from unittest.mock import Mock import attr +import zope.interface from twisted.python.failure import Failure from twisted.web.client import ResponseDone +from twisted.web.http import RESPONSES +from twisted.web.http_headers import Headers +from twisted.web.iweb import IResponse + +from synapse.types import JsonDict TV = TypeVar("TV") @@ -97,27 +104,44 @@ def simple_async_mock(return_value=None, raises=None) -> Mock: return Mock(side_effect=cb) -@attr.s -class FakeResponse: +# Type ignore: it does not fully implement IResponse, but is good enough for tests +@zope.interface.implementer(IResponse) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class FakeResponse: # type: ignore[misc] """A fake twisted.web.IResponse object there is a similar class at treq.test.test_response, but it lacks a `phrase` attribute, and didn't support deliverBody until recently. """ - # HTTP response code - code = attr.ib(type=int) + version: Tuple[bytes, int, int] = (b"HTTP", 1, 1) - # HTTP response phrase (eg b'OK' for a 200) - phrase = attr.ib(type=bytes) + # HTTP response code + code: int = 200 # body of the response - body = attr.ib(type=bytes) + body: bytes = b"" + + headers: Headers = attr.Factory(Headers) + + @property + def phrase(self): + return RESPONSES.get(self.code, b"Unknown Status") + + @property + def length(self): + return len(self.body) def deliverBody(self, protocol): protocol.dataReceived(self.body) protocol.connectionLost(Failure(ResponseDone())) + @classmethod + def json(cls, *, code: int = 200, payload: JsonDict) -> "FakeResponse": + headers = Headers({"Content-Type": ["application/json"]}) + body = json.dumps(payload).encode("utf-8") + return cls(code=code, body=body, headers=headers) + # A small image used in some tests. # diff --git a/tests/test_utils/oidc.py b/tests/test_utils/oidc.py new file mode 100644 index 0000000000..de134bbc89 --- /dev/null +++ b/tests/test_utils/oidc.py @@ -0,0 +1,325 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +from typing import Any, Dict, List, Optional, Tuple +from unittest.mock import Mock, patch +from urllib.parse import parse_qs + +import attr + +from twisted.web.http_headers import Headers +from twisted.web.iweb import IResponse + +from synapse.server import HomeServer +from synapse.util import Clock +from synapse.util.stringutils import random_string + +from tests.test_utils import FakeResponse + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class FakeAuthorizationGrant: + userinfo: dict + client_id: str + redirect_uri: str + scope: str + nonce: Optional[str] + sid: Optional[str] + + +class FakeOidcServer: + """A fake OpenID Connect Provider.""" + + # All methods here are mocks, so we can track when they are called, and override + # their values + request: Mock + get_jwks_handler: Mock + get_metadata_handler: Mock + get_userinfo_handler: Mock + post_token_handler: Mock + + def __init__(self, clock: Clock, issuer: str): + from authlib.jose import ECKey, KeySet + + self._clock = clock + self.issuer = issuer + + self.request = Mock(side_effect=self._request) + self.get_jwks_handler = Mock(side_effect=self._get_jwks_handler) + self.get_metadata_handler = Mock(side_effect=self._get_metadata_handler) + self.get_userinfo_handler = Mock(side_effect=self._get_userinfo_handler) + self.post_token_handler = Mock(side_effect=self._post_token_handler) + + # A code -> grant mapping + self._authorization_grants: Dict[str, FakeAuthorizationGrant] = {} + # An access token -> grant mapping + self._sessions: Dict[str, FakeAuthorizationGrant] = {} + + # We generate here an ECDSA key with the P-256 curve (ES256 algorithm) used for + # signing JWTs. ECDSA keys are really quick to generate compared to RSA. + self._key = ECKey.generate_key(crv="P-256", is_private=True) + self._jwks = KeySet([ECKey.import_key(self._key.as_pem(is_private=False))]) + + self._id_token_overrides: Dict[str, Any] = {} + + def reset_mocks(self): + self.request.reset_mock() + self.get_jwks_handler.reset_mock() + self.get_metadata_handler.reset_mock() + self.get_userinfo_handler.reset_mock() + self.post_token_handler.reset_mock() + + def patch_homeserver(self, hs: HomeServer): + """Patch the ``HomeServer`` HTTP client to handle requests through the ``FakeOidcServer``. 
+ + This patch should be used whenever the HS is expected to perform request to the + OIDC provider, e.g.:: + + fake_oidc_server = self.helper.fake_oidc_server() + with fake_oidc_server.patch_homeserver(hs): + self.make_request("GET", "/_matrix/client/r0/login/sso/redirect") + """ + return patch.object(hs.get_proxied_http_client(), "request", self.request) + + @property + def authorization_endpoint(self) -> str: + return self.issuer + "authorize" + + @property + def token_endpoint(self) -> str: + return self.issuer + "token" + + @property + def userinfo_endpoint(self) -> str: + return self.issuer + "userinfo" + + @property + def metadata_endpoint(self) -> str: + return self.issuer + ".well-known/openid-configuration" + + @property + def jwks_uri(self) -> str: + return self.issuer + "jwks" + + def get_metadata(self) -> dict: + return { + "issuer": self.issuer, + "authorization_endpoint": self.authorization_endpoint, + "token_endpoint": self.token_endpoint, + "jwks_uri": self.jwks_uri, + "userinfo_endpoint": self.userinfo_endpoint, + "response_types_supported": ["code"], + "subject_types_supported": ["public"], + "id_token_signing_alg_values_supported": ["ES256"], + } + + def get_jwks(self) -> dict: + return self._jwks.as_dict() + + def get_userinfo(self, access_token: str) -> Optional[dict]: + """Given an access token, get the userinfo of the associated session.""" + session = self._sessions.get(access_token, None) + if session is None: + return None + return session.userinfo + + def _sign(self, payload: dict) -> str: + from authlib.jose import JsonWebSignature + + jws = JsonWebSignature() + kid = self.get_jwks()["keys"][0]["kid"] + protected = {"alg": "ES256", "kid": kid} + json_payload = json.dumps(payload) + return jws.serialize_compact(protected, json_payload, self._key).decode("utf-8") + + def generate_id_token(self, grant: FakeAuthorizationGrant) -> str: + now = self._clock.time() + id_token = { + **grant.userinfo, + "iss": self.issuer, + "aud": grant.client_id, + "iat": now, + "nbf": now, + "exp": now + 600, + } + + if grant.nonce is not None: + id_token["nonce"] = grant.nonce + + if grant.sid is not None: + id_token["sid"] = grant.sid + + id_token.update(self._id_token_overrides) + + return self._sign(id_token) + + def id_token_override(self, overrides: dict): + """Temporarily patch the ID token generated by the token endpoint.""" + return patch.object(self, "_id_token_overrides", overrides) + + def start_authorization( + self, + client_id: str, + scope: str, + redirect_uri: str, + userinfo: dict, + nonce: Optional[str] = None, + with_sid: bool = False, + ) -> Tuple[str, FakeAuthorizationGrant]: + """Start an authorization request, and get back the code to use on the authorization endpoint.""" + code = random_string(10) + sid = None + if with_sid: + sid = random_string(10) + + grant = FakeAuthorizationGrant( + userinfo=userinfo, + scope=scope, + redirect_uri=redirect_uri, + nonce=nonce, + client_id=client_id, + sid=sid, + ) + self._authorization_grants[code] = grant + + return code, grant + + def exchange_code(self, code: str) -> Optional[Dict[str, Any]]: + grant = self._authorization_grants.pop(code, None) + if grant is None: + return None + + access_token = random_string(10) + self._sessions[access_token] = grant + + token = { + "token_type": "Bearer", + "access_token": access_token, + "expires_in": 3600, + "scope": grant.scope, + } + + if "openid" in grant.scope: + token["id_token"] = self.generate_id_token(grant) + + return dict(token) + + def buggy_endpoint( + self, + *, + 
jwks: bool = False, + metadata: bool = False, + token: bool = False, + userinfo: bool = False, + ): + """A context manager which makes a set of endpoints return a 500 error. + + Args: + jwks: If True, makes the JWKS endpoint return a 500 error. + metadata: If True, makes the OIDC Discovery endpoint return a 500 error. + token: If True, makes the token endpoint return a 500 error. + userinfo: If True, makes the userinfo endpoint return a 500 error. + """ + buggy = FakeResponse(code=500, body=b"Internal server error") + + patches = {} + if jwks: + patches["get_jwks_handler"] = Mock(return_value=buggy) + if metadata: + patches["get_metadata_handler"] = Mock(return_value=buggy) + if token: + patches["post_token_handler"] = Mock(return_value=buggy) + if userinfo: + patches["get_userinfo_handler"] = Mock(return_value=buggy) + + return patch.multiple(self, **patches) + + async def _request( + self, + method: str, + uri: str, + data: Optional[bytes] = None, + headers: Optional[Headers] = None, + ) -> IResponse: + """The override of the SimpleHttpClient#request() method""" + access_token: Optional[str] = None + + if headers is None: + headers = Headers() + + # Try to find the access token in the headers if any + auth_headers = headers.getRawHeaders(b"Authorization") + if auth_headers: + parts = auth_headers[0].split(b" ") + if parts[0] == b"Bearer" and len(parts) == 2: + access_token = parts[1].decode("ascii") + + if method == "POST": + # If the method is POST, assume it has a url-encoded body + if data is None or headers.getRawHeaders(b"Content-Type") != [ + b"application/x-www-form-urlencoded" + ]: + return FakeResponse.json(code=400, payload={"error": "invalid_request"}) + + params = parse_qs(data.decode("utf-8")) + + if uri == self.token_endpoint: + # Even though this endpoint should be protected, this does not check + # for client authentication. We're not checking it for simplicity, + # and because client authentication is tested in other standalone tests.
+ return self.post_token_handler(params) + + elif method == "GET": + if uri == self.jwks_uri: + return self.get_jwks_handler() + elif uri == self.metadata_endpoint: + return self.get_metadata_handler() + elif uri == self.userinfo_endpoint: + return self.get_userinfo_handler(access_token=access_token) + + return FakeResponse(code=404, body=b"404 not found") + + # Request handlers + def _get_jwks_handler(self) -> IResponse: + """Handles requests to the JWKS URI.""" + return FakeResponse.json(payload=self.get_jwks()) + + def _get_metadata_handler(self) -> IResponse: + """Handles requests to the OIDC well-known document.""" + return FakeResponse.json(payload=self.get_metadata()) + + def _get_userinfo_handler(self, access_token: Optional[str]) -> IResponse: + """Handles requests to the userinfo endpoint.""" + if access_token is None: + return FakeResponse(code=401) + user_info = self.get_userinfo(access_token) + if user_info is None: + return FakeResponse(code=401) + + return FakeResponse.json(payload=user_info) + + def _post_token_handler(self, params: Dict[str, List[str]]) -> IResponse: + """Handles requests to the token endpoint.""" + code = params.get("code", []) + + if len(code) != 1: + return FakeResponse.json(code=400, payload={"error": "invalid_request"}) + + grant = self.exchange_code(code=code[0]) + if grant is None: + return FakeResponse.json(code=400, payload={"error": "invalid_grant"}) + + return FakeResponse.json(payload=grant) -- cgit 1.5.1 From d902181de98399d90c46c4e4e2cf631064757941 Mon Sep 17 00:00:00 2001 From: James Salter Date: Tue, 25 Oct 2022 19:05:22 +0100 Subject: Unified search query syntax using the full-text search capabilities of the underlying DB. (#11635) Support a unified search query syntax which leverages more of the full-text search of each database supported by Synapse. Supports, with the same syntax across Postgresql 11+ and Sqlite: - quoted "search terms" - `AND`, `OR`, `-` (negation) operators - Matching words based on their stem, e.g. a search for "dog" matches documents containing "dogs". This is achieved by - If on postgresql 11+, pass the user input to `websearch_to_tsquery` - If on sqlite, manually parse the query and transform it into the sqlite-specific query syntax. Note that postgresql 10, which is close to end-of-life, falls back to using `plainto_tsquery`, which only supports a subset of the features. Multiple terms separated by a space are implicitly ANDed. Note that: 1. There is no escaping of full-text syntax that might be supported by the database; e.g. `NOT`, `NEAR`, `*` in sqlite. This runs the risk that people might discover this as accidental functionality and depend on something we don't guarantee. 2. English text is assumed for stemming. To support other languages, either the target language needs to be known at the time of indexing the message (via room metadata, or otherwise), or a separate index for each language supported could be created.
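For illustration, a short sketch (not part of the patch itself) of how a user's query is translated for SQLite by the helpers this change adds to synapse/storage/databases/main/search.py; on postgresql 11+ the raw query string is instead passed straight to `websearch_to_tsquery`:

    from synapse.storage.databases.main.search import (
        _tokenize_query,
        _tokens_to_sqlite_match_query,
    )

    # Tokenizes to:
    #   [Phrase(["quick", "brown"]), And, "fox", Not, "nope", Or, "jumps"]
    tokens = _tokenize_query('"quick brown" fox -nope OR jumps')

    # ... which renders as the sqlite MATCH expression:
    #   "quick brown" AND fox NOT nope OR jumps
    print(_tokens_to_sqlite_match_query(tokens))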
Sqlite docs: https://www.sqlite.org/fts3.html#full_text_index_queries Postgres docs: https://www.postgresql.org/docs/11/textsearch-controls.html --- changelog.d/11635.feature | 1 + synapse/storage/databases/main/search.py | 197 +++++++++++++++---- synapse/storage/engines/postgres.py | 16 ++ .../delta/73/10_update_sqlite_fts4_tokenizer.py | 62 ++++++ tests/storage/test_room_search.py | 213 +++++++++++++++++++++ 5 files changed, 454 insertions(+), 35 deletions(-) create mode 100644 changelog.d/11635.feature create mode 100644 synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py diff --git a/changelog.d/11635.feature b/changelog.d/11635.feature new file mode 100644 index 0000000000..94c8a83212 --- /dev/null +++ b/changelog.d/11635.feature @@ -0,0 +1 @@ +Allow use of postgres and sqlite full-text search operators in search queries. \ No newline at end of file diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 1b79acf955..a89fc54c2c 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -11,10 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import enum import logging import re -from typing import TYPE_CHECKING, Any, Collection, Iterable, List, Optional, Set, Tuple +from collections import deque +from dataclasses import dataclass +from typing import ( + TYPE_CHECKING, + Any, + Collection, + Iterable, + List, + Optional, + Set, + Tuple, + Union, +) import attr @@ -27,7 +39,7 @@ from synapse.storage.database import ( LoggingTransaction, ) from synapse.storage.databases.main.events_worker import EventRedactBehaviour -from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine +from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.types import JsonDict if TYPE_CHECKING: @@ -421,8 +433,6 @@ class SearchStore(SearchBackgroundUpdateStore): """ clauses = [] - search_query = _parse_query(self.database_engine, search_term) - args: List[Any] = [] # Make sure we don't explode because the person is in too many rooms. @@ -444,20 +454,24 @@ class SearchStore(SearchBackgroundUpdateStore): count_clauses = clauses if isinstance(self.database_engine, PostgresEngine): + search_query = search_term + tsquery_func = self.database_engine.tsquery_func sql = ( - "SELECT ts_rank_cd(vector, to_tsquery('english', ?)) AS rank," + f"SELECT ts_rank_cd(vector, {tsquery_func}('english', ?)) AS rank," " room_id, event_id" " FROM event_search" - " WHERE vector @@ to_tsquery('english', ?)" + f" WHERE vector @@ {tsquery_func}('english', ?)" ) args = [search_query, search_query] + args count_sql = ( "SELECT room_id, count(*) as count FROM event_search" - " WHERE vector @@ to_tsquery('english', ?)" + f" WHERE vector @@ {tsquery_func}('english', ?)" ) count_args = [search_query] + count_args elif isinstance(self.database_engine, Sqlite3Engine): + search_query = _parse_query_for_sqlite(search_term) + sql = ( "SELECT rank(matchinfo(event_search)) as rank, room_id, event_id" " FROM event_search" @@ -469,7 +483,7 @@ class SearchStore(SearchBackgroundUpdateStore): "SELECT room_id, count(*) as count FROM event_search" " WHERE value MATCH ?" ) - count_args = [search_query] + count_args + count_args = [search_query] + count_args else: # This should be unreachable.
raise Exception("Unrecognized database engine") @@ -501,7 +515,9 @@ class SearchStore(SearchBackgroundUpdateStore): highlights = None if isinstance(self.database_engine, PostgresEngine): - highlights = await self._find_highlights_in_postgres(search_query, events) + highlights = await self._find_highlights_in_postgres( + search_query, events, tsquery_func + ) count_sql += " GROUP BY room_id" @@ -510,7 +526,6 @@ class SearchStore(SearchBackgroundUpdateStore): ) count = sum(row["count"] for row in count_results if row["room_id"] in room_ids) - return { "results": [ {"event": event_map[r["event_id"]], "rank": r["rank"]} @@ -542,9 +557,6 @@ class SearchStore(SearchBackgroundUpdateStore): Each match as a dictionary. """ clauses = [] - - search_query = _parse_query(self.database_engine, search_term) - args: List[Any] = [] # Make sure we don't explode because the person is in too many rooms. @@ -582,20 +594,23 @@ class SearchStore(SearchBackgroundUpdateStore): args.extend([origin_server_ts, origin_server_ts, stream]) if isinstance(self.database_engine, PostgresEngine): + search_query = search_term + tsquery_func = self.database_engine.tsquery_func sql = ( - "SELECT ts_rank_cd(vector, to_tsquery('english', ?)) as rank," + f"SELECT ts_rank_cd(vector, {tsquery_func}('english', ?)) as rank," " origin_server_ts, stream_ordering, room_id, event_id" " FROM event_search" - " WHERE vector @@ to_tsquery('english', ?) AND " + f" WHERE vector @@ {tsquery_func}('english', ?) AND " ) args = [search_query, search_query] + args count_sql = ( "SELECT room_id, count(*) as count FROM event_search" - " WHERE vector @@ to_tsquery('english', ?) AND " + f" WHERE vector @@ {tsquery_func}('english', ?) AND " ) count_args = [search_query] + count_args elif isinstance(self.database_engine, Sqlite3Engine): + # We use CROSS JOIN here to ensure we use the right indexes. # https://sqlite.org/optoverview.html#crossjoin # @@ -614,13 +629,14 @@ class SearchStore(SearchBackgroundUpdateStore): " CROSS JOIN events USING (event_id)" " WHERE " ) + search_query = _parse_query_for_sqlite(search_term) args = [search_query] + args count_sql = ( "SELECT room_id, count(*) as count FROM event_search" " WHERE value MATCH ? AND " ) - count_args = [search_term] + count_args + count_args = [search_query] + count_args else: # This should be unreachable. raise Exception("Unrecognized database engine") @@ -660,7 +676,9 @@ class SearchStore(SearchBackgroundUpdateStore): highlights = None if isinstance(self.database_engine, PostgresEngine): - highlights = await self._find_highlights_in_postgres(search_query, events) + highlights = await self._find_highlights_in_postgres( + search_query, events, tsquery_func + ) count_sql += " GROUP BY room_id" @@ -686,7 +704,7 @@ class SearchStore(SearchBackgroundUpdateStore): } async def _find_highlights_in_postgres( - self, search_query: str, events: List[EventBase] + self, search_query: str, events: List[EventBase], tsquery_func: str ) -> Set[str]: """Given a list of events and a search term, return a list of words that match from the content of the event. @@ -697,6 +715,7 @@ class SearchStore(SearchBackgroundUpdateStore): Args: search_query events: A list of events + tsquery_func: The tsquery_* function to use when making queries Returns: A set of strings. 
@@ -729,7 +748,7 @@ class SearchStore(SearchBackgroundUpdateStore): while stop_sel in value: stop_sel += ">" - query = "SELECT ts_headline(?, to_tsquery('english', ?), %s)" % ( + query = f"SELECT ts_headline(?, {tsquery_func}('english', ?), %s)" % ( _to_postgres_options( { "StartSel": start_sel, @@ -760,20 +779,128 @@ def _to_postgres_options(options_dict: JsonDict) -> str: return "'%s'" % (",".join("%s=%s" % (k, v) for k, v in options_dict.items()),) -def _parse_query(database_engine: BaseDatabaseEngine, search_term: str) -> str: - """Takes a plain unicode string from the user and converts it into a form - that can be passed to database. - We use this so that we can add prefix matching, which isn't something - that is supported by default. +@dataclass +class Phrase: + phrase: List[str] + + +class SearchToken(enum.Enum): + Not = enum.auto() + Or = enum.auto() + And = enum.auto() + + +Token = Union[str, Phrase, SearchToken] +TokenList = List[Token] + + +def _is_stop_word(word: str) -> bool: + # TODO Pull these out of the dictionary: + # https://github.com/postgres/postgres/blob/master/src/backend/snowball/stopwords/english.stop + return word in {"the", "a", "you", "me", "and", "but"} + + +def _tokenize_query(query: str) -> TokenList: + """ + Convert the user-supplied `query` into a TokenList, which can be translated into + some DB-specific syntax. + + The following constructs are supported: + + - phrase queries using "double quotes" + - case-insensitive `or` and `and` operators + - negation of a keyword via a unary hyphen (`-`), e.g. 'include -exclude' + + The following differs from websearch_to_tsquery: + + - Stop words are not removed. + - Unclosed phrases are treated differently. + + """ + tokens: TokenList = [] + + # Find phrases. + in_phrase = False + parts = deque(query.split('"')) + for i, part in enumerate(parts): + # The contents inside double quotes are treated as a phrase; a trailing + # double quote is not implied. + in_phrase = bool(i % 2) and i != (len(parts) - 1) + + # Pull out the individual words, discarding any non-word characters. + words = deque(re.findall(r"([\w\-]+)", part, re.UNICODE)) + + # Phrases have simplified handling of words. + if in_phrase: + # Skip stop words. + phrase = [word for word in words if not _is_stop_word(word)] + + # Consecutive words are implicitly ANDed together. + if tokens and tokens[-1] not in (SearchToken.Not, SearchToken.Or): + tokens.append(SearchToken.And) + + # Add the phrase. + tokens.append(Phrase(phrase)) + continue + + # Otherwise, not in a phrase. + while words: + word = words.popleft() + + if word.startswith("-"): + tokens.append(SearchToken.Not) + + # If there's more of the word left, put it back to be processed again. + word = word[1:] + if word: + words.appendleft(word) + elif word.lower() == "or": + tokens.append(SearchToken.Or) + else: + # Skip stop words. + if _is_stop_word(word): + continue + + # Consecutive words are implicitly ANDed together. + if tokens and tokens[-1] not in (SearchToken.Not, SearchToken.Or): + tokens.append(SearchToken.And) + + # Add the search term. + tokens.append(word) + + return tokens + + +def _tokens_to_sqlite_match_query(tokens: TokenList) -> str: + """ + Convert the list of tokens to a string suitable for passing to sqlite's MATCH. + Assume sqlite was compiled with enhanced query syntax.
+ + Ref: https://www.sqlite.org/fts3.html#full_text_index_queries """ + match_query = [] + for token in tokens: + if isinstance(token, str): + match_query.append(token) + elif isinstance(token, Phrase): + match_query.append('"' + " ".join(token.phrase) + '"') + elif token == SearchToken.Not: + # TODO: SQLite treats NOT as a *binary* operator. Hopefully a search + # term has already been added before this. + match_query.append(" NOT ") + elif token == SearchToken.Or: + match_query.append(" OR ") + elif token == SearchToken.And: + match_query.append(" AND ") + else: + raise ValueError(f"unknown token {token}") + + return "".join(match_query) - # Pull out the individual words, discarding any non-word characters. - results = re.findall(r"([\w\-]+)", search_term, re.UNICODE) - if isinstance(database_engine, PostgresEngine): - return " & ".join(result + ":*" for result in results) - elif isinstance(database_engine, Sqlite3Engine): - return " & ".join(result + "*" for result in results) - else: - # This should be unreachable. - raise Exception("Unrecognized database engine") +def _parse_query_for_sqlite(search_term: str) -> str: + """Takes a plain unicode string from the user and converts it into a form + that can be passed to sqlite's matchinfo(). + """ + return _tokens_to_sqlite_match_query(_tokenize_query(search_term)) diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index d8c0f64d9a..9bf74bbf59 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -170,6 +170,22 @@ class PostgresEngine( """Do we support the `RETURNING` clause in insert/update/delete?""" return True + @property + def tsquery_func(self) -> str: + """ + Selects a tsquery_* func to use. + + Ref: https://www.postgresql.org/docs/current/textsearch-controls.html + + Returns: + The function name. + """ + # Postgres 11 added support for websearch_to_tsquery. + assert self._version is not None + if self._version >= 110000: + return "websearch_to_tsquery" + return "plainto_tsquery" + def is_deadlock(self, error: Exception) -> bool: if isinstance(error, psycopg2.DatabaseError): # https://www.postgresql.org/docs/current/static/errcodes-appendix.html diff --git a/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py b/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py new file mode 100644 index 0000000000..3de0a709eb --- /dev/null +++ b/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py @@ -0,0 +1,62 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json + +from synapse.storage.engines import BaseDatabaseEngine, Sqlite3Engine +from synapse.storage.types import Cursor + + +def run_create(cur: Cursor, database_engine: BaseDatabaseEngine) -> None: + """ + Upgrade the event_search table to use the porter tokenizer if it isn't already using it + + Applies only for sqlite.
+ """ + if not isinstance(database_engine, Sqlite3Engine): + return + + # Rebuild the table event_search table with tokenize=porter configured. + cur.execute("DROP TABLE event_search") + cur.execute( + """ + CREATE VIRTUAL TABLE event_search + USING fts4 (tokenize=porter, event_id, room_id, sender, key, value ) + """ + ) + + # Re-run the background job to re-populate the event_search table. + cur.execute("SELECT MIN(stream_ordering) FROM events") + row = cur.fetchone() + min_stream_id = row[0] + + # If there are not any events, nothing to do. + if min_stream_id is None: + return + + cur.execute("SELECT MAX(stream_ordering) FROM events") + row = cur.fetchone() + max_stream_id = row[0] + + progress = { + "target_min_stream_id_inclusive": min_stream_id, + "max_stream_id_exclusive": max_stream_id + 1, + } + progress_json = json.dumps(progress) + + sql = """ + INSERT into background_updates (ordering, update_name, progress_json) + VALUES (?, ?, ?) + """ + + cur.execute(sql, (7310, "event_search", progress_json)) diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py index e747c6b50e..9ddc19900a 100644 --- a/tests/storage/test_room_search.py +++ b/tests/storage/test_room_search.py @@ -12,11 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import List, Tuple, Union +from unittest.case import SkipTest +from unittest.mock import PropertyMock, patch + +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.api.constants import EventTypes from synapse.api.errors import StoreError from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.storage.databases.main import DataStore +from synapse.storage.databases.main.search import Phrase, SearchToken, _tokenize_query from synapse.storage.engines import PostgresEngine +from synapse.storage.engines.sqlite import Sqlite3Engine +from synapse.util import Clock from tests.unittest import HomeserverTestCase, skip_unless from tests.utils import USE_POSTGRES_FOR_TESTS @@ -187,3 +198,205 @@ class EventSearchInsertionTest(HomeserverTestCase): ), ) self.assertCountEqual(values, ["hi", "2"]) + + +class MessageSearchTest(HomeserverTestCase): + """ + Check message search. + + A powerful way to check the behaviour is to run the following in Postgres >= 11: + + # SELECT websearch_to_tsquery('english', ); + + The result can be compared to the tokenized version for SQLite and Postgres < 11. + + """ + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + room.register_servlets, + ] + + PHRASE = "the quick brown fox jumps over the lazy dog" + + # Each entry is a search query, followed by either a boolean of whether it is + # in the phrase OR a tuple of booleans: whether it matches using websearch + # and using plain search. + COMMON_CASES: List[Tuple[str, Union[bool, Tuple[bool, bool]]]] = [ + ("nope", False), + ("brown", True), + ("quick brown", True), + ("brown quick", True), + ("quick \t brown", True), + ("jump", True), + ("brown nope", False), + ('"brown quick"', (False, True)), + ('"jumps over"', True), + ('"quick fox"', (False, True)), + ("nope OR doublenope", False), + ("furphy OR fox", (True, False)), + ("fox -nope", (True, False)), + ("fox -brown", (False, True)), + ('"fox" quick', True), + ('"fox quick', True), + ('"quick brown', True), + ('" quick "', True), + ('" nope"', False), + ] + # TODO Test non-ASCII cases. 
+ + # Cases that fail on SQLite. + POSTGRES_CASES: List[Tuple[str, Union[bool, Tuple[bool, bool]]]] = [ + # SQLite treats NOT as a binary operator. + ("- fox", (False, True)), + ("- nope", (True, False)), + ('"-fox quick', (False, True)), + # PostgreSQL skips stop words. + ('"the quick brown"', True), + ('"over lazy"', True), + ] + + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + # Register a user and create a room, create some messages + self.register_user("alice", "password") + self.access_token = self.login("alice", "password") + self.room_id = self.helper.create_room_as("alice", tok=self.access_token) + + # Send the phrase as a message and check it was created + response = self.helper.send(self.room_id, self.PHRASE, tok=self.access_token) + self.assertIn("event_id", response) + + def test_tokenize_query(self) -> None: + """Test the custom logic to tokenize a user's query.""" + cases = ( + ("brown", ["brown"]), + ("quick brown", ["quick", SearchToken.And, "brown"]), + ("quick \t brown", ["quick", SearchToken.And, "brown"]), + ('"brown quick"', [Phrase(["brown", "quick"])]), + ("furphy OR fox", ["furphy", SearchToken.Or, "fox"]), + ("fox -brown", ["fox", SearchToken.Not, "brown"]), + ("- fox", [SearchToken.Not, "fox"]), + ('"fox" quick', [Phrase(["fox"]), SearchToken.And, "quick"]), + # No trailing double quote. + ('"fox quick', ["fox", SearchToken.And, "quick"]), + ('"-fox quick', [SearchToken.Not, "fox", SearchToken.And, "quick"]), + ('" quick "', [Phrase(["quick"])]), + ( + 'q"uick brow"n', + [ + "q", + SearchToken.And, + Phrase(["uick", "brow"]), + SearchToken.And, + "n", + ], + ), + ( + '-"quick brown"', + [SearchToken.Not, Phrase(["quick", "brown"])], + ), + ) + + for query, expected in cases: + tokenized = _tokenize_query(query) + self.assertEqual( + tokenized, expected, f"{tokenized} != {expected} for {query}" + ) + + def _check_test_cases( + self, + store: DataStore, + cases: List[Tuple[str, Union[bool, Tuple[bool, bool]]]], + index: int = 0, + ) -> None: + # Run all the test cases versus search_msgs + for query, expect_to_contain in cases: + if isinstance(expect_to_contain, tuple): + expect_to_contain = expect_to_contain[index] + + result = self.get_success( + store.search_msgs([self.room_id], query, ["content.body"]) + ) + self.assertEquals( + result["count"], + 1 if expect_to_contain else 0, + f"expected '{query}' to match '{self.PHRASE}'" + if expect_to_contain + else f"'{query}' unexpectedly matched '{self.PHRASE}'", + ) + self.assertEquals( + len(result["results"]), + 1 if expect_to_contain else 0, + "results array length should match count", + ) + + # Run them again versus search_rooms + for query, expect_to_contain in cases: + if isinstance(expect_to_contain, tuple): + expect_to_contain = expect_to_contain[index] + + result = self.get_success( + store.search_rooms([self.room_id], query, ["content.body"], 10) + ) + self.assertEquals( + result["count"], + 1 if expect_to_contain else 0, + f"expected '{query}' to match '{self.PHRASE}'" + if expect_to_contain + else f"'{query}' unexpectedly matched '{self.PHRASE}'", + ) + self.assertEquals( + len(result["results"]), + 1 if expect_to_contain else 0, + "results array length should match count", + ) + + def test_postgres_web_search_for_phrase(self) -> None: + """ + Test searching for phrases using typical web search syntax, as per postgres' websearch_to_tsquery. + This test is skipped unless the postgres instance supports websearch_to_tsquery.
+ """ + + store = self.hs.get_datastores().main + if not isinstance(store.database_engine, PostgresEngine): + raise SkipTest("Test only applies when postgres is used as the database") + + if store.database_engine.tsquery_func != "websearch_to_tsquery": + raise SkipTest( + "Test only applies when postgres supporting websearch_to_tsquery is used as the database" + ) + + self._check_test_cases(store, self.COMMON_CASES + self.POSTGRES_CASES, index=0) + + def test_postgres_non_web_search_for_phrase(self): + """ + Test postgres searching for phrases without using web search, which is used when websearch_to_tsquery isn't + supported by the current postgres version. + """ + + store = self.hs.get_datastores().main + if not isinstance(store.database_engine, PostgresEngine): + raise SkipTest("Test only applies when postgres is used as the database") + + # Patch supports_websearch_to_tsquery to always return False to ensure we're testing the plainto_tsquery path. + with patch( + "synapse.storage.engines.postgres.PostgresEngine.tsquery_func", + new_callable=PropertyMock, + ) as supports_websearch_to_tsquery: + supports_websearch_to_tsquery.return_value = "plainto_tsquery" + self._check_test_cases( + store, self.COMMON_CASES + self.POSTGRES_CASES, index=1 + ) + + def test_sqlite_search(self): + """ + Test sqlite searching for phrases. + """ + store = self.hs.get_datastores().main + if not isinstance(store.database_engine, Sqlite3Engine): + raise SkipTest("Test only applies when sqlite is used as the database") + + self._check_test_cases(store, self.COMMON_CASES, index=0) -- cgit 1.5.1 From 23fa636ed70b340e642efbe0c71e1addf4299479 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 26 Oct 2022 11:13:29 +0100 Subject: 1.70.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index c2a0a2102f..444d790e80 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.70.0 (2022-10-26) +=========================== + +No significant changes since 1.70.0rc2. + + Synapse 1.70.0rc2 (2022-10-25) ============================== diff --git a/debian/changelog b/debian/changelog index c3139b7271..c3c1cc538a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.70.0) stable; urgency=medium + + * New Synapse release 1.70.0. + + -- Synapse Packaging team Wed, 26 Oct 2022 11:11:50 +0100 + matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium * New Synapse release 1.70.0rc2. 
diff --git a/pyproject.toml b/pyproject.toml index 7cd4a15435..0e8f6af728 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.70.0rc2" +version = "1.70.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From 8756d5c87efc5637da55c9e21d2a4eb2369ba693 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 26 Oct 2022 12:45:41 +0200 Subject: Save login tokens in database (#13844) * Save login tokens in database Signed-off-by: Quentin Gliech * Add upgrade notes * Track login token reuse in a Prometheus metric Signed-off-by: Quentin Gliech --- changelog.d/13844.misc | 1 + docs/upgrade.md | 9 ++ synapse/handlers/auth.py | 64 +++++++-- synapse/module_api/__init__.py | 41 +----- synapse/rest/client/login.py | 3 +- synapse/rest/client/login_token_request.py | 5 +- synapse/storage/databases/main/registration.py | 156 ++++++++++++++++++++- .../schema/main/delta/73/10login_tokens.sql | 35 +++++ synapse/util/macaroons.py | 87 +----------- tests/handlers/test_auth.py | 135 ++++++++++-------- tests/util/test_macaroons.py | 28 ---- 11 files changed, 337 insertions(+), 227 deletions(-) create mode 100644 changelog.d/13844.misc create mode 100644 synapse/storage/schema/main/delta/73/10login_tokens.sql diff --git a/changelog.d/13844.misc b/changelog.d/13844.misc new file mode 100644 index 0000000000..66f4414df7 --- /dev/null +++ b/changelog.d/13844.misc @@ -0,0 +1 @@ +Save login tokens in database and prevent login token reuse. diff --git a/docs/upgrade.md b/docs/upgrade.md index b81385b191..78c34d0c15 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,15 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.71.0 + +## Removal of the `generate_short_term_login_token` module API method + +As announced with the release of [Synapse 1.69.0](#deprecation-of-the-generate_short_term_login_token-module-api-method), the deprecated `generate_short_term_login_token` module method has been removed. + +Modules relying on it can instead use the `create_login_token` method. 
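+
+For example, a module could mint a login token like this (a rough sketch; `api` is assumed to be the `ModuleApi` instance the module was initialised with, and `duration_in_ms` is optional):
+
+```python
+# Create a token for @alice:example.org that can be exchanged via /login,
+# expiring after the default two minutes.
+token = await api.create_login_token(
+    user_id="@alice:example.org",
+    duration_in_ms=2 * 60 * 1000,
+)
+```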
+ + # Upgrading to v1.69.0 ## Changes to the receipts replication streams diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index f5f0e0e7a7..8b9ef25d29 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -38,6 +38,7 @@ from typing import ( import attr import bcrypt import unpaddedbase64 +from prometheus_client import Counter from twisted.internet.defer import CancelledError from twisted.web.server import Request @@ -48,6 +49,7 @@ from synapse.api.errors import ( Codes, InteractiveAuthIncompleteError, LoginError, + NotFoundError, StoreError, SynapseError, UserDeactivatedError, @@ -63,10 +65,14 @@ from synapse.http.server import finish_request, respond_with_html from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.databases.main.registration import ( + LoginTokenExpired, + LoginTokenLookupResult, + LoginTokenReused, +) from synapse.types import JsonDict, Requester, UserID from synapse.util import stringutils as stringutils from synapse.util.async_helpers import delay_cancellation, maybe_awaitable -from synapse.util.macaroons import LoginTokenAttributes from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import base62_encode from synapse.util.threepids import canonicalise_email @@ -80,6 +86,12 @@ logger = logging.getLogger(__name__) INVALID_USERNAME_OR_PASSWORD = "Invalid username or password" +invalid_login_token_counter = Counter( + "synapse_user_login_invalid_login_tokens", + "Counts the number of rejected m.login.token on /login", + ["reason"], +) + def convert_client_dict_legacy_fields_to_identifier( submission: JsonDict, @@ -883,6 +895,25 @@ class AuthHandler: return True + async def create_login_token_for_user_id( + self, + user_id: str, + duration_ms: int = (2 * 60 * 1000), + auth_provider_id: Optional[str] = None, + auth_provider_session_id: Optional[str] = None, + ) -> str: + """Create a short-term login token for the given user and store it in the database. It can be exchanged for an access token via /login; by default it expires after two minutes. """ + login_token = self.generate_login_token() + now = self._clock.time_msec() + expiry_ts = now + duration_ms + await self.store.add_login_token_to_user( + user_id=user_id, + token=login_token, + expiry_ts=expiry_ts, + auth_provider_id=auth_provider_id, + auth_provider_session_id=auth_provider_session_id, + ) + return login_token + async def create_refresh_token_for_user_id( self, user_id: str, @@ -1401,6 +1432,18 @@ class AuthHandler: return None return user_id + def generate_login_token(self) -> str: + """Generates an opaque string, for use as a short-term login token""" + + # we use the following format for login tokens: + # syl_<random string>_<base62 crc check> + + random_string = stringutils.random_string(20) + base = f"syl_{random_string}" + + crc = base62_encode(crc32(base.encode("ascii")), minwidth=6) + return f"{base}_{crc}" + def generate_access_token(self, for_user: UserID) -> str: """Generates an opaque string, for use as an access token""" @@ -1427,16 +1470,17 @@ class AuthHandler: crc = base62_encode(crc32(base.encode("ascii")), minwidth=6) return f"{base}_{crc}" - async def validate_short_term_login_token( - self, login_token: str - ) -> LoginTokenAttributes: + async def consume_login_token(self, login_token: str) -> LoginTokenLookupResult: try: - res = self.macaroon_gen.verify_short_term_login_token(login_token) - except Exception: - raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN) + return await self.store.consume_login_token(login_token) + except LoginTokenExpired:
invalid_login_token_counter.labels("expired").inc() + except LoginTokenReused: + invalid_login_token_counter.labels("reused").inc() + except NotFoundError: + invalid_login_token_counter.labels("not found").inc() - await self.auth_blocking.check_auth_blocking(res.user_id) - return res + raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN) async def delete_access_token(self, access_token: str) -> None: """Invalidate a single access token @@ -1711,7 +1755,7 @@ class AuthHandler: ) # Create a login token - login_token = self.macaroon_gen.generate_short_term_login_token( + login_token = await self.create_login_token_for_user_id( registered_user_id, auth_provider_id=auth_provider_id, auth_provider_session_id=auth_provider_session_id, diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 6a6ae208d1..30e689d00d 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -771,50 +771,11 @@ class ModuleApi: auth_provider_session_id: The session ID got during login from the SSO IdP, if any. """ - # The deprecated `generate_short_term_login_token` method defaulted to an empty - # string for the `auth_provider_id` because of how the underlying macaroon was - # generated. This will change to a proper NULL-able field when the tokens get - # moved to the database. - return self._hs.get_macaroon_generator().generate_short_term_login_token( + return await self._hs.get_auth_handler().create_login_token_for_user_id( user_id, - auth_provider_id or "", - auth_provider_session_id, duration_in_ms, - ) - - def generate_short_term_login_token( - self, - user_id: str, - duration_in_ms: int = (2 * 60 * 1000), - auth_provider_id: str = "", - auth_provider_session_id: Optional[str] = None, - ) -> str: - """Generate a login token suitable for m.login.token authentication - - Added in Synapse v1.9.0. - - This was deprecated in Synapse v1.69.0 in favor of create_login_token, and will - be removed in Synapse 1.71.0. - - Args: - user_id: gives the ID of the user that the token is for - - duration_in_ms: the time that the token will be valid for - - auth_provider_id: the ID of the SSO IdP that the user used to authenticate - to get this token, if any. This is encoded in the token so that - /login can report stats on number of successful logins by IdP. - """ - logger.warn( - "A module configured on this server uses ModuleApi.generate_short_term_login_token(), " - "which is deprecated in favor of ModuleApi.create_login_token(), and will be removed in " - "Synapse 1.71.0", - ) - return self._hs.get_macaroon_generator().generate_short_term_login_token( - user_id, auth_provider_id, auth_provider_session_id, - duration_in_ms, ) @defer.inlineCallbacks diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index f554586ac3..7774f1967d 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -436,8 +436,7 @@ class LoginRestServlet(RestServlet): The body of the JSON response. 
""" token = login_submission["token"] - auth_handler = self.auth_handler - res = await auth_handler.validate_short_term_login_token(token) + res = await self.auth_handler.consume_login_token(token) return await self._complete_login( res.user_id, diff --git a/synapse/rest/client/login_token_request.py b/synapse/rest/client/login_token_request.py index 277b20fb63..43ea21d5e6 100644 --- a/synapse/rest/client/login_token_request.py +++ b/synapse/rest/client/login_token_request.py @@ -57,7 +57,6 @@ class LoginTokenRequestServlet(RestServlet): self.store = hs.get_datastores().main self.clock = hs.get_clock() self.server_name = hs.config.server.server_name - self.macaroon_gen = hs.get_macaroon_generator() self.auth_handler = hs.get_auth_handler() self.token_timeout = hs.config.experimental.msc3882_token_timeout self.ui_auth = hs.config.experimental.msc3882_ui_auth @@ -76,10 +75,10 @@ class LoginTokenRequestServlet(RestServlet): can_skip_ui_auth=False, # Don't allow skipping of UI auth ) - login_token = self.macaroon_gen.generate_short_term_login_token( + login_token = await self.auth_handler.create_login_token_for_user_id( user_id=requester.user.to_string(), auth_provider_id="org.matrix.msc3882.login_token_request", - duration_in_ms=self.token_timeout, + duration_ms=self.token_timeout, ) return ( diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 2996d6bb4d..0255295317 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -21,7 +21,13 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast import attr from synapse.api.constants import UserTypes -from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError +from synapse.api.errors import ( + Codes, + NotFoundError, + StoreError, + SynapseError, + ThreepidValidationError, +) from synapse.config.homeserver import HomeServerConfig from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage.database import ( @@ -50,6 +56,14 @@ class ExternalIDReuseException(Exception): because this external id is given to an other user.""" +class LoginTokenExpired(Exception): + """Exception if the login token sent expired""" + + +class LoginTokenReused(Exception): + """Exception if the login token sent was already used""" + + @attr.s(frozen=True, slots=True, auto_attribs=True) class TokenLookupResult: """Result of looking up an access token. @@ -115,6 +129,20 @@ class RefreshTokenLookupResult: If None, the session can be refreshed indefinitely.""" +@attr.s(auto_attribs=True, frozen=True, slots=True) +class LoginTokenLookupResult: + """Result of looking up a login token.""" + + user_id: str + """The user this token belongs to.""" + + auth_provider_id: Optional[str] + """The SSO Identity Provider that the user authenticated with, to get this token.""" + + auth_provider_session_id: Optional[str] + """The session ID advertised by the SSO Identity Provider.""" + + class RegistrationWorkerStore(CacheInvalidationWorkerStore): def __init__( self, @@ -1789,6 +1817,109 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): "replace_refresh_token", _replace_refresh_token_txn ) + async def add_login_token_to_user( + self, + user_id: str, + token: str, + expiry_ts: int, + auth_provider_id: Optional[str], + auth_provider_session_id: Optional[str], + ) -> None: + """Adds a short-term login token for the given user. + + Args: + user_id: The user ID. 
+ token: The new login token to add. + expiry_ts (milliseconds since the epoch): Time after which the login token + cannot be used. + auth_provider_id: The SSO Identity Provider that the user authenticated with + to get this token, if any + auth_provider_session_id: The session ID advertised by the SSO Identity + Provider, if any. + """ + await self.db_pool.simple_insert( + "login_tokens", + { + "token": token, + "user_id": user_id, + "expiry_ts": expiry_ts, + "auth_provider_id": auth_provider_id, + "auth_provider_session_id": auth_provider_session_id, + }, + desc="add_login_token_to_user", + ) + + def _consume_login_token( + self, + txn: LoggingTransaction, + token: str, + ts: int, + ) -> LoginTokenLookupResult: + values = self.db_pool.simple_select_one_txn( + txn, + "login_tokens", + keyvalues={"token": token}, + retcols=( + "user_id", + "expiry_ts", + "used_ts", + "auth_provider_id", + "auth_provider_session_id", + ), + allow_none=True, + ) + + if values is None: + raise NotFoundError() + + self.db_pool.simple_update_one_txn( + txn, + "login_tokens", + keyvalues={"token": token}, + updatevalues={"used_ts": ts}, + ) + user_id = values["user_id"] + expiry_ts = values["expiry_ts"] + used_ts = values["used_ts"] + auth_provider_id = values["auth_provider_id"] + auth_provider_session_id = values["auth_provider_session_id"] + + # Token was already used + if used_ts is not None: + raise LoginTokenReused() + + # Token expired + if ts > int(expiry_ts): + raise LoginTokenExpired() + + return LoginTokenLookupResult( + user_id=user_id, + auth_provider_id=auth_provider_id, + auth_provider_session_id=auth_provider_session_id, + ) + + async def consume_login_token(self, token: str) -> LoginTokenLookupResult: + """Lookup a login token and consume it. + + Args: + token: The login token. + + Returns: + The data stored with that token, including the `user_id`. Returns `None` if + the token does not exist or if it expired. + + Raises: + NotFound if the login token was not found in database + LoginTokenExpired if the login token expired + LoginTokenReused if the login token was already used + """ + return await self.db_pool.runInteraction( + "consume_login_token", + self._consume_login_token, + token, + self._clock.time_msec(), + ) + @cached() async def is_guest(self, user_id: str) -> bool: res = await self.db_pool.simple_select_one_onecol( @@ -2019,6 +2150,12 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): and hs.config.experimental.msc3866.require_approval_for_new_accounts ) + # Create a background job for removing expired login tokens + if hs.config.worker.run_background_tasks: + self._clock.looping_call( + self._delete_expired_login_tokens, THIRTY_MINUTES_IN_MS + ) + async def add_access_token_to_user( self, user_id: str, @@ -2617,6 +2754,23 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): approved, ) + @wrap_as_background_process("delete_expired_login_tokens") + async def _delete_expired_login_tokens(self) -> None: + """Remove login tokens with expiry dates that have passed.""" + + def _delete_expired_login_tokens_txn(txn: LoggingTransaction, ts: int) -> None: + sql = "DELETE FROM login_tokens WHERE expiry_ts <= ?" 
+ txn.execute(sql, (ts,)) + + # We keep the expired tokens for an extra 5 minutes so we can measure how many + # times a token is being used after its expiry + now = self._clock.time_msec() + await self.db_pool.runInteraction( + "delete_expired_login_tokens", + _delete_expired_login_tokens_txn, + now - (5 * 60 * 1000), + ) + def find_max_generated_user_id_localpart(cur: Cursor) -> int: """ diff --git a/synapse/storage/schema/main/delta/73/10login_tokens.sql b/synapse/storage/schema/main/delta/73/10login_tokens.sql new file mode 100644 index 0000000000..a39b7bcece --- /dev/null +++ b/synapse/storage/schema/main/delta/73/10login_tokens.sql @@ -0,0 +1,35 @@ +/* + * Copyright 2022 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Login tokens are short-lived tokens that are used for the m.login.token +-- login method, mainly during SSO logins +CREATE TABLE login_tokens ( + token TEXT PRIMARY KEY, + user_id TEXT NOT NULL, + expiry_ts BIGINT NOT NULL, + used_ts BIGINT, + auth_provider_id TEXT, + auth_provider_session_id TEXT +); + +-- We're sometimes querying them by their session ID we got from their IDP +CREATE INDEX login_tokens_auth_provider_idx + ON login_tokens (auth_provider_id, auth_provider_session_id); + +-- We're deleting them by their expiration time +CREATE INDEX login_tokens_expiry_time_idx + ON login_tokens (expiry_ts); + diff --git a/synapse/util/macaroons.py b/synapse/util/macaroons.py index df77edcce2..5df03d3ddc 100644 --- a/synapse/util/macaroons.py +++ b/synapse/util/macaroons.py @@ -24,7 +24,7 @@ from typing_extensions import Literal from synapse.util import Clock, stringutils -MacaroonType = Literal["access", "delete_pusher", "session", "login"] +MacaroonType = Literal["access", "delete_pusher", "session"] def get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str: @@ -111,19 +111,6 @@ class OidcSessionData: """The session ID of the ongoing UI Auth ("" if this is a login)""" -@attr.s(slots=True, frozen=True, auto_attribs=True) -class LoginTokenAttributes: - """Data we store in a short-term login token""" - - user_id: str - - auth_provider_id: str - """The SSO Identity Provider that the user authenticated with, to get this token.""" - - auth_provider_session_id: Optional[str] - """The session ID advertised by the SSO Identity Provider.""" - - class MacaroonGenerator: def __init__(self, clock: Clock, location: str, secret_key: bytes): self._clock = clock @@ -165,35 +152,6 @@ class MacaroonGenerator: macaroon.add_first_party_caveat(f"pushkey = {pushkey}") return macaroon.serialize() - def generate_short_term_login_token( - self, - user_id: str, - auth_provider_id: str, - auth_provider_session_id: Optional[str] = None, - duration_in_ms: int = (2 * 60 * 1000), - ) -> str: - """Generate a short-term login token used during SSO logins - - Args: - user_id: The user for which the token is valid. - auth_provider_id: The SSO IdP the user used. 
- auth_provider_session_id: The session ID got during login from the SSO IdP. - - Returns: - A signed token valid for using as a ``m.login.token`` token. - """ - now = self._clock.time_msec() - expiry = now + duration_in_ms - macaroon = self._generate_base_macaroon("login") - macaroon.add_first_party_caveat(f"user_id = {user_id}") - macaroon.add_first_party_caveat(f"time < {expiry}") - macaroon.add_first_party_caveat(f"auth_provider_id = {auth_provider_id}") - if auth_provider_session_id is not None: - macaroon.add_first_party_caveat( - f"auth_provider_session_id = {auth_provider_session_id}" - ) - return macaroon.serialize() - def generate_oidc_session_token( self, state: str, @@ -233,49 +191,6 @@ class MacaroonGenerator: return macaroon.serialize() - def verify_short_term_login_token(self, token: str) -> LoginTokenAttributes: - """Verify a short-term-login macaroon - - Checks that the given token is a valid, unexpired short-term-login token - minted by this server. - - Args: - token: The login token to verify. - - Returns: - A set of attributes carried by this token, including the - ``user_id`` and informations about the SSO IDP used during that - login. - - Raises: - MacaroonVerificationFailedException if the verification failed - """ - macaroon = pymacaroons.Macaroon.deserialize(token) - - v = self._base_verifier("login") - v.satisfy_general(lambda c: c.startswith("user_id = ")) - v.satisfy_general(lambda c: c.startswith("auth_provider_id = ")) - v.satisfy_general(lambda c: c.startswith("auth_provider_session_id = ")) - satisfy_expiry(v, self._clock.time_msec) - v.verify(macaroon, self._secret_key) - - user_id = get_value_from_macaroon(macaroon, "user_id") - auth_provider_id = get_value_from_macaroon(macaroon, "auth_provider_id") - - auth_provider_session_id: Optional[str] = None - try: - auth_provider_session_id = get_value_from_macaroon( - macaroon, "auth_provider_session_id" - ) - except MacaroonVerificationFailedException: - pass - - return LoginTokenAttributes( - user_id=user_id, - auth_provider_id=auth_provider_id, - auth_provider_session_id=auth_provider_session_id, - ) - def verify_guest_token(self, token: str) -> str: """Verify a guest access token macaroon diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index 7106799d44..036dbbc45b 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
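The macaroon-based short-term tokens removed above were stateless, which is precisely why they could not support single-use semantics: nothing server-side recorded that a token had been consumed. The new opaque tokens carry no claims at all; all state lives in the `login_tokens` table. A self-contained sketch of the `syl_<random string>_<base62 crc check>` shape, where the base62 alphabet ordering is an assumption rather than a copy of `synapse.util.stringutils`:

```python
# Illustrative only: mirrors the syl_ login-token shape, not Synapse's
# exact helpers. The alphabet ordering below is an assumption.
import secrets
import string
from zlib import crc32

_BASE62 = string.digits + string.ascii_lowercase + string.ascii_uppercase

def base62_encode(num: int, minwidth: int = 1) -> str:
    res = ""
    while num:
        num, rem = divmod(num, 62)
        res = _BASE62[rem] + res
    return res.rjust(minwidth, "0")  # zero-pad to the minimum width

def generate_login_token() -> str:
    alphabet = string.ascii_letters + string.digits
    base = "syl_" + "".join(secrets.choice(alphabet) for _ in range(20))
    # The trailing CRC lets obviously corrupted tokens be rejected cheaply,
    # before any database lookup.
    return f"{base}_{base62_encode(crc32(base.encode('ascii')), minwidth=6)}"

print(generate_login_token())  # e.g. syl_kX29fQ7wLm3TzAbCdE4r_0fT3bZ
```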
+from typing import Optional from unittest.mock import Mock import pymacaroons @@ -19,6 +20,7 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import AuthError, ResourceLimitError from synapse.rest import admin +from synapse.rest.client import login from synapse.server import HomeServer from synapse.util import Clock @@ -29,6 +31,7 @@ from tests.test_utils import make_awaitable class AuthTestCase(unittest.HomeserverTestCase): servlets = [ admin.register_servlets, + login.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -46,6 +49,23 @@ class AuthTestCase(unittest.HomeserverTestCase): self.user1 = self.register_user("a_user", "pass") + def token_login(self, token: str) -> Optional[str]: + body = { + "type": "m.login.token", + "token": token, + } + + channel = self.make_request( + "POST", + "/_matrix/client/v3/login", + body, + ) + + if channel.code == 200: + return channel.json_body["user_id"] + + return None + def test_macaroon_caveats(self) -> None: token = self.macaroon_generator.generate_guest_access_token("a_user") macaroon = pymacaroons.Macaroon.deserialize(token) @@ -73,49 +93,62 @@ class AuthTestCase(unittest.HomeserverTestCase): v.satisfy_general(verify_guest) v.verify(macaroon, self.hs.config.key.macaroon_secret_key) - def test_short_term_login_token_gives_user_id(self) -> None: - token = self.macaroon_generator.generate_short_term_login_token( - self.user1, "", duration_in_ms=5000 + def test_login_token_gives_user_id(self) -> None: + token = self.get_success( + self.auth_handler.create_login_token_for_user_id( + self.user1, + duration_ms=(5 * 1000), + ) ) - res = self.get_success(self.auth_handler.validate_short_term_login_token(token)) + + res = self.get_success(self.auth_handler.consume_login_token(token)) self.assertEqual(self.user1, res.user_id) - self.assertEqual("", res.auth_provider_id) + self.assertEqual(None, res.auth_provider_id) - # when we advance the clock, the token should be rejected - self.reactor.advance(6) - self.get_failure( - self.auth_handler.validate_short_term_login_token(token), - AuthError, + def test_login_token_reuse_fails(self) -> None: + token = self.get_success( + self.auth_handler.create_login_token_for_user_id( + self.user1, + duration_ms=(5 * 1000), + ) ) - def test_short_term_login_token_gives_auth_provider(self) -> None: - token = self.macaroon_generator.generate_short_term_login_token( - self.user1, auth_provider_id="my_idp" - ) - res = self.get_success(self.auth_handler.validate_short_term_login_token(token)) - self.assertEqual(self.user1, res.user_id) - self.assertEqual("my_idp", res.auth_provider_id) + self.get_success(self.auth_handler.consume_login_token(token)) - def test_short_term_login_token_cannot_replace_user_id(self) -> None: - token = self.macaroon_generator.generate_short_term_login_token( - self.user1, "", duration_in_ms=5000 + self.get_failure( + self.auth_handler.consume_login_token(token), + AuthError, ) - macaroon = pymacaroons.Macaroon.deserialize(token) - res = self.get_success( - self.auth_handler.validate_short_term_login_token(macaroon.serialize()) + def test_login_token_expires(self) -> None: + token = self.get_success( + self.auth_handler.create_login_token_for_user_id( + self.user1, + duration_ms=(5 * 1000), + ) ) - self.assertEqual(self.user1, res.user_id) - - # add another "user_id" caveat, which might allow us to override the - # user_id. 
- macaroon.add_first_party_caveat("user_id = b_user") + # when we advance the clock, the token should be rejected + self.reactor.advance(6) self.get_failure( - self.auth_handler.validate_short_term_login_token(macaroon.serialize()), + self.auth_handler.consume_login_token(token), AuthError, ) + def test_login_token_gives_auth_provider(self) -> None: + token = self.get_success( + self.auth_handler.create_login_token_for_user_id( + self.user1, + auth_provider_id="my_idp", + auth_provider_session_id="11-22-33-44", + duration_ms=(5 * 1000), + ) + ) + res = self.get_success(self.auth_handler.consume_login_token(token)) + self.assertEqual(self.user1, res.user_id) + self.assertEqual("my_idp", res.auth_provider_id) + self.assertEqual("11-22-33-44", res.auth_provider_session_id) + def test_mau_limits_disabled(self) -> None: self.auth_blocking._limit_usage_by_mau = False # Ensure does not throw exception @@ -125,12 +158,12 @@ class AuthTestCase(unittest.HomeserverTestCase): ) ) - self.get_success( - self.auth_handler.validate_short_term_login_token( - self._get_macaroon().serialize() - ) + token = self.get_success( + self.auth_handler.create_login_token_for_user_id(self.user1) ) + self.assertIsNotNone(self.token_login(token)) + def test_mau_limits_exceeded_large(self) -> None: self.auth_blocking._limit_usage_by_mau = True self.hs.get_datastores().main.get_monthly_active_count = Mock( @@ -147,12 +180,10 @@ class AuthTestCase(unittest.HomeserverTestCase): self.hs.get_datastores().main.get_monthly_active_count = Mock( return_value=make_awaitable(self.large_number_of_users) ) - self.get_failure( - self.auth_handler.validate_short_term_login_token( - self._get_macaroon().serialize() - ), - ResourceLimitError, + token = self.get_success( + self.auth_handler.create_login_token_for_user_id(self.user1) ) + self.assertIsNone(self.token_login(token)) def test_mau_limits_parity(self) -> None: # Ensure we're not at the unix epoch. 
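The `token_login` helper above exercises the same endpoint a real client would. Over plain HTTP the exchange looks roughly like this sketch, using the third-party `requests` library and a placeholder homeserver URL:

```python
# Hedged sketch of an m.login.token login; not part of the test suite.
import requests

def token_login(homeserver: str, login_token: str) -> str:
    resp = requests.post(
        f"{homeserver}/_matrix/client/v3/login",
        json={"type": "m.login.token", "token": login_token},
        timeout=10,
    )
    # Expired, reused, and unknown tokens are indistinguishable to the
    # client: all three surface as a 403 M_FORBIDDEN, as implemented in
    # consume_login_token() above.
    resp.raise_for_status()
    return resp.json()["user_id"]

# user_id = token_login("https://matrix.example.org", "syl_...")
```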
@@ -171,12 +202,10 @@ class AuthTestCase(unittest.HomeserverTestCase): ), ResourceLimitError, ) - self.get_failure( - self.auth_handler.validate_short_term_login_token( - self._get_macaroon().serialize() - ), - ResourceLimitError, + token = self.get_success( + self.auth_handler.create_login_token_for_user_id(self.user1) ) + self.assertIsNone(self.token_login(token)) # If in monthly active cohort self.hs.get_datastores().main.user_last_seen_monthly_active = Mock( @@ -187,11 +216,10 @@ class AuthTestCase(unittest.HomeserverTestCase): self.user1, device_id=None, valid_until_ms=None ) ) - self.get_success( - self.auth_handler.validate_short_term_login_token( - self._get_macaroon().serialize() - ) + token = self.get_success( + self.auth_handler.create_login_token_for_user_id(self.user1) ) + self.assertIsNotNone(self.token_login(token)) def test_mau_limits_not_exceeded(self) -> None: self.auth_blocking._limit_usage_by_mau = True @@ -209,14 +237,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.hs.get_datastores().main.get_monthly_active_count = Mock( return_value=make_awaitable(self.small_number_of_users) ) - self.get_success( - self.auth_handler.validate_short_term_login_token( - self._get_macaroon().serialize() - ) - ) - - def _get_macaroon(self) -> pymacaroons.Macaroon: - token = self.macaroon_generator.generate_short_term_login_token( - self.user1, "", duration_in_ms=5000 + token = self.get_success( + self.auth_handler.create_login_token_for_user_id(self.user1) ) - return pymacaroons.Macaroon.deserialize(token) + self.assertIsNotNone(self.token_login(token)) diff --git a/tests/util/test_macaroons.py b/tests/util/test_macaroons.py index 32125f7bb7..40754a4711 100644 --- a/tests/util/test_macaroons.py +++ b/tests/util/test_macaroons.py @@ -84,34 +84,6 @@ class MacaroonGeneratorTestCase(TestCase): ) self.assertEqual(user_id, "@user:tesths") - def test_short_term_login_token(self): - """Test the generation and verification of short-term login tokens""" - token = self.macaroon_generator.generate_short_term_login_token( - user_id="@user:tesths", - auth_provider_id="oidc", - auth_provider_session_id="sid", - duration_in_ms=2 * 60 * 1000, - ) - - info = self.macaroon_generator.verify_short_term_login_token(token) - self.assertEqual(info.user_id, "@user:tesths") - self.assertEqual(info.auth_provider_id, "oidc") - self.assertEqual(info.auth_provider_session_id, "sid") - - # Raises with another secret key - with self.assertRaises(MacaroonVerificationFailedException): - self.other_macaroon_generator.verify_short_term_login_token(token) - - # Wait a minute - self.reactor.pump([60]) - # Shouldn't raise - self.macaroon_generator.verify_short_term_login_token(token) - # Wait another minute - self.reactor.pump([60]) - # Should raise since it expired - with self.assertRaises(MacaroonVerificationFailedException): - self.macaroon_generator.verify_short_term_login_token(token) - def test_oidc_session_token(self): """Test the generation and verification of OIDC session cookies""" state = "arandomstate" -- cgit 1.5.1 From 04fd6221de026a74e8a3e896796d39dcf5ac6e3b Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 26 Oct 2022 14:00:01 +0100 Subject: Fix incorrectly sending authentication tokens to application service as headers (#14301) --- changelog.d/14301.bugfix | 1 + synapse/appservice/api.py | 12 +++++++----- tests/appservice/test_api.py | 8 +++++--- 3 files changed, 13 insertions(+), 8 deletions(-) create mode 100644 changelog.d/14301.bugfix diff --git a/changelog.d/14301.bugfix 
b/changelog.d/14301.bugfix new file mode 100644 index 0000000000..668c1f3b9c --- /dev/null +++ b/changelog.d/14301.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.70.0rc1 where access tokens would be incorrectly sent to application services as headers. Application services which were obtaining access tokens from query parameters were not affected. diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index fbac4375b0..60774b240d 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -123,7 +123,7 @@ class ApplicationServiceApi(SimpleHttpClient): response = await self.get_json( uri, {"access_token": service.hs_token}, - headers={"Authorization": f"Bearer {service.hs_token}"}, + headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) if response is not None: # just an empty json object return True @@ -147,7 +147,7 @@ class ApplicationServiceApi(SimpleHttpClient): response = await self.get_json( uri, {"access_token": service.hs_token}, - headers={"Authorization": f"Bearer {service.hs_token}"}, + headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) if response is not None: # just an empty json object return True @@ -190,7 +190,9 @@ class ApplicationServiceApi(SimpleHttpClient): b"access_token": service.hs_token, } response = await self.get_json( - uri, args=args, headers={"Authorization": f"Bearer {service.hs_token}"} + uri, + args=args, + headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) if not isinstance(response, list): logger.warning( @@ -230,7 +232,7 @@ class ApplicationServiceApi(SimpleHttpClient): info = await self.get_json( uri, {"access_token": service.hs_token}, - headers={"Authorization": f"Bearer {service.hs_token}"}, + headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) if not _is_valid_3pe_metadata(info): @@ -327,7 +329,7 @@ class ApplicationServiceApi(SimpleHttpClient): uri=uri, json_body=body, args={"access_token": service.hs_token}, - headers={"Authorization": f"Bearer {service.hs_token}"}, + headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) if logger.isEnabledFor(logging.DEBUG): logger.debug( diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py index 11008ac1fb..89ee79396f 100644 --- a/tests/appservice/test_api.py +++ b/tests/appservice/test_api.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, List, Mapping +from typing import Any, List, Mapping, Sequence, Union from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor @@ -70,13 +70,15 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): self.request_url = None async def get_json( - url: str, args: Mapping[Any, Any], headers: Mapping[Any, Any] + url: str, + args: Mapping[Any, Any], + headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]], ) -> List[JsonDict]: # Ensure the access token is passed as both a header and query arg. 
if not headers.get("Authorization") or not args.get(b"access_token"): raise RuntimeError("Access token not provided") - self.assertEqual(headers.get("Authorization"), f"Bearer {TOKEN}") + self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"]) self.assertEqual(args.get(b"access_token"), TOKEN) self.request_url = url if url == URL_USER: -- cgit 1.5.1 From 0cfbb3513152b8360155c2d75df50e06ea861fa4 Mon Sep 17 00:00:00 2001 From: Ashish Kumar Date: Wed, 26 Oct 2022 18:51:23 +0400 Subject: fix broken avatar checks when server_name contains a port (#13927) Fixes check_avatar_size_and_mime_type() to successfully update avatars on homeservers running on non-default ports which it would mistakenly treat as remote homeserver while validating the avatar's size and mime type. Signed-off-by: Ashish Kumar ashfame@users.noreply.github.com --- changelog.d/13927.bugfix | 1 + synapse/handlers/profile.py | 6 +++++- tests/handlers/test_profile.py | 49 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13927.bugfix diff --git a/changelog.d/13927.bugfix b/changelog.d/13927.bugfix new file mode 100644 index 0000000000..119cd128e7 --- /dev/null +++ b/changelog.d/13927.bugfix @@ -0,0 +1 @@ +Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index d8ff5289b5..4bf9a047a3 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -307,7 +307,11 @@ class ProfileHandler: if not self.max_avatar_size and not self.allowed_avatar_mimetypes: return True - server_name, _, media_id = parse_and_validate_mxc_uri(mxc) + host, port, media_id = parse_and_validate_mxc_uri(mxc) + if port is not None: + server_name = host + ":" + str(port) + else: + server_name = host if server_name == self.server_name: media_info = await self.store.get_local_media(media_id) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index f88c725a42..675aa023ac 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -14,6 +14,8 @@ from typing import Any, Awaitable, Callable, Dict from unittest.mock import Mock +from parameterized import parameterized + from twisted.test.proto_helpers import MemoryReactor import synapse.types @@ -327,6 +329,53 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertFalse(res) + @unittest.override_config( + {"server_name": "test:8888", "allowed_avatar_mimetypes": ["image/png"]} + ) + def test_avatar_constraint_on_local_server_with_port(self): + """Test that avatar metadata is correctly fetched when the media is on a local + server and the server has an explicit port. 
+ + (This was previously a bug) + """ + local_server_name = self.hs.config.server.server_name + media_id = "local" + local_mxc = f"mxc://{local_server_name}/{media_id}" + + # mock up the existence of the avatar file + self._setup_local_files({media_id: {"mimetype": "image/png"}}) + + # and now check that check_avatar_size_and_mime_type is happy + self.assertTrue( + self.get_success(self.handler.check_avatar_size_and_mime_type(local_mxc)) + ) + + @parameterized.expand([("remote",), ("remote:1234",)]) + @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]}) + def test_check_avatar_on_remote_server(self, remote_server_name: str) -> None: + """Test that avatar metadata is correctly fetched from a remote server""" + media_id = "remote" + remote_mxc = f"mxc://{remote_server_name}/{media_id}" + + # if the media is remote, check_avatar_size_and_mime_type just checks the + # media cache, so we don't need to instantiate a real remote server. It is + # sufficient to poke an entry into the db. + self.get_success( + self.hs.get_datastores().main.store_cached_remote_media( + media_id=media_id, + media_type="image/png", + media_length=50, + origin=remote_server_name, + time_now_ms=self.clock.time_msec(), + upload_name=None, + filesystem_id="xyz", + ) + ) + + self.assertTrue( + self.get_success(self.handler.check_avatar_size_and_mime_type(remote_mxc)) + ) + def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]): """Stores metadata about files in the database. -- cgit 1.5.1 From 0d59ae706a09e95c500a594159fff992260147ac Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 26 Oct 2022 17:22:26 +0100 Subject: Use poetry 1.2 for complement in latest deps (#14305) --- .github/workflows/twisted_trunk.yml | 3 +-- changelog.d/14305.misc | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14305.misc diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index b4e26118c0..f592e632c1 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -151,12 +151,11 @@ jobs: run: | set -x DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx - pipx install poetry==1.1.14 + pipx install poetry==1.2.0 poetry remove -n twisted poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk poetry lock --no-update - # NOT IN 1.1.14 poetry lock --check working-directory: synapse - run: | diff --git a/changelog.d/14305.misc b/changelog.d/14305.misc new file mode 100644 index 0000000000..a77bd947a1 --- /dev/null +++ b/changelog.d/14305.misc @@ -0,0 +1 @@ +Use Poetry 1.2.0 in the Twisted Trunk CI job. -- cgit 1.5.1 From 40fa8294e3096132819287dd0c6d6bd71a408902 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 26 Oct 2022 16:10:55 -0500 Subject: Refactor MSC3030 `/timestamp_to_event` to move away from our snowflake pull from `destination` pattern (#14096) 1. `federation_client.timestamp_to_event(...)` now handles all `destination` looping and uses our generic `_try_destination_list(...)` helper. 2. Consistently handling `NotRetryingDestination` and `FederationDeniedError` across `get_pdu` , backfill, and the generic `_try_destination_list` which is used for many places we use this pattern. 3. 
`get_pdu(...)` now returns `PulledPduInfo` so we know which `destination` we ended up pulling the PDU from --- changelog.d/14096.misc | 1 + synapse/federation/federation_client.py | 130 ++++++++++++++++++++++++----- synapse/handlers/federation.py | 15 ++-- synapse/handlers/federation_event.py | 31 ++++--- synapse/handlers/room.py | 126 +++++++++++----------------- synapse/util/retryutils.py | 2 +- tests/federation/test_federation_client.py | 12 ++- 7 files changed, 191 insertions(+), 126 deletions(-) create mode 100644 changelog.d/14096.misc diff --git a/changelog.d/14096.misc b/changelog.d/14096.misc new file mode 100644 index 0000000000..2c07dc673b --- /dev/null +++ b/changelog.d/14096.misc @@ -0,0 +1 @@ +Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b220ab43fc..fa225182be 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -80,6 +80,18 @@ PDU_RETRY_TIME_MS = 1 * 60 * 1000 T = TypeVar("T") +@attr.s(frozen=True, slots=True, auto_attribs=True) +class PulledPduInfo: + """ + A result object that stores the PDU and info about it like which homeserver we + pulled it from (`pull_origin`) + """ + + pdu: EventBase + # Which homeserver we pulled the PDU from + pull_origin: str + + class InvalidResponseError(RuntimeError): """Helper for _try_destination_list: indicates that the server returned a response we couldn't parse @@ -114,7 +126,9 @@ class FederationClient(FederationBase): self.hostname = hs.hostname self.signing_key = hs.signing_key - self._get_pdu_cache: ExpiringCache[str, EventBase] = ExpiringCache( + # Cache mapping `event_id` to a tuple of the event itself and the `pull_origin` + # (which server we pulled the event from) + self._get_pdu_cache: ExpiringCache[str, Tuple[EventBase, str]] = ExpiringCache( cache_name="get_pdu_cache", clock=self._clock, max_len=1000, @@ -352,11 +366,11 @@ class FederationClient(FederationBase): @tag_args async def get_pdu( self, - destinations: Iterable[str], + destinations: Collection[str], event_id: str, room_version: RoomVersion, timeout: Optional[int] = None, - ) -> Optional[EventBase]: + ) -> Optional[PulledPduInfo]: """Requests the PDU with given origin and ID from the remote home servers. @@ -371,11 +385,11 @@ class FederationClient(FederationBase): moving to the next destination. None indicates no timeout. Returns: - The requested PDU, or None if we were unable to find it. + The requested PDU wrapped in `PulledPduInfo`, or None if we were unable to find it. """ logger.debug( - "get_pdu: event_id=%s from destinations=%s", event_id, destinations + "get_pdu(event_id=%s): from destinations=%s", event_id, destinations ) # TODO: Rate limit the number of times we try and get the same event. @@ -384,19 +398,25 @@ class FederationClient(FederationBase): # it gets persisted to the database), so we cache the results of the lookup. # Note that this is separate to the regular get_event cache which caches # events once they have been persisted. 
-        event = self._get_pdu_cache.get(event_id)
+        get_pdu_cache_entry = self._get_pdu_cache.get(event_id)
+        event = None
+        pull_origin = None
+        if get_pdu_cache_entry:
+            event, pull_origin = get_pdu_cache_entry
 
         # If we don't see the event in the cache, go try to fetch it from the
         # provided remote federated destinations
-        if not event:
+        else:
             pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
 
+            # TODO: We can probably refactor this to use `_try_destination_list`
             for destination in destinations:
                 now = self._clock.time_msec()
                 last_attempt = pdu_attempts.get(destination, 0)
                 if last_attempt + PDU_RETRY_TIME_MS > now:
                     logger.debug(
-                        "get_pdu: skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
+                        "get_pdu(event_id=%s): skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
+                        event_id,
                         destination,
                         last_attempt,
                         PDU_RETRY_TIME_MS,
@@ -411,43 +431,48 @@ class FederationClient(FederationBase):
                         room_version=room_version,
                         timeout=timeout,
                     )
+                    pull_origin = destination
 
                     pdu_attempts[destination] = now
 
                     if event:
                         # Prime the cache
-                        self._get_pdu_cache[event.event_id] = event
+                        self._get_pdu_cache[event.event_id] = (event, pull_origin)
 
                         # Now that we have an event, we can break out of this
                         # loop and stop asking other destinations.
                         break
 
+                except NotRetryingDestination as e:
+                    logger.info("get_pdu(event_id=%s): %s", event_id, e)
+                    continue
+                except FederationDeniedError:
+                    logger.info(
+                        "get_pdu(event_id=%s): Not attempting to fetch PDU from %s because the homeserver is not on our federation whitelist",
+                        event_id,
+                        destination,
+                    )
+                    continue
                 except SynapseError as e:
                     logger.info(
-                        "Failed to get PDU %s from %s because %s",
+                        "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
                         event_id,
                         destination,
                         e,
                     )
                     continue
-                except NotRetryingDestination as e:
-                    logger.info(str(e))
-                    continue
-                except FederationDeniedError as e:
-                    logger.info(str(e))
-                    continue
                 except Exception as e:
                     pdu_attempts[destination] = now
 
                     logger.info(
-                        "Failed to get PDU %s from %s because %s",
+                        "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
                         event_id,
                         destination,
                         e,
                     )
                     continue
 
-        if not event:
+        if not event or not pull_origin:
             return None
 
         # `event` now refers to an object stored in `get_pdu_cache`. Our
@@ -459,7 +484,7 @@
             event.room_version,
         )
 
-        return event_copy
+        return PulledPduInfo(event_copy, pull_origin)
 
     @trace
     @tag_args
@@ -699,12 +724,14 @@
             pdu_origin = get_domain_from_id(pdu.sender)
             if not res and pdu_origin != origin:
                 try:
-                    res = await self.get_pdu(
+                    pulled_pdu_info = await self.get_pdu(
                         destinations=[pdu_origin],
                         event_id=pdu.event_id,
                         room_version=room_version,
                         timeout=10000,
                     )
+                    if pulled_pdu_info is not None:
+                        res = pulled_pdu_info.pdu
                 except SynapseError:
                     pass
 
@@ -806,6 +833,7 @@
         )
 
         for destination in destinations:
+            # We don't want to ask our own server for information we don't have
             if destination == self.server_name:
                 continue
 
@@ -814,9 +842,21 @@
             except (
                 RequestSendFailed,
                 InvalidResponseError,
-                NotRetryingDestination,
             ) as e:
                 logger.warning("Failed to %s via %s: %s", description, destination, e)
+                # Skip to the next homeserver in the list to try.
+ continue + except NotRetryingDestination as e: + logger.info("%s: %s", description, e) + continue + except FederationDeniedError: + logger.info( + "%s: Not attempting to %s from %s because the homeserver is not on our federation whitelist", + description, + description, + destination, + ) + continue except UnsupportedRoomVersionError: raise except HttpResponseException as e: @@ -1609,6 +1649,54 @@ class FederationClient(FederationBase): return result async def timestamp_to_event( + self, *, destinations: List[str], room_id: str, timestamp: int, direction: str + ) -> Optional["TimestampToEventResponse"]: + """ + Calls each remote federating server from `destinations` asking for their closest + event to the given timestamp in the given direction until we get a response. + Also validates the response to always return the expected keys or raises an + error. + + Args: + destinations: The domains of homeservers to try fetching from + room_id: Room to fetch the event from + timestamp: The point in time (inclusive) we should navigate from in + the given direction to find the closest event. + direction: ["f"|"b"] to indicate whether we should navigate forward + or backward from the given timestamp to find the closest event. + + Returns: + A parsed TimestampToEventResponse including the closest event_id + and origin_server_ts or None if no destination has a response. + """ + + async def _timestamp_to_event_from_destination( + destination: str, + ) -> TimestampToEventResponse: + return await self._timestamp_to_event_from_destination( + destination, room_id, timestamp, direction + ) + + try: + # Loop through each homeserver candidate until we get a succesful response + timestamp_to_event_response = await self._try_destination_list( + "timestamp_to_event", + destinations, + # TODO: The requested timestamp may lie in a part of the + # event graph that the remote server *also* didn't have, + # in which case they will have returned another event + # which may be nowhere near the requested timestamp. In + # the future, we may need to reconcile that gap and ask + # other homeservers, and/or extend `/timestamp_to_event` + # to return events on *both* sides of the timestamp to + # help reconcile the gap faster. + _timestamp_to_event_from_destination, + ) + return timestamp_to_event_response + except SynapseError: + return None + + async def _timestamp_to_event_from_destination( self, destination: str, room_id: str, timestamp: int, direction: str ) -> "TimestampToEventResponse": """ diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 4fbc79a6cb..5fc3b8bc8c 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -442,6 +442,15 @@ class FederationHandler: # appropriate stuff. # TODO: We can probably do something more intelligent here. 
return True + except NotRetryingDestination as e: + logger.info("_maybe_backfill_inner: %s", e) + continue + except FederationDeniedError: + logger.info( + "_maybe_backfill_inner: Not attempting to backfill from %s because the homeserver is not on our federation whitelist", + dom, + ) + continue except (SynapseError, InvalidResponseError) as e: logger.info("Failed to backfill from %s because %s", dom, e) continue @@ -477,15 +486,9 @@ class FederationHandler: logger.info("Failed to backfill from %s because %s", dom, e) continue - except NotRetryingDestination as e: - logger.info(str(e)) - continue except RequestSendFailed as e: logger.info("Failed to get backfill from %s because %s", dom, e) continue - except FederationDeniedError as e: - logger.info(e) - continue except Exception as e: logger.exception("Failed to backfill from %s because %s", dom, e) continue diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 7da6316a82..9ca5df7c78 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -58,7 +58,7 @@ from synapse.event_auth import ( ) from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.federation.federation_client import InvalidResponseError +from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo from synapse.logging.context import nested_logging_context from synapse.logging.opentracing import ( SynapseTags, @@ -1517,8 +1517,8 @@ class FederationEventHandler: ) async def backfill_event_id( - self, destination: str, room_id: str, event_id: str - ) -> EventBase: + self, destinations: List[str], room_id: str, event_id: str + ) -> PulledPduInfo: """Backfill a single event and persist it as a non-outlier which means we also pull in all of the state and auth events necessary for it. @@ -1530,24 +1530,21 @@ class FederationEventHandler: Raises: FederationError if we are unable to find the event from the destination """ - logger.info( - "backfill_event_id: event_id=%s from destination=%s", event_id, destination - ) + logger.info("backfill_event_id: event_id=%s", event_id) room_version = await self._store.get_room_version(room_id) - event_from_response = await self._federation_client.get_pdu( - [destination], + pulled_pdu_info = await self._federation_client.get_pdu( + destinations, event_id, room_version, ) - if not event_from_response: + if not pulled_pdu_info: raise FederationError( "ERROR", 404, - "Unable to find event_id=%s from destination=%s to backfill." - % (event_id, destination), + f"Unable to find event_id={event_id} from remote servers to backfill.", affected=event_id, ) @@ -1555,13 +1552,13 @@ class FederationEventHandler: # and auth events to de-outlier it. This also sets up the necessary # `state_groups` for the event. 
await self._process_pulled_events( - destination, - [event_from_response], + pulled_pdu_info.pull_origin, + [pulled_pdu_info.pdu], # Prevent notifications going to clients backfilled=True, ) - return event_from_response + return pulled_pdu_info @trace @tag_args @@ -1584,19 +1581,19 @@ class FederationEventHandler: async def get_event(event_id: str) -> None: with nested_logging_context(event_id): try: - event = await self._federation_client.get_pdu( + pulled_pdu_info = await self._federation_client.get_pdu( [destination], event_id, room_version, ) - if event is None: + if pulled_pdu_info is None: logger.warning( "Server %s didn't return event %s", destination, event_id, ) return - events.append(event) + events.append(pulled_pdu_info.pdu) except Exception as e: logger.warning( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index cc1e5c8f97..de97886ea9 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -49,7 +49,6 @@ from synapse.api.constants import ( from synapse.api.errors import ( AuthError, Codes, - HttpResponseException, LimitExceededError, NotFoundError, StoreError, @@ -60,7 +59,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase from synapse.events.utils import copy_and_fixup_power_levels_contents -from synapse.federation.federation_client import InvalidResponseError from synapse.handlers.relations import BundledAggregations from synapse.module_api import NOT_SPAM from synapse.rest.admin._base import assert_user_is_admin @@ -1472,7 +1470,12 @@ class TimestampLookupHandler: Raises: SynapseError if unable to find any event locally in the given direction """ - + logger.debug( + "get_event_for_timestamp(room_id=%s, timestamp=%s, direction=%s) Finding closest event...", + room_id, + timestamp, + direction, + ) local_event_id = await self.store.get_event_id_for_timestamp( room_id, timestamp, direction ) @@ -1524,85 +1527,54 @@ class TimestampLookupHandler: ) ) - # Loop through each homeserver candidate until we get a succesful response - for domain in likely_domains: - # We don't want to ask our own server for information we don't have - if domain == self.server_name: - continue + remote_response = await self.federation_client.timestamp_to_event( + destinations=likely_domains, + room_id=room_id, + timestamp=timestamp, + direction=direction, + ) + if remote_response is not None: + logger.debug( + "get_event_for_timestamp: remote_response=%s", + remote_response, + ) - try: - remote_response = await self.federation_client.timestamp_to_event( - domain, room_id, timestamp, direction - ) - logger.debug( - "get_event_for_timestamp: response from domain(%s)=%s", - domain, - remote_response, - ) + remote_event_id = remote_response.event_id + remote_origin_server_ts = remote_response.origin_server_ts - remote_event_id = remote_response.event_id - remote_origin_server_ts = remote_response.origin_server_ts - - # Backfill this event so we can get a pagination token for - # it with `/context` and paginate `/messages` from this - # point. - # - # TODO: The requested timestamp may lie in a part of the - # event graph that the remote server *also* didn't have, - # in which case they will have returned another event - # which may be nowhere near the requested timestamp. 
In - # the future, we may need to reconcile that gap and ask - # other homeservers, and/or extend `/timestamp_to_event` - # to return events on *both* sides of the timestamp to - # help reconcile the gap faster. - remote_event = ( - await self.federation_event_handler.backfill_event_id( - domain, room_id, remote_event_id - ) - ) + # Backfill this event so we can get a pagination token for + # it with `/context` and paginate `/messages` from this + # point. + pulled_pdu_info = await self.federation_event_handler.backfill_event_id( + likely_domains, room_id, remote_event_id + ) + remote_event = pulled_pdu_info.pdu - # XXX: When we see that the remote server is not trustworthy, - # maybe we should not ask them first in the future. - if remote_origin_server_ts != remote_event.origin_server_ts: - logger.info( - "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.", - domain, - remote_event_id, - remote_origin_server_ts, - remote_event.origin_server_ts, - ) - - # Only return the remote event if it's closer than the local event - if not local_event or ( - abs(remote_event.origin_server_ts - timestamp) - < abs(local_event.origin_server_ts - timestamp) - ): - logger.info( - "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)", - remote_event_id, - remote_event.origin_server_ts, - timestamp, - local_event.event_id if local_event else None, - local_event.origin_server_ts if local_event else None, - ) - return remote_event_id, remote_origin_server_ts - except (HttpResponseException, InvalidResponseError) as ex: - # Let's not put a high priority on some other homeserver - # failing to respond or giving a random response - logger.debug( - "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s", - domain, - type(ex).__name__, - ex, - ex.args, + # XXX: When we see that the remote server is not trustworthy, + # maybe we should not ask them first in the future. + if remote_origin_server_ts != remote_event.origin_server_ts: + logger.info( + "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.", + pulled_pdu_info.pull_origin, + remote_event_id, + remote_origin_server_ts, + remote_event.origin_server_ts, ) - except Exception: - # But we do want to see some exceptions in our code - logger.warning( - "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception", - domain, - exc_info=True, + + # Only return the remote event if it's closer than the local event + if not local_event or ( + abs(remote_event.origin_server_ts - timestamp) + < abs(local_event.origin_server_ts - timestamp) + ): + logger.info( + "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)", + remote_event_id, + remote_event.origin_server_ts, + timestamp, + local_event.event_id if local_event else None, + local_event.origin_server_ts if local_event else None, ) + return remote_event_id, remote_origin_server_ts # To appease mypy, we have to add both of these conditions to check for # `None`. 
We only expect `local_event` to be `None` when
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index d0a69ff843..dcc037b982 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -51,7 +51,7 @@ class NotRetryingDestination(Exception):
         destination: the domain in question
     """
 
-        msg = "Not retrying server %s." % (destination,)
+        msg = f"Not retrying server {destination} because we tried it recently retry_last_ts={retry_last_ts} and we won't check for another retry_interval={retry_interval}ms."
         super().__init__(msg)
 
         self.retry_last_ts = retry_last_ts
diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py
index 51d3bb8fff..e67f405826 100644
--- a/tests/federation/test_federation_client.py
+++ b/tests/federation/test_federation_client.py
@@ -142,14 +142,14 @@ class FederationClientTest(FederatingHomeserverTestCase):
 
     def test_get_pdu_returns_nothing_when_event_does_not_exist(self):
         """No event should be returned when the event does not exist"""
-        remote_pdu = self.get_success(
+        pulled_pdu_info = self.get_success(
             self.hs.get_federation_client().get_pdu(
                 ["yet.another.server"],
                 "event_should_not_exist",
                 RoomVersions.V9,
             )
         )
-        self.assertEqual(remote_pdu, None)
+        self.assertEqual(pulled_pdu_info, None)
 
     def test_get_pdu(self):
         """Test to make sure an event is returned by `get_pdu()`"""
@@ -169,13 +169,15 @@ class FederationClientTest(FederatingHomeserverTestCase):
         remote_pdu.internal_metadata.outlier = True
 
         # Get the event again. This time it should read it from cache.
-        remote_pdu2 = self.get_success(
+        pulled_pdu_info2 = self.get_success(
             self.hs.get_federation_client().get_pdu(
                 ["yet.another.server"],
                 remote_pdu.event_id,
                 RoomVersions.V9,
             )
         )
+        self.assertIsNotNone(pulled_pdu_info2)
+        remote_pdu2 = pulled_pdu_info2.pdu
 
         # Sanity check that we are working against the same event
         self.assertEqual(remote_pdu.event_id, remote_pdu2.event_id)
@@ -215,13 +217,15 @@
             )
         )
 
-        remote_pdu = self.get_success(
+        pulled_pdu_info = self.get_success(
             self.hs.get_federation_client().get_pdu(
                 ["yet.another.server"],
                 "event_id",
                 RoomVersions.V9,
             )
         )
+        self.assertIsNotNone(pulled_pdu_info)
+        remote_pdu = pulled_pdu_info.pdu
 
         # check the right call got made to the agent
         self._mock_agent.request.assert_called_once_with(
-- cgit 1.5.1

From cbe01ccc3f9c09a0a7233f90200fbcb8ae5245cf Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 27 Oct 2022 10:52:23 +0100
Subject: Reject history insertion during partial joins (#14291)

---
 changelog.d/14291.bugfix          | 1 +
 synapse/rest/client/room_batch.py | 7 +++++++
 2 files changed, 8 insertions(+)
 create mode 100644 changelog.d/14291.bugfix

diff --git a/changelog.d/14291.bugfix b/changelog.d/14291.bugfix
new file mode 100644
index 0000000000..bac5065e94
--- /dev/null
+++ b/changelog.d/14291.bugfix
@@ -0,0 +1 @@
+Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)).
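Stripped of Synapse's specifics, the `_try_destination_list` pattern that the preceding `/timestamp_to_event` refactor standardises reduces to a loop of per-destination attempts in which individual failures are logged and skipped, and only total failure is fatal. A generic sketch with simplified names, not the real implementation:

```python
# Minimal sketch of the "try each destination until one succeeds" pattern;
# FetchFailed stands in for the various per-destination errors
# (NotRetryingDestination, FederationDeniedError, InvalidResponseError, ...).
from typing import Awaitable, Callable, Iterable, TypeVar

T = TypeVar("T")

class FetchFailed(Exception):
    pass

async def try_destination_list(
    description: str,
    destinations: Iterable[str],
    callback: Callable[[str], Awaitable[T]],
) -> T:
    for destination in destinations:
        try:
            return await callback(destination)
        except FetchFailed:
            # Per-destination failures are non-fatal: move on to the next.
            continue
    raise FetchFailed(f"Unable to {description} from any destination")
```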
diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py index dd91dabedd..10be4a781b 100644 --- a/synapse/rest/client/room_batch.py +++ b/synapse/rest/client/room_batch.py @@ -108,6 +108,13 @@ class RoomBatchSendEventRestServlet(RestServlet): errcode=Codes.MISSING_PARAM, ) + if await self.store.is_partial_state_room(room_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Cannot insert history batches until we have fully joined the room", + errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE, + ) + # Verify the batch_id_from_query corresponds to an actual insertion event # and have the batch connected. if batch_id_from_query: -- cgit 1.5.1 From 4dc05f30193935224103e8772b1bbc15293e5cb6 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Thu, 27 Oct 2022 14:16:00 +0200 Subject: Fix presence bug introduced in 1.64 by #13313 (#14243) * Fix presence bug introduced in 1.64 by #13313 Signed-off-by: Mathieu Velten * Add changelog * Add DISTINCT * Apply suggestions from code review Signed-off-by: Mathieu Velten --- changelog.d/14243.bugfix | 1 + synapse/storage/databases/main/roommember.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14243.bugfix diff --git a/changelog.d/14243.bugfix b/changelog.d/14243.bugfix new file mode 100644 index 0000000000..ac0b21c2c5 --- /dev/null +++ b/changelog.d/14243.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 32e1e983a5..ab708b0ba5 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -742,7 +742,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): # user and the set of other users, and then checking if there is any # overlap. sql = f""" - SELECT b.state_key + SELECT DISTINCT b.state_key FROM ( SELECT room_id FROM current_state_events WHERE type = 'm.room.member' AND membership = 'join' AND state_key = ? 
@@ -751,7 +751,6 @@ class RoomMemberWorkerStore(EventsWorkerStore): SELECT room_id, state_key FROM current_state_events WHERE type = 'm.room.member' AND membership = 'join' AND {clause} ) AS b using (room_id) - LIMIT 1 """ txn.execute(sql, (user_id, *args)) -- cgit 1.5.1 From 1357ae869f279a3f0855c1b1c2750eca2887928e Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 27 Oct 2022 15:39:47 +0200 Subject: Add workers settings to configuration manual (#14086) * Add workers settings to configuration manual * Update `pusher_instances` * update url to python logger * update headlines * update links after headline change * remove link from `daemon process` There is no docs in Synapse for this * extend example for `federation_sender_instances` and `pusher_instances` * more infos about stream writers * add link to DAG * update `pusher_instances` * update `worker_listeners` * update `stream_writers` * Update `worker_name` Co-authored-by: David Robertson --- changelog.d/14086.doc | 1 + docs/sample_log_config.yaml | 2 +- docs/usage/configuration/config_documentation.md | 268 +++++++++++++++++++---- docs/workers.md | 100 ++++++--- synapse/config/logger.py | 2 +- 5 files changed, 291 insertions(+), 82 deletions(-) create mode 100644 changelog.d/14086.doc diff --git a/changelog.d/14086.doc b/changelog.d/14086.doc new file mode 100644 index 0000000000..5b4b938759 --- /dev/null +++ b/changelog.d/14086.doc @@ -0,0 +1 @@ +Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). \ No newline at end of file diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml index 3065a0e2d9..6339160d00 100644 --- a/docs/sample_log_config.yaml +++ b/docs/sample_log_config.yaml @@ -6,7 +6,7 @@ # Synapse also supports structured logging for machine readable logs which can # be ingested by ELK stacks. See [2] for details. # -# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema +# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema # [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html version: 1 diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index d81eda52c1..fb5eb42c52 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -99,7 +99,7 @@ modules: config: {} ``` --- -## Server ## +## Server Define your homeserver name and other base options. @@ -159,7 +159,7 @@ including _matrix/...). This is the same URL a user might enter into the 'Custom Homeserver URL' field on their client. If you use Synapse with a reverse proxy, this should be the URL to reach Synapse via the proxy. Otherwise, it should be the URL to reach Synapse's client HTTP listener (see -'listeners' below). +['listeners'](#listeners) below). Defaults to `https:///`. @@ -570,7 +570,7 @@ Example configuration: delete_stale_devices_after: 1y ``` -## Homeserver blocking ## +## Homeserver blocking Useful options for Synapse admins. --- @@ -922,7 +922,7 @@ retention: interval: 1d ``` --- -## TLS ## +## TLS Options related to TLS. @@ -1012,7 +1012,7 @@ federation_custom_ca_list: - myCA3.pem ``` --- -## Federation ## +## Federation Options related to federation. 
@@ -1071,7 +1071,7 @@ Example configuration: allow_device_name_lookup_over_federation: true ``` --- -## Caching ## +## Caching Options related to caching. @@ -1185,7 +1185,7 @@ file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using `systemctl reload matrix-synapse`. --- -## Database ## +## Database Config options related to database settings. --- @@ -1332,20 +1332,21 @@ databases: cp_max: 10 ``` --- -## Logging ## +## Logging Config options related to logging. --- ### `log_config` -This option specifies a yaml python logging config file as described [here](https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema). +This option specifies a yaml python logging config file as described +[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema). Example configuration: ```yaml log_config: "CONFDIR/SERVERNAME.log.config" ``` --- -## Ratelimiting ## +## Ratelimiting Options related to ratelimiting in Synapse. Each ratelimiting configuration is made of two parameters: @@ -1576,7 +1577,7 @@ Example configuration: federation_rr_transactions_per_room_per_second: 40 ``` --- -## Media Store ## +## Media Store Config options related to Synapse's media store. --- @@ -1766,7 +1767,7 @@ url_preview_ip_range_blacklist: - 'ff00::/8' - 'fec0::/10' ``` ----- +--- ### `url_preview_ip_range_whitelist` This option sets a list of IP address CIDR ranges that the URL preview spider is allowed @@ -1860,7 +1861,7 @@ Example configuration: - 'fr;q=0.8' - '*;q=0.7' ``` ----- +--- ### `oembed` oEmbed allows for easier embedding content from a website. It can be @@ -1877,7 +1878,7 @@ oembed: - oembed/my_providers.json ``` --- -## Captcha ## +## Captcha See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha. @@ -1926,7 +1927,7 @@ Example configuration: recaptcha_siteverify_api: "https://my.recaptcha.site" ``` --- -## TURN ## +## TURN Options related to adding a TURN server to Synapse. --- @@ -1947,7 +1948,7 @@ Example configuration: ```yaml turn_shared_secret: "YOUR_SHARED_SECRET" ``` ----- +--- ### `turn_username` and `turn_password` The Username and password if the TURN server needs them and does not use a token. @@ -2366,7 +2367,7 @@ Example configuration: ```yaml session_lifetime: 24h ``` ----- +--- ### `refresh_access_token_lifetime` Time that an access token remains valid for, if the session is using refresh tokens. @@ -2422,7 +2423,7 @@ nonrefreshable_access_token_lifetime: 24h ``` --- -## Metrics ### +## Metrics Config options related to metrics. --- @@ -2519,7 +2520,7 @@ Example configuration: report_stats_endpoint: https://example.com/report-usage-stats/push ``` --- -## API Configuration ## +## API Configuration Config settings related to the client/server API --- @@ -2619,7 +2620,7 @@ Example configuration: form_secret: ``` --- -## Signing Keys ## +## Signing Keys Config options relating to signing keys --- @@ -2728,7 +2729,7 @@ Example configuration: key_server_signing_keys_path: "key_server_signing_keys.key" ``` --- -## Single sign-on integration ## +## Single sign-on integration The following settings can be used to make Synapse use a single sign-on provider for authentication, instead of its internal password database. 
@@ -3348,7 +3349,7 @@ email: email_validation: "[%(server_name)s] Validate your email" ``` --- -## Push ## +## Push Configuration settings related to push notifications --- @@ -3381,7 +3382,7 @@ push: group_unread_count_by_room: false ``` --- -## Rooms ## +## Rooms Config options relating to rooms. --- @@ -3627,7 +3628,7 @@ default_power_level_content_override: ``` --- -## Opentracing ## +## Opentracing Configuration options related to Opentracing support. --- @@ -3670,14 +3671,71 @@ opentracing: false ``` --- -## Workers ## -Configuration options related to workers. +## Coordinating workers +Configuration options related to workers which belong in the main config file +(usually called `homeserver.yaml`). +A Synapse deployment can scale horizontally by running multiple Synapse processes +called _workers_. Incoming requests are distributed between workers to handle higher +loads. Some workers are privileged and can accept requests from other workers. + +As a result, the worker configuration is divided into two parts. + +1. The first part (in this section of the manual) defines which shardable tasks + are delegated to privileged workers. This allows unprivileged workers to + request that a privileged worker act on their behalf. +1. [The second part](#individual-worker-configuration) + controls the behaviour of individual workers in isolation. + +For guidance on setting up workers, see the [worker documentation](../../workers.md). + +--- +### `worker_replication_secret` + +A shared secret used by the replication APIs on the main process to authenticate +HTTP requests from workers. + +By default this value is omitted (equivalently `null`), which means that +traffic between the workers and the main process is not authenticated. + +Example configuration: +```yaml +worker_replication_secret: "secret_secret" +``` +--- +### `start_pushers` + +Controls sending of push notifications on the main process. Set to `false` +if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`. + +Example configuration: +```yaml +start_pushers: false +``` +--- +### `pusher_instances` + +It is possible to run multiple [pusher workers](../../workers.md#synapseapppusher), +in which case the work is balanced across them. Use this setting to list the pushers by +[`worker_name`](#worker_name). Ensure the main process and all pusher workers are +restarted after changing this option. +If no or only one pusher worker is configured, this setting is not necessary. +The main process will send out push notifications by default if you do not disable +it by setting [`start_pushers: false`](#start_pushers). + +Example configuration: +```yaml +start_pushers: false +pusher_instances: + - pusher_worker1 + - pusher_worker2 +``` --- ### `send_federation` Controls sending of outbound federation transactions on the main process. -Set to false if using a federation sender worker. Defaults to true. +Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender). +Defaults to `true`. Example configuration: ```yaml @@ -3686,8 +3744,9 @@ send_federation: false --- ### `federation_sender_instances` -It is possible to run multiple federation sender workers, in which case the -work is balanced across them. Use this setting to list the senders. +It is possible to run multiple +[federation sender workers](../../workers.md#synapseappfederation_sender), in which +case the work is balanced across them. Use this setting to list the senders. 
This configuration setting must be shared between all federation sender workers, and if changed all federation sender workers must be stopped at the same time and then @@ -3696,14 +3755,19 @@ events may be dropped). Example configuration: ```yaml +send_federation: false federation_sender_instances: - federation_sender1 ``` --- ### `instance_map` -When using workers this should be a map from worker name to the +When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP replication listener of the worker, if configured. +Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs +a HTTP replication listener, and that listener should be included in the `instance_map`. +(The main process also needs an HTTP replication listener, but it should not be +listed in the `instance_map`.) Example configuration: ```yaml @@ -3716,8 +3780,11 @@ instance_map: ### `stream_writers` Experimental: When using workers you can define which workers should -handle event persistence and typing notifications. Any worker -specified here must also be in the `instance_map`. +handle writing to streams such as event persistence and typing notifications. +Any worker specified here must also be in the [`instance_map`](#instance_map). + +See the list of available streams in the +[worker documentation](../../workers.md#stream-writers). Example configuration: ```yaml @@ -3728,29 +3795,18 @@ stream_writers: --- ### `run_background_tasks_on` -The worker that is used to run background tasks (e.g. cleaning up expired -data). If not provided this defaults to the main process. +The [worker](../../workers.md#background-tasks) that is used to run +background tasks (e.g. cleaning up expired data). If not provided this +defaults to the main process. Example configuration: ```yaml run_background_tasks_on: worker1 ``` --- -### `worker_replication_secret` - -A shared secret used by the replication APIs to authenticate HTTP requests -from workers. - -By default this is unused and traffic is not authenticated. - -Example configuration: -```yaml -worker_replication_secret: "secret_secret" -``` ### `redis` -Configuration for Redis when using workers. This *must* be enabled when -using workers (unless using old style direct TCP configuration). +Configuration for Redis when using workers. This *must* be enabled when using workers. This setting has the following sub-options: * `enabled`: whether to use Redis support. Defaults to false. * `host` and `port`: Optional host and port to use to connect to redis. Defaults to @@ -3765,7 +3821,123 @@ redis: port: 6379 password: ``` -## Background Updates ## +--- +## Individual worker configuration +These options configure an individual worker, in its worker configuration file. +They should not be provided when configuring the main process. + +Note also the configuration above for +[coordinating a cluster of workers](#coordinating-workers). + +For guidance on setting up workers, see the [worker documentation](../../workers.md). + +--- +### `worker_app` + +The type of worker. The currently available worker applications are listed +in [worker documentation](../../workers.md#available-worker-applications). + +The most common worker is the +[`synapse.app.generic_worker`](../../workers.md#synapseappgeneric_worker). + +Example configuration: +```yaml +worker_app: synapse.app.generic_worker +``` +--- +### `worker_name` + +A unique name for the worker. The worker needs a name so it can be addressed in +other configuration parameters and identified in log files. 
We strongly recommend +giving each worker a unique `worker_name`. + +Example configuration: +```yaml +worker_name: generic_worker1 +``` +--- +### `worker_replication_host` + +The HTTP replication endpoint that it should talk to on the main Synapse process. +The main Synapse process defines this with a `replication` resource in +[`listeners` option](#listeners). + +Example configuration: +```yaml +worker_replication_host: 127.0.0.1 +``` +--- +### `worker_replication_http_port` + +The HTTP replication port that it should talk to on the main Synapse process. +The main Synapse process defines this with a `replication` resource in +[`listeners` option](#listeners). + +Example configuration: +```yaml +worker_replication_http_port: 9093 +``` +--- +### `worker_listeners` + +A worker can handle HTTP requests. To do so, a `worker_listeners` option +must be declared, in the same way as the [`listeners` option](#listeners) +in the shared config. + +Workers declared in [`stream_writers`](#stream_writers) will need to include a +`replication` listener here, in order to accept internal HTTP requests from +other workers. + +Example configuration: +```yaml +worker_listeners: + - type: http + port: 8083 + resources: + - names: [client, federation] +``` +--- +### `worker_daemonize` + +Specifies whether the worker should be started as a daemon process. +If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option +must be omitted or set to `false`. + +Defaults to `false`. + +Example configuration: +```yaml +worker_daemonize: true +``` +--- +### `worker_pid_file` + +When running a worker as a daemon, we need a place to store the +[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker. +This option defines the location of that "pid file". + +This option is required if `worker_daemonize` is `true` and ignored +otherwise. It has no default. + +See also the [`pid_file` option](#pid_file) for the main Synapse process. + +Example configuration: +```yaml +worker_pid_file: DATADIR/generic_worker1.pid +``` +--- +### `worker_log_config` + +This option specifies a yaml python logging config file as described +[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema). +See also the [`log_config` option](#log_config) for the main Synapse process. + +Example configuration: +```yaml +worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml +``` +--- +## Background Updates Configuration settings related to background updates. --- diff --git a/docs/workers.md b/docs/workers.md index c27b3f8bd5..5e1b9ba220 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -88,10 +88,12 @@ shared configuration file. ### Shared configuration Normally, only a couple of changes are needed to make an existing configuration -file suitable for use with workers. First, you need to enable an "HTTP replication -listener" for the main process; and secondly, you need to enable redis-based -replication. Optionally, a shared secret can be used to authenticate HTTP -traffic between workers. For example: +file suitable for use with workers. First, you need to enable an +["HTTP replication listener"](usage/configuration/config_documentation.md#listeners) +for the main process; and secondly, you need to enable +[redis-based replication](usage/configuration/config_documentation.md#redis). +Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret) +can be used to authenticate HTTP traffic between workers. 
For example: ```yaml # extend the existing `listeners` section. This defines the ports that the @@ -111,25 +113,28 @@ redis: enabled: true ``` -See the [configuration manual](usage/configuration/config_documentation.html) for the full documentation of each option. +See the [configuration manual](usage/configuration/config_documentation.md) +for the full documentation of each option. Under **no circumstances** should the replication listener be exposed to the public internet; replication traffic is: * always unencrypted -* unauthenticated, unless `worker_replication_secret` is configured +* unauthenticated, unless [`worker_replication_secret`](usage/configuration/config_documentation.md#worker_replication_secret) + is configured ### Worker configuration In the config file for each worker, you must specify: - * The type of worker (`worker_app`). The currently available worker applications are listed below. - * A unique name for the worker (`worker_name`). + * The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)). + The currently available worker applications are listed [below](#available-worker-applications). + * A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)). * The HTTP replication endpoint that it should talk to on the main synapse process - (`worker_replication_host` and `worker_replication_http_port`) - * If handling HTTP requests, a `worker_listeners` option with an `http` - listener, in the same way as the [`listeners`](usage/configuration/config_documentation.md#listeners) - option in the shared config. + ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and + [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). + * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option + with an `http` listener. * If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for the main process (`worker_main_http_uri`). @@ -146,7 +151,6 @@ plain HTTP endpoint on port 8083 separately serving various endpoints, e.g. Obviously you should configure your reverse-proxy to route the relevant endpoints to the worker (`localhost:8083` in the above example). - ### Running Synapse with workers Finally, you need to start your worker processes. This can be done with either @@ -288,7 +292,8 @@ For multiple workers not handling the SSO endpoints properly, see [#9427](https://github.com/matrix-org/synapse/issues/9427). Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners) -with `client` and `federation` `resources` must be configured in the `worker_listeners` +with `client` and `federation` `resources` must be configured in the +[`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option in the worker config. #### Load balancing @@ -331,9 +336,10 @@ of the main process to a particular worker. To enable this, the worker must have a [HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured, -have a `worker_name` and be listed in the `instance_map` config. The same worker -can handle multiple streams, but unless otherwise documented, each stream can only -have a single writer. 
+have a [`worker_name`](usage/configuration/config_documentation.md#worker_name) +and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map) +config. The same worker can handle multiple streams, but unless otherwise documented, +each stream can only have a single writer. For example, to move event persistence off to a dedicated worker, the shared configuration would include: @@ -360,9 +366,26 @@ streams and the endpoints associated with them: ##### The `events` stream -The `events` stream experimentally supports having multiple writers, where work -is sharded between them by room ID. Note that you *must* restart all worker -instances when adding or removing event persisters. An example `stream_writers` +The `events` stream experimentally supports having multiple writer workers, where load +is sharded between them by room ID. Each writer is called an _event persister_. They are +responsible for +- receiving new events, +- linking them to those already in the room [DAG](development/room-dag-concepts.md), +- persisting them to the DB, and finally +- updating the events stream. + +Because load is sharded in this way, you *must* restart all worker instances when +adding or removing event persisters. + +An `event_persister` should not be mistaken for an `event_creator`. +An `event_creator` listens for requests from clients to create new events and does +so. It will then pass those events over HTTP replication to any configured event +persisters (or the main process if none are configured). + +Note that `event_creator`s and `event_persister`s are implemented using the same +[`synapse.app.generic_worker`](#synapseappgeneric_worker). + +An example [`stream_writers`](usage/configuration/config_documentation.md#stream_writers) configuration with multiple writers: ```yaml @@ -416,16 +439,18 @@ worker. Background tasks are run periodically or started via replication. Exactly which tasks are configured to run depends on your Synapse configuration (e.g. if stats is enabled). This worker doesn't handle any REST endpoints itself. -To enable this, the worker must have a `worker_name` and can be configured to run -background tasks. For example, to move background tasks to a dedicated worker, -the shared configuration would include: +To enable this, the worker must have a unique +[`worker_name`](usage/configuration/config_documentation.md#worker_name) +and can be configured to run background tasks. For example, to move background tasks +to a dedicated worker, the shared configuration would include: ```yaml run_background_tasks_on: background_worker ``` -You might also wish to investigate the `update_user_directory_from_worker` and -`media_instance_running_background_jobs` settings. +You might also wish to investigate the +[`update_user_directory_from_worker`](#updating-the-user-directory) and +[`media_instance_running_background_jobs`](#synapseappmedia_repository) settings. An example for a dedicated background worker instance: @@ -478,13 +503,17 @@ worker application type. ### `synapse.app.pusher` Handles sending push notifications to sygnal and email. Doesn't handle any -REST endpoints itself, but you should set `start_pushers: False` in the +REST endpoints itself, but you should set +[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the shared configuration file to stop the main synapse sending push notifications. 
-To run multiple instances at once the `pusher_instances` option should list all -pusher instances by their worker name, e.g.: +To run multiple instances at once the +[`pusher_instances`](usage/configuration/config_documentation.md#pusher_instances) +option should list all pusher instances by their +[`worker_name`](usage/configuration/config_documentation.md#worker_name), e.g.: ```yaml +start_pushers: false pusher_instances: - pusher_worker1 - pusher_worker2 @@ -512,15 +541,20 @@ Note this worker cannot be load-balanced: only one instance should be active. ### `synapse.app.federation_sender` Handles sending federation traffic to other servers. Doesn't handle any -REST endpoints itself, but you should set `send_federation: False` in the -shared configuration file to stop the main synapse sending this traffic. +REST endpoints itself, but you should set +[`send_federation: false`](usage/configuration/config_documentation.md#send_federation) +in the shared configuration file to stop the main synapse sending this traffic. If running multiple federation senders then you must list each -instance in the `federation_sender_instances` option by their `worker_name`. +instance in the +[`federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances) +option by their +[`worker_name`](usage/configuration/config_documentation.md#worker_name). All instances must be stopped and started when adding or removing instances. For example: ```yaml +send_federation: false federation_sender_instances: - federation_sender1 - federation_sender2 @@ -547,7 +581,9 @@ Handles the media repository. It can handle all endpoints starting with: ^/_synapse/admin/v1/quarantine_media/.*$ ^/_synapse/admin/v1/users/.*/media$ -You should also set `enable_media_repo: False` in the shared configuration +You should also set +[`enable_media_repo: False`](usage/configuration/config_documentation.md#enable_media_repo) +in the shared configuration file to stop the main synapse running background jobs related to managing the media repository. Note that doing so will prevent the main process from being able to handle the above endpoints. diff --git a/synapse/config/logger.py b/synapse/config/logger.py index b62b3b9205..94d1150415 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -53,7 +53,7 @@ DEFAULT_LOG_CONFIG = Template( # Synapse also supports structured logging for machine readable logs which can # be ingested by ELK stacks. See [2] for details. # -# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema +# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema # [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html version: 1 -- cgit 1.5.1 From 67583281e3f8ea923eedbc56a4c85c7ba75d1582 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 27 Oct 2022 09:58:12 -0400 Subject: Fix tests for change in PostgreSQL 14 behavior change. (#14310) PostgreSQL 14 changed the behavior of `websearch_to_tsquery` to improve some behaviour. The tests were hitting those edge-cases about handling of hanging double quotes. This fixes the tests to take into account the PostgreSQL version. 
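The version gate the new test logic relies on can be illustrated in isolation (assumptions: PostgreSQL reports its server version numerically as `major * 10000 + minor`, and the pre/post-14 parses shown in the comments):

```python
# Sketch of the behaviour change described in the commit message. The numeric
# comparison mirrors the `_version < 140000` check in the test changes below;
# the parse results in the comments are illustrative.
def dangling_quote_matches_and_query(pg_numeric_version: int) -> bool:
    # websearch_to_tsquery('english', '"fox quick'):
    #   * before PostgreSQL 14: the unmatched quote is ignored and the input
    #     parses as the AND query `fox & quick`.
    #   * from PostgreSQL 14: the input parses as the phrase `fox <-> quick`.
    return pg_numeric_version < 140000


assert dangling_quote_matches_and_query(130008)  # e.g. PostgreSQL 13.8
assert not dangling_quote_matches_and_query(140005)  # e.g. PostgreSQL 14.5
```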
--- changelog.d/14310.feature | 1 + synapse/storage/databases/main/search.py | 5 ++--- tests/storage/test_room_search.py | 16 ++++++++++++---- 3 files changed, 15 insertions(+), 7 deletions(-) create mode 100644 changelog.d/14310.feature diff --git a/changelog.d/14310.feature b/changelog.d/14310.feature new file mode 100644 index 0000000000..94c8a83212 --- /dev/null +++ b/changelog.d/14310.feature @@ -0,0 +1 @@ +Allow use of Postgres and SQLite full-text search operators in search queries. \ No newline at end of file diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index a89fc54c2c..594b935614 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -824,9 +824,8 @@ def _tokenize_query(query: str) -> TokenList: in_phrase = False parts = deque(query.split('"')) for i, part in enumerate(parts): - # The contents inside double quotes is treated as a phrase, a trailing - # double quote is not implied. - in_phrase = bool(i % 2) and i != (len(parts) - 1) + # The contents inside double quotes is treated as a phrase. + in_phrase = bool(i % 2) # Pull out the individual words, discarding any non-word characters. words = deque(re.findall(r"([\w\-]+)", part, re.UNICODE)) diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py index 9ddc19900a..868b5bee84 100644 --- a/tests/storage/test_room_search.py +++ b/tests/storage/test_room_search.py @@ -239,7 +239,6 @@ class MessageSearchTest(HomeserverTestCase): ("fox -nope", (True, False)), ("fox -brown", (False, True)), ('"fox" quick', True), - ('"fox quick', True), ('"quick brown', True), ('" quick "', True), ('" nope"', False), @@ -269,6 +268,15 @@ class MessageSearchTest(HomeserverTestCase): response = self.helper.send(self.room_id, self.PHRASE, tok=self.access_token) self.assertIn("event_id", response) + # The behaviour of a missing trailing double quote changed in PostgreSQL 14 + # from ignoring the initial double quote to treating it as a phrase. + main_store = homeserver.get_datastores().main + found = False + if isinstance(main_store.database_engine, PostgresEngine): + assert main_store.database_engine._version is not None + found = main_store.database_engine._version < 140000 + self.COMMON_CASES.append(('"fox quick', (found, True))) + def test_tokenize_query(self) -> None: """Test the custom logic to tokenize a user's query.""" cases = ( @@ -280,9 +288,9 @@ class MessageSearchTest(HomeserverTestCase): ("fox -brown", ["fox", SearchToken.Not, "brown"]), ("- fox", [SearchToken.Not, "fox"]), ('"fox" quick', [Phrase(["fox"]), SearchToken.And, "quick"]), - # No trailing double quoe. - ('"fox quick', ["fox", SearchToken.And, "quick"]), - ('"-fox quick', [SearchToken.Not, "fox", SearchToken.And, "quick"]), + # No trailing double quote. 
+ ('"fox quick', [Phrase(["fox", "quick"])]), + ('"-fox quick', [Phrase(["-fox", "quick"])]), ('" quick "', [Phrase(["quick"])]), ( 'q"uick brow"n', -- cgit 1.5.1 From aa70556699e649f46f51a198fb104eecdc0d311b Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 27 Oct 2022 13:29:23 -0500 Subject: Check appservice user interest against the local users instead of all users (`get_users_in_room` mis-use) (#13958) --- changelog.d/13958.bugfix | 1 + docs/upgrade.md | 19 ++++ synapse/appservice/__init__.py | 16 ++- synapse/storage/databases/main/appservice.py | 17 ++- synapse/storage/databases/main/roommember.py | 3 + tests/appservice/test_appservice.py | 10 +- tests/handlers/test_appservice.py | 162 ++++++++++++++++++++++++++- 7 files changed, 214 insertions(+), 14 deletions(-) create mode 100644 changelog.d/13958.bugfix diff --git a/changelog.d/13958.bugfix b/changelog.d/13958.bugfix new file mode 100644 index 0000000000..f9f651bfdc --- /dev/null +++ b/changelog.d/13958.bugfix @@ -0,0 +1 @@ +Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). diff --git a/docs/upgrade.md b/docs/upgrade.md index 78c34d0c15..f095bbc3a6 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -97,6 +97,25 @@ As announced with the release of [Synapse 1.69.0](#deprecation-of-the-generate_s Modules relying on it can instead use the `create_login_token` method. +## Changes to the events received by application services (interest) + +To align with spec (changed in +[MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905)), Synapse now +only considers local users to be interesting. In other words, the `users` namespace +regex is only applied against local users of the homeserver. + +Please note, this probably doesn't affect the expected behavior of your application +service, since an interesting local user in a room still means all messages in the room +(from local or remote users) will still be considered interesting. And matching a room +with the `rooms` or `aliases` namespace regex will still consider all events sent in the +room to be interesting to the application service. + +If one of your application service's `users` regex was intending to match a remote user, +this will no longer match as you expect. The behavioral mismatch between matching all +local users and some remote users is why the spec was changed/clarified and this +caveat is no longer supported. + + # Upgrading to v1.69.0 ## Changes to the receipts replication streams diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 0dfa00df44..500bdde3a9 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -172,12 +172,24 @@ class ApplicationService: Returns: True if this service would like to know about this room. """ - member_list = await store.get_users_in_room( + # We can use `get_local_users_in_room(...)` here because an application service + # can only be interested in local users of the server it's on (ignore any remote + # users that might match the user namespace regex). + # + # In the future, we can consider re-using + # `store.get_app_service_users_in_room` which is very similar to this + # function but has a slightly worse performance than this because we + # have an early escape-hatch if we find a single user that the + # appservice is interested in. 
The juice would be worth the squeeze if + # `store.get_app_service_users_in_room` was used in more places besides + # an experimental MSC. But for now we can avoid doing more work and + # barely using it later. + local_user_ids = await store.get_local_users_in_room( room_id, on_invalidate=cache_context.invalidate ) # check joined member events - for user_id in member_list: + for user_id in local_user_ids: if self.is_interested_in_user(user_id): return True return False diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 64b70a7b28..63046c0527 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -157,10 +157,23 @@ class ApplicationServiceWorkerStore(RoomMemberWorkerStore): app_service: "ApplicationService", cache_context: _CacheContext, ) -> List[str]: - users_in_room = await self.get_users_in_room( + """ + Get all users in a room that the appservice controls. + + Args: + room_id: The room to check in. + app_service: The application service to check interest/control against + + Returns: + List of user IDs that the appservice controls. + """ + # We can use `get_local_users_in_room(...)` here because an application service + # can only be interested in local users of the server it's on (ignore any remote + # users that might match the user namespace regex). + local_users_in_room = await self.get_local_users_in_room( room_id, on_invalidate=cache_context.invalidate ) - return list(filter(app_service.is_interested_in_user, users_in_room)) + return list(filter(app_service.is_interested_in_user, local_users_in_room)) class ApplicationServiceStore(ApplicationServiceWorkerStore): diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index ab708b0ba5..e56a13f21e 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -152,6 +152,9 @@ class RoomMemberWorkerStore(EventsWorkerStore): the forward extremities of those rooms will exclude most members. We may also calculate room state incorrectly for such rooms and believe that a member is or is not in the room when the opposite is true. + + Note: If you only care about users in the room local to the homeserver, use + `get_local_users_in_room(...)` instead which will be more performant. 
""" return await self.db_pool.simple_select_onecol( table="current_state_events", diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 3018d3fc6f..d4dccfc2f0 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -43,7 +43,7 @@ class ApplicationServiceTestCase(unittest.TestCase): self.store = Mock() self.store.get_aliases_for_room = simple_async_mock([]) - self.store.get_users_in_room = simple_async_mock([]) + self.store.get_local_users_in_room = simple_async_mock([]) @defer.inlineCallbacks def test_regex_user_id_prefix_match(self): @@ -129,7 +129,7 @@ class ApplicationServiceTestCase(unittest.TestCase): self.store.get_aliases_for_room = simple_async_mock( ["#irc_foobar:matrix.org", "#athing:matrix.org"] ) - self.store.get_users_in_room = simple_async_mock([]) + self.store.get_local_users_in_room = simple_async_mock([]) self.assertTrue( ( yield defer.ensureDeferred( @@ -184,7 +184,7 @@ class ApplicationServiceTestCase(unittest.TestCase): self.store.get_aliases_for_room = simple_async_mock( ["#xmpp_foobar:matrix.org", "#athing:matrix.org"] ) - self.store.get_users_in_room = simple_async_mock([]) + self.store.get_local_users_in_room = simple_async_mock([]) self.assertFalse( ( yield defer.ensureDeferred( @@ -203,7 +203,7 @@ class ApplicationServiceTestCase(unittest.TestCase): self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@irc_foobar:matrix.org" self.store.get_aliases_for_room = simple_async_mock(["#irc_barfoo:matrix.org"]) - self.store.get_users_in_room = simple_async_mock([]) + self.store.get_local_users_in_room = simple_async_mock([]) self.assertTrue( ( yield defer.ensureDeferred( @@ -236,7 +236,7 @@ class ApplicationServiceTestCase(unittest.TestCase): def test_member_list_match(self): self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) # Note that @irc_fo:here is the AS user. 
- self.store.get_users_in_room = simple_async_mock( + self.store.get_local_users_in_room = simple_async_mock( ["@alice:here", "@irc_fo:here", "@bob:here"] ) self.store.get_aliases_for_room = simple_async_mock([]) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 7e4570f990..144e49d0fd 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -22,7 +22,7 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin import synapse.storage -from synapse.api.constants import EduTypes +from synapse.api.constants import EduTypes, EventTypes from synapse.appservice import ( ApplicationService, TransactionOneTimeKeyCounts, @@ -36,7 +36,7 @@ from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest -from tests.test_utils import make_awaitable, simple_async_mock +from tests.test_utils import event_injection, make_awaitable, simple_async_mock from tests.unittest import override_config from tests.utils import MockClock @@ -390,15 +390,16 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): receipts.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): + self.hs = hs # Mock the ApplicationServiceScheduler's _TransactionController's send method so that # we can track any outgoing ephemeral events self.send_mock = simple_async_mock() - hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock + hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock # type: ignore[assignment] # Mock out application services, and allow defining our own in tests self._services: List[ApplicationService] = [] - self.hs.get_datastores().main.get_app_services = Mock( + self.hs.get_datastores().main.get_app_services = Mock( # type: ignore[assignment] return_value=self._services ) @@ -416,6 +417,157 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): "exclusive_as_user", "password", self.exclusive_as_user_device_id ) + def _notify_interested_services(self): + # This is normally set in `notify_interested_services` but we need to call the + # internal async version so the reactor gets pushed to completion. + self.hs.get_application_service_handler().current_max += 1 + self.get_success( + self.hs.get_application_service_handler()._notify_interested_services( + RoomStreamToken( + None, self.hs.get_application_service_handler().current_max + ) + ) + ) + + @parameterized.expand( + [ + ("@local_as_user:test", True), + # Defining remote users in an application service user namespace regex is a + # footgun since the appservice might assume that it'll receive all events + # sent by that remote user, but it will only receive events in rooms that + # are shared with a local user. So we just remove this footgun possibility + # entirely and we won't notify the application service based on remote + # users. + ("@remote_as_user:remote", False), + ] + ) + def test_match_interesting_room_members( + self, interesting_user: str, should_notify: bool + ): + """ + Test to make sure that an interesting user (local or remote) in the room is + notified as expected when someone else in the room sends a message. 
+ """ + # Register an application service that's interested in the `interesting_user` + interested_appservice = self._register_application_service( + namespaces={ + ApplicationService.NS_USERS: [ + { + "regex": interesting_user, + "exclusive": False, + }, + ], + }, + ) + + # Create a room + alice = self.register_user("alice", "pass") + alice_access_token = self.login("alice", "pass") + room_id = self.helper.create_room_as(room_creator=alice, tok=alice_access_token) + + # Join the interesting user to the room + self.get_success( + event_injection.inject_member_event( + self.hs, room_id, interesting_user, "join" + ) + ) + # Kick the appservice into checking this membership event to get the event out + # of the way + self._notify_interested_services() + # We don't care about the interesting user join event (this test is making sure + # the next thing works) + self.send_mock.reset_mock() + + # Send a message from an uninteresting user + self.helper.send_event( + room_id, + type=EventTypes.Message, + content={ + "msgtype": "m.text", + "body": "message from uninteresting user", + }, + tok=alice_access_token, + ) + # Kick the appservice into checking this new event + self._notify_interested_services() + + if should_notify: + self.send_mock.assert_called_once() + ( + service, + events, + _ephemeral, + _to_device_messages, + _otks, + _fbks, + _device_list_summary, + ) = self.send_mock.call_args[0] + + # Even though the message came from an uninteresting user, it should still + # notify us because the interesting user is joined to the room where the + # message was sent. + self.assertEqual(service, interested_appservice) + self.assertEqual(events[0]["type"], "m.room.message") + self.assertEqual(events[0]["sender"], alice) + else: + self.send_mock.assert_not_called() + + def test_application_services_receive_events_sent_by_interesting_local_user(self): + """ + Test to make sure that a messages sent from a local user can be interesting and + picked up by the appservice. + """ + # Register an application service that's interested in all local users + interested_appservice = self._register_application_service( + namespaces={ + ApplicationService.NS_USERS: [ + { + "regex": ".*", + "exclusive": False, + }, + ], + }, + ) + + # Create a room + alice = self.register_user("alice", "pass") + alice_access_token = self.login("alice", "pass") + room_id = self.helper.create_room_as(room_creator=alice, tok=alice_access_token) + + # We don't care about interesting events before this (this test is making sure + # the next thing works) + self.send_mock.reset_mock() + + # Send a message from the interesting local user + self.helper.send_event( + room_id, + type=EventTypes.Message, + content={ + "msgtype": "m.text", + "body": "message from interesting local user", + }, + tok=alice_access_token, + ) + # Kick the appservice into checking this new event + self._notify_interested_services() + + self.send_mock.assert_called_once() + ( + service, + events, + _ephemeral, + _to_device_messages, + _otks, + _fbks, + _device_list_summary, + ) = self.send_mock.call_args[0] + + # Events sent from an interesting local user should also be picked up as + # interesting to the appservice. + self.assertEqual(service, interested_appservice) + self.assertEqual(events[0]["type"], "m.room.message") + self.assertEqual(events[0]["sender"], alice) + def test_sending_read_receipt_batches_to_application_services(self): """Tests that a large batch of read receipts are sent correctly to interested application services. 
-- cgit 1.5.1 From 6a6e1e8c0711939338f25d8d41d1e4d33d984949 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 28 Oct 2022 10:53:34 +0000 Subject: Fix room creation being rate limited too aggressively since Synapse v1.69.0. (#14314) * Introduce a test for the old behaviour which we want to restore * Reintroduce the old behaviour in a simpler way * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * Use 1 credit instead of 2 for creating a room: be more lenient than before Notably, the UI in Element Web was still broken after restoring to prior behaviour. After discussion, we agreed that it would be sensible to increase the limit. Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/14314.bugfix | 1 + synapse/api/ratelimiting.py | 8 +++++- synapse/handlers/room.py | 16 ++++++++---- tests/rest/client/test_rooms.py | 54 ++++++++++++++++++++++++++++++++++++++--- 4 files changed, 70 insertions(+), 9 deletions(-) create mode 100644 changelog.d/14314.bugfix diff --git a/changelog.d/14314.bugfix b/changelog.d/14314.bugfix new file mode 100644 index 0000000000..8be47ee083 --- /dev/null +++ b/changelog.d/14314.bugfix @@ -0,0 +1 @@ +Fix room creation being rate limited too aggressively since Synapse v1.69.0. \ No newline at end of file diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 044c7d4926..511790c7c5 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -343,6 +343,7 @@ class RequestRatelimiter: requester: Requester, update: bool = True, is_admin_redaction: bool = False, + n_actions: int = 1, ) -> None: """Ratelimits requests. @@ -355,6 +356,8 @@ class RequestRatelimiter: is_admin_redaction: Whether this is a room admin/moderator redacting an event. If so then we may apply different ratelimits depending on config. + n_actions: Multiplier for the number of actions to apply to the + rate limiter at once. Raises: LimitExceededError if the request should be ratelimited @@ -383,7 +386,9 @@ class RequestRatelimiter: if is_admin_redaction and self.admin_redaction_ratelimiter: # If we have separate config for admin redactions, use a separate # ratelimiter as to not have user_ids clash - await self.admin_redaction_ratelimiter.ratelimit(requester, update=update) + await self.admin_redaction_ratelimiter.ratelimit( + requester, update=update, n_actions=n_actions + ) else: # Override rate and burst count per-user await self.request_ratelimiter.ratelimit( @@ -391,4 +396,5 @@ class RequestRatelimiter: rate_hz=messages_per_second, burst_count=burst_count, update=update, + n_actions=n_actions, ) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 638f54051a..d74b675adc 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -559,7 +559,6 @@ class RoomCreationHandler: invite_list=[], initial_state=initial_state, creation_content=creation_content, - ratelimit=False, ) # Transfer membership events @@ -753,6 +752,10 @@ class RoomCreationHandler: ) if ratelimit: + # Rate limit once in advance, but don't rate limit the individual + # events in the room — room creation isn't atomic and it's very + # janky if half the events in the initial state don't make it because + # of rate limiting. 
await self.request_ratelimiter.ratelimit(requester) room_version_id = config.get( @@ -913,7 +916,6 @@ class RoomCreationHandler: room_alias=room_alias, power_level_content_override=power_level_content_override, creator_join_profile=creator_join_profile, - ratelimit=ratelimit, ) if "name" in config: @@ -1037,7 +1039,6 @@ class RoomCreationHandler: room_alias: Optional[RoomAlias] = None, power_level_content_override: Optional[JsonDict] = None, creator_join_profile: Optional[JsonDict] = None, - ratelimit: bool = True, ) -> Tuple[int, str, int]: """Sends the initial events into a new room. Sends the room creation, membership, and power level events into the room sequentially, then creates and batches up the @@ -1046,6 +1047,8 @@ class RoomCreationHandler: `power_level_content_override` doesn't apply when initial state has power level state event content. + Rate limiting should already have been applied by this point. + Returns: A tuple containing the stream ID, event ID and depth of the last event sent to the room. @@ -1144,7 +1147,7 @@ class RoomCreationHandler: creator.user, room_id, "join", - ratelimit=ratelimit, + ratelimit=False, content=creator_join_profile, new_room=True, prev_event_ids=[last_sent_event_id], @@ -1269,7 +1272,10 @@ class RoomCreationHandler: events_to_send.append((encryption_event, encryption_context)) last_event = await self.event_creation_handler.handle_new_client_event( - creator, events_to_send, ignore_shadow_ban=True + creator, + events_to_send, + ignore_shadow_ban=True, + ratelimit=False, ) assert last_event.internal_metadata.stream_ordering is not None return last_event.internal_metadata.stream_ordering, last_event.event_id, depth diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 716366eb90..1084d4ad9d 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -54,6 +54,7 @@ from tests.http.server._base import make_request_with_cancellation_test from tests.storage.test_stream import PaginationTestCase from tests.test_utils import make_awaitable from tests.test_utils.event_injection import create_event +from tests.unittest import override_config PATH_PREFIX = b"/_matrix/client/api/v1" @@ -871,6 +872,41 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) self.assertEqual(join_mock.call_count, 0) + def _create_basic_room(self) -> Tuple[int, object]: + """ + Tries to create a basic room and returns the response code and body. + """ + channel = self.make_request( + "POST", + "/createRoom", + {}, + ) + return channel.code, channel.json_body + + @override_config( + { + "rc_message": {"per_second": 0.2, "burst_count": 10}, + } + ) + def test_room_creation_ratelimiting(self) -> None: + """ + Regression test for #14312, where ratelimiting was made too strict. + Clients should be able to create 10 rooms in a row + without hitting rate limits, using default rate limit config. + (We override rate limiting config back to its default value.) + + To ensure we don't make ratelimiting too generous accidentally, + also check that we can't create an 11th room. + """ + + for _ in range(10): + code, json_body = self._create_basic_room() + self.assertEqual(code, HTTPStatus.OK, json_body) + + # The 11th room hits the rate limit. 
+ code, json_body = self._create_basic_room() + self.assertEqual(code, HTTPStatus.TOO_MANY_REQUESTS, json_body) + class RoomTopicTestCase(RoomBase): """Tests /rooms/$room_id/topic REST events.""" @@ -1390,10 +1426,22 @@ class RoomJoinRatelimitTestCase(RoomBase): ) def test_join_local_ratelimit(self) -> None: """Tests that local joins are actually rate-limited.""" - for _ in range(3): - self.helper.create_room_as(self.user_id) + # Create 4 rooms + room_ids = [ + self.helper.create_room_as(self.user_id, is_public=True) for _ in range(4) + ] + + joiner_user_id = self.register_user("joiner", "secret") + # Now make a new user try to join some of them. - self.helper.create_room_as(self.user_id, expect_code=429) + # The user can join 3 rooms + for room_id in room_ids[0:3]: + self.helper.join(room_id, joiner_user_id) + + # But the user cannot join a 4th room + self.helper.join( + room_ids[3], joiner_user_id, expect_code=HTTPStatus.TOO_MANY_REQUESTS + ) @unittest.override_config( {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}} -- cgit 1.5.1 From cc3a04876f5342bfee7cf6238eaa84295a58a965 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 28 Oct 2022 12:10:37 +0100 Subject: 1.70.1 --- CHANGES.md | 10 ++++++++++ changelog.d/14301.bugfix | 1 - changelog.d/14314.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 5 files changed, 17 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/14301.bugfix delete mode 100644 changelog.d/14314.bugfix diff --git a/CHANGES.md b/CHANGES.md index 444d790e80..ce6661fc05 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,13 @@ +Synapse 1.70.1 (2022-10-28) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.70.0rc1 where access tokens would be incorrectly sent to application services as headers. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301)) +- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314)) + + Synapse 1.70.0 (2022-10-26) =========================== diff --git a/changelog.d/14301.bugfix b/changelog.d/14301.bugfix deleted file mode 100644 index 668c1f3b9c..0000000000 --- a/changelog.d/14301.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.70.0rc1 where access tokens would be incorrectly sent to application services as headers. Application services which were obtaining access tokens from query parameters were not affected. diff --git a/changelog.d/14314.bugfix b/changelog.d/14314.bugfix deleted file mode 100644 index 8be47ee083..0000000000 --- a/changelog.d/14314.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix room creation being rate limited too aggressively since Synapse v1.69.0. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index c3c1cc538a..b9b48e5fbd 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.70.1) stable; urgency=medium + + * New Synapse release 1.70.1. + + -- Synapse Packaging team Fri, 28 Oct 2022 12:10:21 +0100 + matrix-synapse-py3 (1.70.0) stable; urgency=medium * New Synapse release 1.70.0. 
diff --git a/pyproject.toml b/pyproject.toml index 0e8f6af728..4160809276 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.70.0" +version = "1.70.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From e0d9013adfaa6f541974c5a78043e4f49e46aa18 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 28 Oct 2022 12:26:40 +0100 Subject: Tweak changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ce6661fc05..c7845b0e38 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.70.1 (2022-10-28) Bugfixes -------- -- Fix a bug introduced in Synapse 1.70.0rc1 where access tokens would be incorrectly sent to application services as headers. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301)) +- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301)) - Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314)) -- cgit 1.5.1 From 44f0d573cf57772b7f53e918d0c3580cba4fde32 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 28 Oct 2022 14:55:03 +0200 Subject: Add docs for an empty `trusted_key_servers` config option (#13999) * Add docs for an empty `trusted_key_servers` config option * small rewording * Tweak changelog --- changelog.d/13999.doc | 1 + docs/usage/configuration/config_documentation.md | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 changelog.d/13999.doc diff --git a/changelog.d/13999.doc b/changelog.d/13999.doc new file mode 100644 index 0000000000..26538049e4 --- /dev/null +++ b/changelog.d/13999.doc @@ -0,0 +1 @@ +Explain how to disable the use of `trusted_key_servers`. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index fb5eb42c52..97fb505a5f 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2681,6 +2681,12 @@ is still supported for backwards-compatibility, but it is deprecated. warning on start-up. To suppress this warning, set `suppress_key_server_warning` to true. +If the use of a trusted key server has to be deactivated, e.g. in a private +federation or for privacy reasons, this can be realised by setting +an empty array (`trusted_key_servers: []`). Then Synapse will request the keys +directly from the server that owns the keys. If Synapse does not get keys directly +from the server, the events of this server will be rejected. + Options for each entry in the list include: * `server_name`: the name of the server. Required. * `verify_keys`: an optional map from key id to base64-encoded public key. 
-- cgit 1.5.1


From d1efa7b3a42c94ccf1501b52785ead2abb23dbbe Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 28 Oct 2022 16:28:30 +0100
Subject: Add a note that #14300 did not get a fix for 1.70.1 (#14318)

---
 CHANGES.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/CHANGES.md b/CHANGES.md
index c7845b0e38..d34fd09024 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,6 +1,14 @@
 Synapse 1.70.1 (2022-10-28)
 ===========================

+This release fixes some regressions that were discovered in 1.70.0.
+
+[#14300](https://github.com/matrix-org/synapse/issues/14300)
+was previously reported to be a regression in 1.70.0 as well. However, we have
+since concluded that it was limited to the reporter and thus have not needed
+to include any fix for it in 1.70.1.
+
+
 Bugfixes
 --------

-- cgit 1.5.1


From 81815e0561eea91dbf0c29731589fac2e6f98a40 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 28 Oct 2022 11:44:10 -0400
Subject: Switch search SQL to triple-quote strings. (#14311)

For ease of reading we switch from concatenated strings to triple
quote strings.
---
 changelog.d/14311.feature                |   1 +
 synapse/storage/databases/main/search.py | 188 ++++++++++++++++---------------
 2 files changed, 100 insertions(+), 89 deletions(-)
 create mode 100644 changelog.d/14311.feature

diff --git a/changelog.d/14311.feature b/changelog.d/14311.feature
new file mode 100644
index 0000000000..94c8a83212
--- /dev/null
+++ b/changelog.d/14311.feature
@@ -0,0 +1 @@
+Allow use of Postgres and SQLite full-text search operators in search queries.
\ No newline at end of file
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index 594b935614..e9588d1755 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -80,11 +80,11 @@ class SearchWorkerStore(SQLBaseStore):
         if not self.hs.config.server.enable_search:
             return
         if isinstance(self.database_engine, PostgresEngine):
-            sql = (
-                "INSERT INTO event_search"
-                " (event_id, room_id, key, vector, stream_ordering, origin_server_ts)"
-                " VALUES (?,?,?,to_tsvector('english', ?),?,?)"
-            )
+            sql = """
+            INSERT INTO event_search
+            (event_id, room_id, key, vector, stream_ordering, origin_server_ts)
+            VALUES (?,?,?,to_tsvector('english', ?),?,?)
+            """

             args1 = (
                 (
                     entry.event_id,
                     entry.room_id,
                     entry.key,
                     _clean_value_for_search(entry.value),
                     entry.stream_ordering,
                     entry.origin_server_ts,
                 )
                 for entry in entries
             )

             txn.execute_batch(sql, args1)

         elif isinstance(self.database_engine, Sqlite3Engine):
-            sql = (
-                "INSERT INTO event_search (event_id, room_id, key, value)"
-                " VALUES (?,?,?,?)"
-            )
-            args2 = (
-                (
-                    entry.event_id,
-                    entry.room_id,
-                    entry.key,
-                    _clean_value_for_search(entry.value),
-                )
-                for entry in entries
+            self.db_pool.simple_insert_many_txn(
+                txn,
+                table="event_search",
+                keys=("event_id", "room_id", "key", "value"),
+                values=(
+                    (
+                        entry.event_id,
+                        entry.room_id,
+                        entry.key,
+                        _clean_value_for_search(entry.value),
+                    )
+                    for entry in entries
+                ),
             )
-            txn.execute_batch(sql, args2)

         else:
             # This should be unreachable.
@@ -162,15 +162,17 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
         TYPES = ["m.room.name", "m.room.message", "m.room.topic"]

         def reindex_search_txn(txn: LoggingTransaction) -> int:
-            sql = (
-                "SELECT stream_ordering, event_id, room_id, type, json, "
-                " origin_server_ts FROM events"
-                " JOIN event_json USING (room_id, event_id)"
-                " WHERE ? <= stream_ordering AND stream_ordering < ?"
-                " AND (%s)"
-                " ORDER BY stream_ordering DESC"
-                " LIMIT ?"
- ) % (" OR ".join("type = '%s'" % (t,) for t in TYPES),) + sql = """ + SELECT stream_ordering, event_id, room_id, type, json, origin_server_ts + FROM events + JOIN event_json USING (room_id, event_id) + WHERE ? <= stream_ordering AND stream_ordering < ? + AND (%s) + ORDER BY stream_ordering DESC + LIMIT ? + """ % ( + " OR ".join("type = '%s'" % (t,) for t in TYPES), + ) txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) @@ -284,8 +286,10 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): try: c.execute( - "CREATE INDEX CONCURRENTLY event_search_fts_idx" - " ON event_search USING GIN (vector)" + """ + CREATE INDEX CONCURRENTLY event_search_fts_idx + ON event_search USING GIN (vector) + """ ) except psycopg2.ProgrammingError as e: logger.warning( @@ -323,12 +327,16 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): # We create with NULLS FIRST so that when we search *backwards* # we get the ones with non null origin_server_ts *first* c.execute( - "CREATE INDEX CONCURRENTLY event_search_room_order ON event_search(" - "room_id, origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)" + """ + CREATE INDEX CONCURRENTLY event_search_room_order + ON event_search(room_id, origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST) + """ ) c.execute( - "CREATE INDEX CONCURRENTLY event_search_order ON event_search(" - "origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)" + """ + CREATE INDEX CONCURRENTLY event_search_order + ON event_search(origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST) + """ ) conn.set_session(autocommit=False) @@ -345,14 +353,14 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): ) def reindex_search_txn(txn: LoggingTransaction) -> Tuple[int, bool]: - sql = ( - "UPDATE event_search AS es SET stream_ordering = e.stream_ordering," - " origin_server_ts = e.origin_server_ts" - " FROM events AS e" - " WHERE e.event_id = es.event_id" - " AND ? <= e.stream_ordering AND e.stream_ordering < ?" - " RETURNING es.stream_ordering" - ) + sql = """ + UPDATE event_search AS es + SET stream_ordering = e.stream_ordering, origin_server_ts = e.origin_server_ts + FROM events AS e + WHERE e.event_id = es.event_id + AND ? <= e.stream_ordering AND e.stream_ordering < ? + RETURNING es.stream_ordering + """ min_stream_id = max_stream_id - batch_size txn.execute(sql, (min_stream_id, max_stream_id)) @@ -456,33 +464,33 @@ class SearchStore(SearchBackgroundUpdateStore): if isinstance(self.database_engine, PostgresEngine): search_query = search_term tsquery_func = self.database_engine.tsquery_func - sql = ( - f"SELECT ts_rank_cd(vector, {tsquery_func}('english', ?)) AS rank," - " room_id, event_id" - " FROM event_search" - f" WHERE vector @@ {tsquery_func}('english', ?)" - ) + sql = f""" + SELECT ts_rank_cd(vector, {tsquery_func}('english', ?)) AS rank, + room_id, event_id + FROM event_search + WHERE vector @@ {tsquery_func}('english', ?) + """ args = [search_query, search_query] + args - count_sql = ( - "SELECT room_id, count(*) as count FROM event_search" - f" WHERE vector @@ {tsquery_func}('english', ?)" - ) + count_sql = f""" + SELECT room_id, count(*) as count FROM event_search + WHERE vector @@ {tsquery_func}('english', ?) + """ count_args = [search_query] + count_args elif isinstance(self.database_engine, Sqlite3Engine): search_query = _parse_query_for_sqlite(search_term) - sql = ( - "SELECT rank(matchinfo(event_search)) as rank, room_id, event_id" - " FROM event_search" - " WHERE value MATCH ?" 
- ) + sql = """ + SELECT rank(matchinfo(event_search)) as rank, room_id, event_id + FROM event_search + WHERE value MATCH ? + """ args = [search_query] + args - count_sql = ( - "SELECT room_id, count(*) as count FROM event_search" - " WHERE value MATCH ?" - ) + count_sql = """ + SELECT room_id, count(*) as count FROM event_search + WHERE value MATCH ? + """ count_args = [search_query] + count_args else: # This should be unreachable. @@ -588,26 +596,27 @@ class SearchStore(SearchBackgroundUpdateStore): raise SynapseError(400, "Invalid pagination token") clauses.append( - "(origin_server_ts < ?" - " OR (origin_server_ts = ? AND stream_ordering < ?))" + """ + (origin_server_ts < ? OR (origin_server_ts = ? AND stream_ordering < ?)) + """ ) args.extend([origin_server_ts, origin_server_ts, stream]) if isinstance(self.database_engine, PostgresEngine): search_query = search_term tsquery_func = self.database_engine.tsquery_func - sql = ( - f"SELECT ts_rank_cd(vector, {tsquery_func}('english', ?)) as rank," - " origin_server_ts, stream_ordering, room_id, event_id" - " FROM event_search" - f" WHERE vector @@ {tsquery_func}('english', ?) AND " - ) + sql = f""" + SELECT ts_rank_cd(vector, {tsquery_func}('english', ?)) as rank, + origin_server_ts, stream_ordering, room_id, event_id + FROM event_search + WHERE vector @@ {tsquery_func}('english', ?) AND + """ args = [search_query, search_query] + args - count_sql = ( - "SELECT room_id, count(*) as count FROM event_search" - f" WHERE vector @@ {tsquery_func}('english', ?) AND " - ) + count_sql = f""" + SELECT room_id, count(*) as count FROM event_search + WHERE vector @@ {tsquery_func}('english', ?) AND + """ count_args = [search_query] + count_args elif isinstance(self.database_engine, Sqlite3Engine): @@ -619,23 +628,24 @@ class SearchStore(SearchBackgroundUpdateStore): # in the events table to get the topological ordering. We need # to use the indexes in this order because sqlite refuses to # MATCH unless it uses the full text search index - sql = ( - "SELECT rank(matchinfo) as rank, room_id, event_id," - " origin_server_ts, stream_ordering" - " FROM (SELECT key, event_id, matchinfo(event_search) as matchinfo" - " FROM event_search" - " WHERE value MATCH ?" - " )" - " CROSS JOIN events USING (event_id)" - " WHERE " + sql = """ + SELECT + rank(matchinfo) as rank, room_id, event_id, origin_server_ts, stream_ordering + FROM ( + SELECT key, event_id, matchinfo(event_search) as matchinfo + FROM event_search + WHERE value MATCH ? ) + CROSS JOIN events USING (event_id) + WHERE + """ search_query = _parse_query_for_sqlite(search_term) args = [search_query] + args - count_sql = ( - "SELECT room_id, count(*) as count FROM event_search" - " WHERE value MATCH ? AND " - ) + count_sql = """ + SELECT room_id, count(*) as count FROM event_search + WHERE value MATCH ? AND + """ count_args = [search_query] + count_args else: # This should be unreachable. @@ -647,10 +657,10 @@ class SearchStore(SearchBackgroundUpdateStore): # We add an arbitrary limit here to ensure we don't try to pull the # entire table from the database. if isinstance(self.database_engine, PostgresEngine): - sql += ( - " ORDER BY origin_server_ts DESC NULLS LAST," - " stream_ordering DESC NULLS LAST LIMIT ?" - ) + sql += """ + ORDER BY origin_server_ts DESC NULLS LAST, stream_ordering DESC NULLS LAST + LIMIT ? + """ elif isinstance(self.database_engine, Sqlite3Engine): sql += " ORDER BY origin_server_ts DESC, stream_ordering DESC LIMIT ?" 
         else:
-- cgit 1.5.1


From 730b13dbc9e48181b1aaf38be870ec21364b1e9c Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Fri, 28 Oct 2022 17:04:02 +0100
Subject: Improve `RawHeaders` type hints (#14303)

---
 changelog.d/14303.misc        |  1 +
 synapse/app/generic_worker.py |  8 ++++----
 synapse/http/client.py        | 24 +++++++++++++++++++-----
 3 files changed, 24 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/14303.misc

diff --git a/changelog.d/14303.misc b/changelog.d/14303.misc
new file mode 100644
index 0000000000..24ce238223
--- /dev/null
+++ b/changelog.d/14303.misc
@@ -0,0 +1 @@
+Improve type hinting of `RawHeaders`.
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 2a9f039367..cb5892f041 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -178,13 +178,13 @@ class KeyUploadServlet(RestServlet):
             # Proxy headers from the original request, such as the auth headers
             # (in case the access token is there) and the original IP /
             # User-Agent of the request.
-            headers = {
-                header: request.requestHeaders.getRawHeaders(header, [])
+            headers: Dict[bytes, List[bytes]] = {
+                header: list(request.requestHeaders.getRawHeaders(header, []))
                 for header in (b"Authorization", b"User-Agent")
             }
             # Add the previous hop to the X-Forwarded-For header.
-            x_forwarded_for = request.requestHeaders.getRawHeaders(
-                b"X-Forwarded-For", []
+            x_forwarded_for = list(
+                request.requestHeaders.getRawHeaders(b"X-Forwarded-For", [])
             )
             # we use request.client here, since we want the previous hop, not the
             # original client (as returned by request.getClientAddress()).
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 084d0a5b84..4eb740c040 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -25,7 +25,6 @@ from typing import (
     List,
     Mapping,
     Optional,
-    Sequence,
     Tuple,
     Union,
 )
@@ -90,14 +89,29 @@ incoming_responses_counter = Counter(
     "synapse_http_client_responses", "", ["method", "code"]
 )

-# the type of the headers list, to be passed to the t.w.h.Headers.
-# Actually we can mix str and bytes keys, but Mapping treats 'key' as invariant so
-# we simplify.
+# the type of the headers map, to be passed to the t.w.h.Headers.
+#
+# The actual type accepted by Twisted is
+#   Mapping[Union[str, bytes], Sequence[Union[str, bytes]]],
+# allowing us to mix and match str and bytes freely. However: any str is also a
+# Sequence[str]; passing a header string value which is a standalone str is
+# interpreted as a sequence of 1-codepoint strings. This is a disastrous footgun.
+# We use a narrower value type (RawHeaderValue) to avoid this footgun.
+#
+# We also simplify the keys to be either all str or all bytes. This helps because
+# Dict[K, V] is invariant in K (and indeed V).
 RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValue"]]

 # the value actually has to be a List, but List is invariant so we can't specify that
 # the entries can either be Lists or bytes.
-RawHeaderValue = Sequence[Union[str, bytes]]
+RawHeaderValue = Union[
+    List[str],
+    List[bytes],
+    List[Union[str, bytes]],
+    Tuple[str, ...],
+    Tuple[bytes, ...],
+    Tuple[Union[str, bytes], ...],
+]


 def check_against_blacklist(
-- cgit 1.5.1


From 7911e2835df7b4bf1dec98b09da89beda65e2ab2 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 28 Oct 2022 18:06:02 +0100
Subject: Prevent federation user keys query from returning device names if disallowed (#14304)

---
 changelog.d/14304.bugfix                          |  1 +
 synapse/handlers/e2e_keys.py                      | 37 ++++++++++++++++++---
 synapse/storage/databases/main/end_to_end_keys.py | 17 ++++++++---
 3 files changed, 46 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/14304.bugfix

diff --git a/changelog.d/14304.bugfix b/changelog.d/14304.bugfix
new file mode 100644
index 0000000000..b8d4d91034
--- /dev/null
+++ b/changelog.d/14304.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`.
\ No newline at end of file
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 09a2492afc..a9912c467d 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -49,6 +49,7 @@ logger = logging.getLogger(__name__)

 class E2eKeysHandler:
     def __init__(self, hs: "HomeServer"):
+        self.config = hs.config
         self.store = hs.get_datastores().main
         self.federation = hs.get_federation_client()
         self.device_handler = hs.get_device_handler()
@@ -431,13 +432,17 @@ class E2eKeysHandler:
     @trace
     @cancellable
     async def query_local_devices(
-        self, query: Mapping[str, Optional[List[str]]]
+        self,
+        query: Mapping[str, Optional[List[str]]],
+        include_displaynames: bool = True,
     ) -> Dict[str, Dict[str, dict]]:
         """Get E2E device keys for local users

         Args:
             query: map from user_id to a list of devices
                 to query (None for all devices)
+            include_displaynames: Whether to include device displaynames in the returned
+                device details.

         Returns:
             A map from user_id -> device_id -> device details
@@ -469,7 +474,9 @@ class E2eKeysHandler:
             # make sure that each queried user appears in the result dict
             result_dict[user_id] = {}

-        results = await self.store.get_e2e_device_keys_for_cs_api(local_query)
+        results = await self.store.get_e2e_device_keys_for_cs_api(
+            local_query, include_displaynames
+        )

         # Build the result structure
         for user_id, device_keys in results.items():
@@ -482,11 +489,33 @@ class E2eKeysHandler:
     async def on_federation_query_client_keys(
         self, query_body: Dict[str, Dict[str, Optional[List[str]]]]
     ) -> JsonDict:
-        """Handle a device key query from a federated server"""
+        """Handle a device key query from a federated server:
+
+        Handles the path: GET /_matrix/federation/v1/users/keys/query
+
+        Args:
+            query_body: The body of the query request. Should contain a key
+                "device_keys" that maps to a dictionary of user IDs -> list of
+                device IDs. If the list of device IDs is empty, all devices of
+                that user will be queried.
+
+        Returns:
+            A json dictionary containing the following:
+                - device_keys: A dictionary containing the requested device information.
+                - master_keys: An optional dictionary of user ID -> master cross-signing
+                    key info.
+                - self_signing_key: An optional dictionary of user ID -> self-signing
+                    key info.
+ """ device_keys_query: Dict[str, Optional[List[str]]] = query_body.get( "device_keys", {} ) - res = await self.query_local_devices(device_keys_query) + res = await self.query_local_devices( + device_keys_query, + include_displaynames=( + self.config.federation.allow_device_name_lookup_over_federation + ), + ) ret = {"device_keys": res} # add in the cross-signing keys diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 8a10ae800c..2a4f58ed92 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -139,11 +139,15 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @trace @cancellable async def get_e2e_device_keys_for_cs_api( - self, query_list: List[Tuple[str, Optional[str]]] + self, + query_list: List[Tuple[str, Optional[str]]], + include_displaynames: bool = True, ) -> Dict[str, Dict[str, JsonDict]]: """Fetch a list of device keys, formatted suitably for the C/S API. Args: - query_list(list): List of pairs of user_ids and device_ids. + query_list: List of pairs of user_ids and device_ids. + include_displaynames: Whether to include the displayname of returned devices + (if one exists). Returns: Dict mapping from user-id to dict mapping from device_id to key data. The key data will be a dict in the same format as the @@ -166,9 +170,12 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker continue r["unsigned"] = {} - display_name = device_info.display_name - if display_name is not None: - r["unsigned"]["device_display_name"] = display_name + if include_displaynames: + # Include the device's display name in the "unsigned" dictionary + display_name = device_info.display_name + if display_name is not None: + r["unsigned"]["device_display_name"] = display_name + rv[user_id][device_id] = r return rv -- cgit 1.5.1 From 2bb2c32e8ed5642a5bf3ba1e8c49e10cecc88905 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 31 Oct 2022 13:02:07 +0000 Subject: Avoid incrementing bg process utime/stime counters by negative durations (#14323) --- changelog.d/14323.bugfix | 1 + mypy.ini | 4 +- synapse/metrics/background_process_metrics.py | 6 +- tests/metrics/__init__.py | 0 tests/metrics/test_background_process_metrics.py | 19 +++ tests/metrics/test_metrics.py | 206 +++++++++++++++++++++++ tests/test_metrics.py | 200 ---------------------- 7 files changed, 233 insertions(+), 203 deletions(-) create mode 100644 changelog.d/14323.bugfix create mode 100644 tests/metrics/__init__.py create mode 100644 tests/metrics/test_background_process_metrics.py create mode 100644 tests/metrics/test_metrics.py delete mode 100644 tests/test_metrics.py diff --git a/changelog.d/14323.bugfix b/changelog.d/14323.bugfix new file mode 100644 index 0000000000..da39bc020c --- /dev/null +++ b/changelog.d/14323.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 0.34.0rc2 where logs could include error spam when background processes are measured as taking a negative amount of time. 
diff --git a/mypy.ini b/mypy.ini index 34b4523e00..8f1141a239 100644 --- a/mypy.ini +++ b/mypy.ini @@ -56,7 +56,6 @@ exclude = (?x) |tests/rest/media/v1/test_media_storage.py |tests/server.py |tests/server_notices/test_resource_limits_server_notices.py - |tests/test_metrics.py |tests/test_state.py |tests/test_terms_auth.py |tests/util/caches/test_cached_call.py @@ -106,6 +105,9 @@ disallow_untyped_defs = False [mypy-tests.handlers.test_user_directory] disallow_untyped_defs = True +[mypy-tests.metrics.test_background_process_metrics] +disallow_untyped_defs = True + [mypy-tests.push.test_bulk_push_rule_evaluator] disallow_untyped_defs = True diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 7a1516d3a8..9ea4e23b31 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -174,8 +174,10 @@ class _BackgroundProcess: diff = new_stats - self._reported_stats self._reported_stats = new_stats - _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime) - _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime) + # For unknown reasons, the difference in times can be negative. See comment in + # synapse.http.request_metrics.RequestMetrics.update_metrics. + _background_process_ru_utime.labels(self.desc).inc(max(diff.ru_utime, 0)) + _background_process_ru_stime.labels(self.desc).inc(max(diff.ru_stime, 0)) _background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count) _background_process_db_txn_duration.labels(self.desc).inc( diff.db_txn_duration_sec diff --git a/tests/metrics/__init__.py b/tests/metrics/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/metrics/test_background_process_metrics.py b/tests/metrics/test_background_process_metrics.py new file mode 100644 index 0000000000..f0f6cb2912 --- /dev/null +++ b/tests/metrics/test_background_process_metrics.py @@ -0,0 +1,19 @@ +from unittest import TestCase as StdlibTestCase +from unittest.mock import Mock + +from synapse.logging.context import ContextResourceUsage, LoggingContext +from synapse.metrics.background_process_metrics import _BackgroundProcess + + +class TestBackgroundProcessMetrics(StdlibTestCase): + def test_update_metrics_with_negative_time_diff(self) -> None: + """We should ignore negative reported utime and stime differences""" + usage = ContextResourceUsage() + usage.ru_stime = usage.ru_utime = -1.0 + + mock_logging_context = Mock(spec=LoggingContext) + mock_logging_context.get_resource_usage.return_value = usage + + process = _BackgroundProcess("test process", mock_logging_context) + # Should not raise + process.update_metrics() diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py new file mode 100644 index 0000000000..bddc4228bc --- /dev/null +++ b/tests/metrics/test_metrics.py @@ -0,0 +1,206 @@ +# Copyright 2018 New Vector Ltd +# Copyright 2019 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing_extensions import Protocol + +try: + from importlib import metadata +except ImportError: + import importlib_metadata as metadata # type: ignore[no-redef] + +from unittest.mock import patch + +from pkg_resources import parse_version + +from synapse.app._base import _set_prometheus_client_use_created_metrics +from synapse.metrics import REGISTRY, InFlightGauge, generate_latest +from synapse.util.caches.deferred_cache import DeferredCache + +from tests import unittest + + +def get_sample_labels_value(sample): + """Extract the labels and values of a sample. + + prometheus_client 0.5 changed the sample type to a named tuple with more + members than the plain tuple had in 0.4 and earlier. This function can + extract the labels and value from the sample for both sample types. + + Args: + sample: The sample to get the labels and value from. + Returns: + A tuple of (labels, value) from the sample. + """ + + # If the sample has a labels and value attribute, use those. + if hasattr(sample, "labels") and hasattr(sample, "value"): + return sample.labels, sample.value + # Otherwise fall back to treating it as a plain 3 tuple. + else: + _, labels, value = sample + return labels, value + + +class TestMauLimit(unittest.TestCase): + def test_basic(self): + class MetricEntry(Protocol): + foo: int + bar: int + + gauge: InFlightGauge[MetricEntry] = InFlightGauge( + "test1", "", labels=["test_label"], sub_metrics=["foo", "bar"] + ) + + def handle1(metrics): + metrics.foo += 2 + metrics.bar = max(metrics.bar, 5) + + def handle2(metrics): + metrics.foo += 3 + metrics.bar = max(metrics.bar, 7) + + gauge.register(("key1",), handle1) + + self.assert_dict( + { + "test1_total": {("key1",): 1}, + "test1_foo": {("key1",): 2}, + "test1_bar": {("key1",): 5}, + }, + self.get_metrics_from_gauge(gauge), + ) + + gauge.unregister(("key1",), handle1) + + self.assert_dict( + { + "test1_total": {("key1",): 0}, + "test1_foo": {("key1",): 0}, + "test1_bar": {("key1",): 0}, + }, + self.get_metrics_from_gauge(gauge), + ) + + gauge.register(("key1",), handle1) + gauge.register(("key2",), handle2) + + self.assert_dict( + { + "test1_total": {("key1",): 1, ("key2",): 1}, + "test1_foo": {("key1",): 2, ("key2",): 3}, + "test1_bar": {("key1",): 5, ("key2",): 7}, + }, + self.get_metrics_from_gauge(gauge), + ) + + gauge.unregister(("key2",), handle2) + gauge.register(("key1",), handle2) + + self.assert_dict( + { + "test1_total": {("key1",): 2, ("key2",): 0}, + "test1_foo": {("key1",): 5, ("key2",): 0}, + "test1_bar": {("key1",): 7, ("key2",): 0}, + }, + self.get_metrics_from_gauge(gauge), + ) + + def get_metrics_from_gauge(self, gauge): + results = {} + + for r in gauge.collect(): + results[r.name] = { + tuple(labels[x] for x in gauge.labels): value + for labels, value in map(get_sample_labels_value, r.samples) + } + + return results + + +class BuildInfoTests(unittest.TestCase): + def test_get_build(self): + """ + The synapse_build_info metric reports the OS version, Python version, + and Synapse version. + """ + items = list( + filter( + lambda x: b"synapse_build_info{" in x, + generate_latest(REGISTRY).split(b"\n"), + ) + ) + self.assertEqual(len(items), 1) + self.assertTrue(b"osversion=" in items[0]) + self.assertTrue(b"pythonversion=" in items[0]) + self.assertTrue(b"version=" in items[0]) + + +class CacheMetricsTests(unittest.HomeserverTestCase): + def test_cache_metric(self): + """ + Caches produce metrics reflecting their state when scraped. 
+ """ + CACHE_NAME = "cache_metrics_test_fgjkbdfg" + cache: DeferredCache[str, str] = DeferredCache(CACHE_NAME, max_entries=777) + + items = { + x.split(b"{")[0].decode("ascii"): x.split(b" ")[1].decode("ascii") + for x in filter( + lambda x: b"cache_metrics_test_fgjkbdfg" in x, + generate_latest(REGISTRY).split(b"\n"), + ) + } + + self.assertEqual(items["synapse_util_caches_cache_size"], "0.0") + self.assertEqual(items["synapse_util_caches_cache_max_size"], "777.0") + + cache.prefill("1", "hi") + + items = { + x.split(b"{")[0].decode("ascii"): x.split(b" ")[1].decode("ascii") + for x in filter( + lambda x: b"cache_metrics_test_fgjkbdfg" in x, + generate_latest(REGISTRY).split(b"\n"), + ) + } + + self.assertEqual(items["synapse_util_caches_cache_size"], "1.0") + self.assertEqual(items["synapse_util_caches_cache_max_size"], "777.0") + + +class PrometheusMetricsHackTestCase(unittest.HomeserverTestCase): + if parse_version(metadata.version("prometheus_client")) < parse_version("0.14.0"): + skip = "prometheus-client too old" + + def test_created_metrics_disabled(self) -> None: + """ + Tests that a brittle hack, to disable `_created` metrics, works. + This involves poking at the internals of prometheus-client. + It's not the end of the world if this doesn't work. + + This test gives us a way to notice if prometheus-client changes + their internals. + """ + import prometheus_client.metrics + + PRIVATE_FLAG_NAME = "_use_created" + + # By default, the pesky `_created` metrics are enabled. + # Check this assumption is still valid. + self.assertTrue(getattr(prometheus_client.metrics, PRIVATE_FLAG_NAME)) + + with patch("prometheus_client.metrics") as mock: + setattr(mock, PRIVATE_FLAG_NAME, True) + _set_prometheus_client_use_created_metrics(False) + self.assertFalse(getattr(mock, PRIVATE_FLAG_NAME, False)) diff --git a/tests/test_metrics.py b/tests/test_metrics.py deleted file mode 100644 index 1a70eddc9b..0000000000 --- a/tests/test_metrics.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2018 New Vector Ltd -# Copyright 2019 Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -try: - from importlib import metadata -except ImportError: - import importlib_metadata as metadata # type: ignore[no-redef] - -from unittest.mock import patch - -from pkg_resources import parse_version - -from synapse.app._base import _set_prometheus_client_use_created_metrics -from synapse.metrics import REGISTRY, InFlightGauge, generate_latest -from synapse.util.caches.deferred_cache import DeferredCache - -from tests import unittest - - -def get_sample_labels_value(sample): - """Extract the labels and values of a sample. - - prometheus_client 0.5 changed the sample type to a named tuple with more - members than the plain tuple had in 0.4 and earlier. This function can - extract the labels and value from the sample for both sample types. - - Args: - sample: The sample to get the labels and value from. - Returns: - A tuple of (labels, value) from the sample. 
- """ - - # If the sample has a labels and value attribute, use those. - if hasattr(sample, "labels") and hasattr(sample, "value"): - return sample.labels, sample.value - # Otherwise fall back to treating it as a plain 3 tuple. - else: - _, labels, value = sample - return labels, value - - -class TestMauLimit(unittest.TestCase): - def test_basic(self): - gauge = InFlightGauge( - "test1", "", labels=["test_label"], sub_metrics=["foo", "bar"] - ) - - def handle1(metrics): - metrics.foo += 2 - metrics.bar = max(metrics.bar, 5) - - def handle2(metrics): - metrics.foo += 3 - metrics.bar = max(metrics.bar, 7) - - gauge.register(("key1",), handle1) - - self.assert_dict( - { - "test1_total": {("key1",): 1}, - "test1_foo": {("key1",): 2}, - "test1_bar": {("key1",): 5}, - }, - self.get_metrics_from_gauge(gauge), - ) - - gauge.unregister(("key1",), handle1) - - self.assert_dict( - { - "test1_total": {("key1",): 0}, - "test1_foo": {("key1",): 0}, - "test1_bar": {("key1",): 0}, - }, - self.get_metrics_from_gauge(gauge), - ) - - gauge.register(("key1",), handle1) - gauge.register(("key2",), handle2) - - self.assert_dict( - { - "test1_total": {("key1",): 1, ("key2",): 1}, - "test1_foo": {("key1",): 2, ("key2",): 3}, - "test1_bar": {("key1",): 5, ("key2",): 7}, - }, - self.get_metrics_from_gauge(gauge), - ) - - gauge.unregister(("key2",), handle2) - gauge.register(("key1",), handle2) - - self.assert_dict( - { - "test1_total": {("key1",): 2, ("key2",): 0}, - "test1_foo": {("key1",): 5, ("key2",): 0}, - "test1_bar": {("key1",): 7, ("key2",): 0}, - }, - self.get_metrics_from_gauge(gauge), - ) - - def get_metrics_from_gauge(self, gauge): - results = {} - - for r in gauge.collect(): - results[r.name] = { - tuple(labels[x] for x in gauge.labels): value - for labels, value in map(get_sample_labels_value, r.samples) - } - - return results - - -class BuildInfoTests(unittest.TestCase): - def test_get_build(self): - """ - The synapse_build_info metric reports the OS version, Python version, - and Synapse version. - """ - items = list( - filter( - lambda x: b"synapse_build_info{" in x, - generate_latest(REGISTRY).split(b"\n"), - ) - ) - self.assertEqual(len(items), 1) - self.assertTrue(b"osversion=" in items[0]) - self.assertTrue(b"pythonversion=" in items[0]) - self.assertTrue(b"version=" in items[0]) - - -class CacheMetricsTests(unittest.HomeserverTestCase): - def test_cache_metric(self): - """ - Caches produce metrics reflecting their state when scraped. 
- """ - CACHE_NAME = "cache_metrics_test_fgjkbdfg" - cache = DeferredCache(CACHE_NAME, max_entries=777) - - items = { - x.split(b"{")[0].decode("ascii"): x.split(b" ")[1].decode("ascii") - for x in filter( - lambda x: b"cache_metrics_test_fgjkbdfg" in x, - generate_latest(REGISTRY).split(b"\n"), - ) - } - - self.assertEqual(items["synapse_util_caches_cache_size"], "0.0") - self.assertEqual(items["synapse_util_caches_cache_max_size"], "777.0") - - cache.prefill("1", "hi") - - items = { - x.split(b"{")[0].decode("ascii"): x.split(b" ")[1].decode("ascii") - for x in filter( - lambda x: b"cache_metrics_test_fgjkbdfg" in x, - generate_latest(REGISTRY).split(b"\n"), - ) - } - - self.assertEqual(items["synapse_util_caches_cache_size"], "1.0") - self.assertEqual(items["synapse_util_caches_cache_max_size"], "777.0") - - -class PrometheusMetricsHackTestCase(unittest.HomeserverTestCase): - if parse_version(metadata.version("prometheus_client")) < parse_version("0.14.0"): - skip = "prometheus-client too old" - - def test_created_metrics_disabled(self) -> None: - """ - Tests that a brittle hack, to disable `_created` metrics, works. - This involves poking at the internals of prometheus-client. - It's not the end of the world if this doesn't work. - - This test gives us a way to notice if prometheus-client changes - their internals. - """ - import prometheus_client.metrics - - PRIVATE_FLAG_NAME = "_use_created" - - # By default, the pesky `_created` metrics are enabled. - # Check this assumption is still valid. - self.assertTrue(getattr(prometheus_client.metrics, PRIVATE_FLAG_NAME)) - - with patch("prometheus_client.metrics") as mock: - setattr(mock, PRIVATE_FLAG_NAME, True) - _set_prometheus_client_use_created_metrics(False) - self.assertFalse(getattr(mock, PRIVATE_FLAG_NAME, False)) -- cgit 1.5.1 From 00d108fce48b139788a2fda79444e5acc281d72c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Oct 2022 13:29:14 +0000 Subject: Bump black from 22.3.0 to 22.10.0 (#14328) * Bump black from 22.3.0 to 22.10.0 Bumps [black](https://github.com/psf/black) from 22.3.0 to 22.10.0. - [Release notes](https://github.com/psf/black/releases) - [Changelog](https://github.com/psf/black/blob/main/CHANGES.md) - [Commits](https://github.com/psf/black/compare/22.3.0...22.10.0) --- updated-dependencies: - dependency-name: black dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14328.misc | 1 + poetry.lock | 52 ++++++++++++++++++++++++-------------------------- 2 files changed, 26 insertions(+), 27 deletions(-) create mode 100644 changelog.d/14328.misc diff --git a/changelog.d/14328.misc b/changelog.d/14328.misc new file mode 100644 index 0000000000..c88583d1eb --- /dev/null +++ b/changelog.d/14328.misc @@ -0,0 +1 @@ +Bump black from 22.3.0 to 22.10.0. diff --git a/poetry.lock b/poetry.lock index 3270b90a71..01f6d4da4e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -52,18 +52,18 @@ typecheck = ["mypy"] [[package]] name = "black" -version = "22.3.0" +version = "22.10.0" description = "The uncompromising code formatter." 
category = "dev" optional = false -python-versions = ">=3.6.2" +python-versions = ">=3.7" [package.dependencies] click = ">=8.0.0" mypy-extensions = ">=0.4.3" pathspec = ">=0.9.0" platformdirs = ">=2" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} @@ -1633,7 +1633,7 @@ url-preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "9400cb5c92bb4648238f652f5e7f81df51cdcf9b7c69d645f35beaa4acb2f420" +content-hash = "27811bd21d56ceeb0f68ded5a00375efcd1a004928f0736f5b02927ce8594cb0" [metadata.files] attrs = [ @@ -1672,29 +1672,27 @@ bcrypt = [ {file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"}, ] black = [ - {file = "black-22.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2497f9c2386572e28921fa8bec7be3e51de6801f7459dffd6e62492531c47e09"}, - {file = "black-22.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5795a0375eb87bfe902e80e0c8cfaedf8af4d49694d69161e5bd3206c18618bb"}, - {file = "black-22.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3556168e2e5c49629f7b0f377070240bd5511e45e25a4497bb0073d9dda776a"}, - {file = "black-22.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67c8301ec94e3bcc8906740fe071391bce40a862b7be0b86fb5382beefecd968"}, - {file = "black-22.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:fd57160949179ec517d32ac2ac898b5f20d68ed1a9c977346efbac9c2f1e779d"}, - {file = "black-22.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cc1e1de68c8e5444e8f94c3670bb48a2beef0e91dddfd4fcc29595ebd90bb9ce"}, - {file = "black-22.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2fc92002d44746d3e7db7cf9313cf4452f43e9ea77a2c939defce3b10b5c82"}, - {file = "black-22.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:a6342964b43a99dbc72f72812bf88cad8f0217ae9acb47c0d4f141a6416d2d7b"}, - {file = "black-22.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:328efc0cc70ccb23429d6be184a15ce613f676bdfc85e5fe8ea2a9354b4e9015"}, - {file = "black-22.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06f9d8846f2340dfac80ceb20200ea5d1b3f181dd0556b47af4e8e0b24fa0a6b"}, - {file = "black-22.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4efa5fad66b903b4a5f96d91461d90b9507a812b3c5de657d544215bb7877a"}, - {file = "black-22.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8477ec6bbfe0312c128e74644ac8a02ca06bcdb8982d4ee06f209be28cdf163"}, - {file = "black-22.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:637a4014c63fbf42a692d22b55d8ad6968a946b4a6ebc385c5505d9625b6a464"}, - {file = "black-22.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:863714200ada56cbc366dc9ae5291ceb936573155f8bf8e9de92aef51f3ad0f0"}, - {file = "black-22.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10dbe6e6d2988049b4655b2b739f98785a884d4d6b85bc35133a8fb9a2233176"}, - {file = "black-22.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:cee3e11161dde1b2a33a904b850b0899e0424cc331b7295f2a9698e79f9a69a0"}, - {file = "black-22.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5891ef8abc06576985de8fa88e95ab70641de6c1fca97e2a15820a9b69e51b20"}, - {file = 
"black-22.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:30d78ba6bf080eeaf0b7b875d924b15cd46fec5fd044ddfbad38c8ea9171043a"}, - {file = "black-22.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee8f1f7228cce7dffc2b464f07ce769f478968bfb3dd1254a4c2eeed84928aad"}, - {file = "black-22.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee227b696ca60dd1c507be80a6bc849a5a6ab57ac7352aad1ffec9e8b805f21"}, - {file = "black-22.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:9b542ced1ec0ceeff5b37d69838106a6348e60db7b8fdd245294dc1d26136265"}, - {file = "black-22.3.0-py3-none-any.whl", hash = "sha256:bc58025940a896d7e5356952228b68f793cf5fcb342be703c3a2669a1488cb72"}, - {file = "black-22.3.0.tar.gz", hash = "sha256:35020b8886c022ced9282b51b5a875b6d1ab0c387b31a065b84db7c33085ca79"}, + {file = "black-22.10.0-1fixedarch-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:5cc42ca67989e9c3cf859e84c2bf014f6633db63d1cbdf8fdb666dcd9e77e3fa"}, + {file = "black-22.10.0-1fixedarch-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:5d8f74030e67087b219b032aa33a919fae8806d49c867846bfacde57f43972ef"}, + {file = "black-22.10.0-1fixedarch-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:197df8509263b0b8614e1df1756b1dd41be6738eed2ba9e9769f3880c2b9d7b6"}, + {file = "black-22.10.0-1fixedarch-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:2644b5d63633702bc2c5f3754b1b475378fbbfb481f62319388235d0cd104c2d"}, + {file = "black-22.10.0-1fixedarch-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:e41a86c6c650bcecc6633ee3180d80a025db041a8e2398dcc059b3afa8382cd4"}, + {file = "black-22.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2039230db3c6c639bd84efe3292ec7b06e9214a2992cd9beb293d639c6402edb"}, + {file = "black-22.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14ff67aec0a47c424bc99b71005202045dc09270da44a27848d534600ac64fc7"}, + {file = "black-22.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:819dc789f4498ecc91438a7de64427c73b45035e2e3680c92e18795a839ebb66"}, + {file = "black-22.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5b9b29da4f564ba8787c119f37d174f2b69cdfdf9015b7d8c5c16121ddc054ae"}, + {file = "black-22.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8b49776299fece66bffaafe357d929ca9451450f5466e997a7285ab0fe28e3b"}, + {file = "black-22.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:21199526696b8f09c3997e2b4db8d0b108d801a348414264d2eb8eb2532e540d"}, + {file = "black-22.10.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e464456d24e23d11fced2bc8c47ef66d471f845c7b7a42f3bd77bf3d1789650"}, + {file = "black-22.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9311e99228ae10023300ecac05be5a296f60d2fd10fff31cf5c1fa4ca4b1988d"}, + {file = "black-22.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fba8a281e570adafb79f7755ac8721b6cf1bbf691186a287e990c7929c7692ff"}, + {file = "black-22.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:915ace4ff03fdfff953962fa672d44be269deb2eaf88499a0f8805221bc68c87"}, + {file = "black-22.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:444ebfb4e441254e87bad00c661fe32df9969b2bf224373a448d8aca2132b395"}, + {file = "black-22.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:974308c58d057a651d182208a484ce80a26dac0caef2895836a92dd6ebd725e0"}, + {file = "black-22.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72ef3925f30e12a184889aac03d77d031056860ccae8a1e519f6cbb742736383"}, + {file = 
"black-22.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:432247333090c8c5366e69627ccb363bc58514ae3e63f7fc75c54b1ea80fa7de"}, + {file = "black-22.10.0-py3-none-any.whl", hash = "sha256:c957b2b4ea88587b46cf49d1dc17681c1e672864fd7af32fc1e9664d572b3458"}, + {file = "black-22.10.0.tar.gz", hash = "sha256:f513588da599943e0cde4e32cc9879e825d58720d6557062d1098c5ad80080e1"}, ] bleach = [ {file = "bleach-5.0.1-py3-none-any.whl", hash = "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a"}, -- cgit 1.5.1 From 278f8543be10d0417dea52c441cd1bf21c291980 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Oct 2022 13:32:04 +0000 Subject: Bump twine from 3.8.0 to 4.0.1 (#14332) * Bump twine from 3.8.0 to 4.0.1 Bumps [twine](https://github.com/pypa/twine) from 3.8.0 to 4.0.1. - [Release notes](https://github.com/pypa/twine/releases) - [Changelog](https://github.com/pypa/twine/blob/main/docs/changelog.rst) - [Commits](https://github.com/pypa/twine/compare/3.8.0...4.0.1) --- updated-dependencies: - dependency-name: twine dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14332.misc | 1 + poetry.lock | 61 +++++++++++++++++++++++++------------------------- 2 files changed, 31 insertions(+), 31 deletions(-) create mode 100644 changelog.d/14332.misc diff --git a/changelog.d/14332.misc b/changelog.d/14332.misc new file mode 100644 index 0000000000..8d883fe38f --- /dev/null +++ b/changelog.d/14332.misc @@ -0,0 +1 @@ +Bump twine from 3.8.0 to 4.0.1. diff --git a/poetry.lock b/poetry.lock index 01f6d4da4e..85c5335123 100644 --- a/poetry.lock +++ b/poetry.lock @@ -976,11 +976,11 @@ python-versions = ">=3.6" [[package]] name = "readme-renderer" -version = "33.0" +version = "37.2" description = "readme_renderer is a library for rendering \"readme\" descriptions for Warehouse" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] bleach = ">=2.1.0" @@ -1030,6 +1030,22 @@ python-versions = ">=3.7" [package.extras] idna2008 = ["idna"] +[[package]] +name = "rich" +version = "12.6.0" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +category = "dev" +optional = false +python-versions = ">=3.6.3,<4.0.0" + +[package.dependencies] +commonmark = ">=0.9.0,<0.10.0" +pygments = ">=2.6.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] + [[package]] name = "secretstorage" version = "3.3.1" @@ -1251,22 +1267,6 @@ tomli = {version = "*", markers = "python_version >= \"3.6\""} [package.extras] dev = ["packaging"] -[[package]] -name = "tqdm" -version = "4.63.0" -description = "Fast, Extensible Progress Meter" -category = "dev" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -dev = ["py-make (>=0.1.0)", "twine", "wheel"] -notebook = ["ipywidgets (>=6)"] -telegram = ["requests"] - [[package]] name = "treq" version = "22.2.0" @@ -1288,22 +1288,21 @@ docs = ["sphinx (>=1.4.8)"] [[package]] name = "twine" -version = "3.8.0" +version = "4.0.1" description = 
"Collection of utilities for publishing packages on PyPI" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] -colorama = ">=0.4.3" importlib-metadata = ">=3.6" keyring = ">=15.1" pkginfo = ">=1.8.1" -readme-renderer = ">=21.0" +readme-renderer = ">=35.0" requests = ">=2.20" requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0" rfc3986 = ">=1.4.0" -tqdm = ">=4.14" +rich = ">=12.0.0" urllib3 = ">=1.26.0" [[package]] @@ -2501,8 +2500,8 @@ pyyaml = [ {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, ] readme-renderer = [ - {file = "readme_renderer-33.0-py3-none-any.whl", hash = "sha256:f02cee0c4de9636b5a62b6be50c9742427ba1b956aad1d938bfb087d0d72ccdf"}, - {file = "readme_renderer-33.0.tar.gz", hash = "sha256:e3b53bc84bd6af054e4cc1fe3567dc1ae19f554134221043a3f8c674e22209db"}, + {file = "readme_renderer-37.2-py3-none-any.whl", hash = "sha256:d3f06a69e8c40fca9ab3174eca48f96d9771eddb43517b17d96583418427b106"}, + {file = "readme_renderer-37.2.tar.gz", hash = "sha256:e8ad25293c98f781dbc2c5a36a309929390009f902f99e1798c761aaf04a7923"}, ] requests = [ {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"}, @@ -2516,6 +2515,10 @@ rfc3986 = [ {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"}, {file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"}, ] +rich = [ + {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"}, + {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"}, +] secretstorage = [ {file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"}, {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"}, @@ -2680,17 +2683,13 @@ towncrier = [ {file = "towncrier-21.9.0-py2.py3-none-any.whl", hash = "sha256:fc5a88a2a54988e3a8ed2b60d553599da8330f65722cc607c839614ed87e0f92"}, {file = "towncrier-21.9.0.tar.gz", hash = "sha256:9cb6f45c16e1a1eec9d0e7651165e7be60cd0ab81d13a5c96ca97a498ae87f48"}, ] -tqdm = [ - {file = "tqdm-4.63.0-py2.py3-none-any.whl", hash = "sha256:e643e071046f17139dea55b880dc9b33822ce21613b4a4f5ea57f202833dbc29"}, - {file = "tqdm-4.63.0.tar.gz", hash = "sha256:1d9835ede8e394bb8c9dcbffbca02d717217113adc679236873eeaac5bc0b3cd"}, -] treq = [ {file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"}, {file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"}, ] twine = [ - {file = "twine-3.8.0-py3-none-any.whl", hash = "sha256:d0550fca9dc19f3d5e8eadfce0c227294df0a2a951251a4385797c8a6198b7c8"}, - {file = "twine-3.8.0.tar.gz", hash = "sha256:8efa52658e0ae770686a13b675569328f1fba9837e5de1867bfe5f46a9aefe19"}, + {file = "twine-4.0.1-py3-none-any.whl", hash = "sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e"}, + {file = "twine-4.0.1.tar.gz", hash = "sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0"}, ] twisted = [ {file = "Twisted-22.8.0-py3-none-any.whl", hash = "sha256:8d4718d1e48dcc28933f8beb48dc71cfe77a125e37ad1eb7a3d0acc49baf6c99"}, -- cgit 1.5.1 From 
b2890369cd3361e0721f279b3ef046e344441106 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Oct 2022 13:34:00 +0000 Subject: Bump psycopg2 from 2.9.4 to 2.9.5 (#14331) * Bump psycopg2 from 2.9.4 to 2.9.5 Bumps [psycopg2](https://github.com/psycopg/psycopg2) from 2.9.4 to 2.9.5. - [Release notes](https://github.com/psycopg/psycopg2/releases) - [Changelog](https://github.com/psycopg/psycopg2/blob/master/NEWS) - [Commits](https://github.com/psycopg/psycopg2/commits) --- updated-dependencies: - dependency-name: psycopg2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14331.misc | 1 + poetry.lock | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 12 deletions(-) create mode 100644 changelog.d/14331.misc diff --git a/changelog.d/14331.misc b/changelog.d/14331.misc new file mode 100644 index 0000000000..be1c89245c --- /dev/null +++ b/changelog.d/14331.misc @@ -0,0 +1 @@ +Bump psycopg2 from 2.9.4 to 2.9.5. diff --git a/poetry.lock b/poetry.lock index 85c5335123..29d262ebff 100644 --- a/poetry.lock +++ b/poetry.lock @@ -721,7 +721,7 @@ twisted = ["twisted"] [[package]] name = "psycopg2" -version = "2.9.4" +version = "2.9.5" description = "psycopg2 - Python-PostgreSQL Database Adapter" category = "main" optional = true @@ -2302,17 +2302,17 @@ prometheus-client = [ {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"}, ] psycopg2 = [ - {file = "psycopg2-2.9.4-cp310-cp310-win32.whl", hash = "sha256:8de6a9fc5f42fa52f559e65120dcd7502394692490c98fed1221acf0819d7797"}, - {file = "psycopg2-2.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:1da77c061bdaab450581458932ae5e469cc6e36e0d62f988376e9f513f11cb5c"}, - {file = "psycopg2-2.9.4-cp36-cp36m-win32.whl", hash = "sha256:a11946bad3557ca254f17357d5a4ed63bdca45163e7a7d2bfb8e695df069cc3a"}, - {file = "psycopg2-2.9.4-cp36-cp36m-win_amd64.whl", hash = "sha256:46361c054df612c3cc813fdb343733d56543fb93565cff0f8ace422e4da06acb"}, - {file = "psycopg2-2.9.4-cp37-cp37m-win32.whl", hash = "sha256:aafa96f2da0071d6dd0cbb7633406d99f414b40ab0f918c9d9af7df928a1accb"}, - {file = "psycopg2-2.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:aa184d551a767ad25df3b8d22a0a62ef2962e0e374c04f6cbd1204947f540d61"}, - {file = "psycopg2-2.9.4-cp38-cp38-win32.whl", hash = "sha256:839f9ea8f6098e39966d97fcb8d08548fbc57c523a1e27a1f0609addf40f777c"}, - {file = "psycopg2-2.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:c7fa041b4acb913f6968fce10169105af5200f296028251d817ab37847c30184"}, - {file = "psycopg2-2.9.4-cp39-cp39-win32.whl", hash = "sha256:07b90a24d5056687781ddaef0ea172fd951f2f7293f6ffdd03d4f5077801f426"}, - {file = "psycopg2-2.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:849bd868ae3369932127f0771c08d1109b254f08d48dc42493c3d1b87cb2d308"}, - {file = "psycopg2-2.9.4.tar.gz", hash = "sha256:d529926254e093a1b669f692a3aa50069bc71faf5b0ecd91686a78f62767d52f"}, + {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"}, + {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"}, + {file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = 
"sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"}, + {file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"}, + {file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"}, + {file = "psycopg2-2.9.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1e5a38aa85bd660c53947bd28aeaafb6a97d70423606f1ccb044a03a1203fe4a"}, + {file = "psycopg2-2.9.5-cp38-cp38-win32.whl", hash = "sha256:f5b6320dbc3cf6cfb9f25308286f9f7ab464e65cfb105b64cc9c52831748ced2"}, + {file = "psycopg2-2.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:1a5c7d7d577e0eabfcf15eb87d1e19314c8c4f0e722a301f98e0e3a65e238b4e"}, + {file = "psycopg2-2.9.5-cp39-cp39-win32.whl", hash = "sha256:322fd5fca0b1113677089d4ebd5222c964b1760e361f151cbb2706c4912112c5"}, + {file = "psycopg2-2.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:190d51e8c1b25a47484e52a79638a8182451d6f6dff99f26ad9bd81e5359a0fa"}, + {file = "psycopg2-2.9.5.tar.gz", hash = "sha256:a5246d2e683a972e2187a8714b5c2cf8156c064629f9a9b1a873c1730d9e245a"}, ] psycopg2cffi = [ {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"}, -- cgit 1.5.1 From 15bdb0da522ba902e6a1c55f7f6775faeb47176a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Oct 2022 13:43:19 +0000 Subject: Bump sentry-sdk from 1.5.11 to 1.10.1 (#14330) * Bump sentry-sdk from 1.5.11 to 1.10.1 Bumps [sentry-sdk](https://github.com/getsentry/sentry-python) from 1.5.11 to 1.10.1. - [Release notes](https://github.com/getsentry/sentry-python/releases) - [Changelog](https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-python/compare/1.5.11...1.10.1) --- updated-dependencies: - dependency-name: sentry-sdk dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14330.misc | 1 + poetry.lock | 22 ++++++++++++---------- 2 files changed, 13 insertions(+), 10 deletions(-) create mode 100644 changelog.d/14330.misc diff --git a/changelog.d/14330.misc b/changelog.d/14330.misc new file mode 100644 index 0000000000..e980cfc38f --- /dev/null +++ b/changelog.d/14330.misc @@ -0,0 +1 @@ +Bump sentry-sdk from 1.5.11 to 1.10.1. 
diff --git a/poetry.lock b/poetry.lock index 29d262ebff..c01cfcfa58 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1072,7 +1072,7 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.5.11" +version = "1.10.1" description = "Python client for Sentry (https://sentry.io)" category = "main" optional = true @@ -1080,7 +1080,7 @@ python-versions = "*" [package.dependencies] certifi = "*" -urllib3 = ">=1.10.0" +urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} [package.extras] aiohttp = ["aiohttp (>=3.5)"] @@ -1090,6 +1090,7 @@ celery = ["celery (>=3)"] chalice = ["chalice (>=1.16.0)"] django = ["django (>=1.8)"] falcon = ["falcon (>=1.4)"] +fastapi = ["fastapi (>=0.79.0)"] flask = ["blinker (>=1.1)", "flask (>=0.11)"] httpx = ["httpx (>=0.16.0)"] pure-eval = ["asttokens", "executing", "pure-eval"] @@ -1098,6 +1099,7 @@ quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] rq = ["rq (>=0.6)"] sanic = ["sanic (>=0.8)"] sqlalchemy = ["sqlalchemy (>=1.2)"] +starlette = ["starlette (>=0.19.1)"] tornado = ["tornado (>=5)"] [[package]] @@ -1511,15 +1513,15 @@ python-versions = ">=3.6,<4.0" [[package]] name = "urllib3" -version = "1.26.8" +version = "1.26.12" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4" [package.extras] -brotli = ["brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)"] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] @@ -2528,8 +2530,8 @@ semantic-version = [ {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"}, ] sentry-sdk = [ - {file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"}, - {file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"}, + {file = "sentry-sdk-1.10.1.tar.gz", hash = "sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691"}, + {file = "sentry_sdk-1.10.1-py2.py3-none-any.whl", hash = "sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad"}, ] service-identity = [ {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"}, @@ -2804,8 +2806,8 @@ unpaddedbase64 = [ {file = "unpaddedbase64-2.1.0.tar.gz", hash = "sha256:7273c60c089de39d90f5d6d4a7883a79e319dc9d9b1c8924a7fab96178a5f005"}, ] urllib3 = [ - {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"}, - {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"}, + {file = "urllib3-1.26.12-py2.py3-none-any.whl", hash = "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"}, + {file = "urllib3-1.26.12.tar.gz", hash = "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e"}, ] webencodings = [ {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = 
"sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, -- cgit 1.5.1 From cc3a52b33df72bb4230367536b924a6d1f510d36 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Mon, 31 Oct 2022 18:07:30 +0100 Subject: Support OIDC backchannel logouts (#11414) If configured an OIDC IdP can log a user's session out of Synapse when they log out of the identity provider. The IdP sends a request directly to Synapse (and must be configured with an endpoint) when a user logs out. --- changelog.d/11414.feature | 1 + docs/openid.md | 14 + docs/usage/configuration/config_documentation.md | 9 + synapse/config/oidc.py | 12 + synapse/handlers/oidc.py | 381 ++++++++++++++++++-- synapse/handlers/sso.py | 71 ++++ synapse/rest/synapse/client/oidc/__init__.py | 4 + .../client/oidc/backchannel_logout_resource.py | 35 ++ synapse/storage/databases/main/registration.py | 21 ++ tests/rest/client/test_auth.py | 390 +++++++++++++++++++-- tests/rest/client/utils.py | 55 ++- tests/server.py | 6 + tests/test_utils/oidc.py | 27 +- 13 files changed, 960 insertions(+), 66 deletions(-) create mode 100644 changelog.d/11414.feature create mode 100644 synapse/rest/synapse/client/oidc/backchannel_logout_resource.py diff --git a/changelog.d/11414.feature b/changelog.d/11414.feature new file mode 100644 index 0000000000..fc035e50a7 --- /dev/null +++ b/changelog.d/11414.feature @@ -0,0 +1 @@ +Support back-channel logouts from OpenID Connect providers. diff --git a/docs/openid.md b/docs/openid.md index 87ebea4c29..37c5eb244d 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -49,6 +49,13 @@ setting in your configuration file. See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as the text below for example configurations for specific providers. +## OIDC Back-Channel Logout + +Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications. + +This lets the OpenID Connect Provider notify Synapse when a user logs out, so that Synapse can end that user session. +This feature can be enabled by setting the `backchannel_logout_enabled` property to `true` in the provider configuration, and setting the following URL as destination for Back-Channel Logout notifications in your OpenID Connect Provider: `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` + ## Sample configs Here are a few configs for providers that should work with Synapse. @@ -123,6 +130,9 @@ oidc_providers: [Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat. +Keycloak supports OIDC Back-Channel Logout, which sends logout notification to Synapse, so that Synapse users get logged out when they log out from Keycloak. +This can be optionally enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak. + Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm. 1. Click `Clients` in the sidebar and click `Create` @@ -144,6 +154,8 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to | Client Protocol | `openid-connect` | | Access Type | `confidential` | | Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` | +| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` | +| Backchannel Logout Session Required (optional) | `On` | 5. Click `Save` 6. 
On the Credentials tab, update the fields: @@ -167,7 +179,9 @@ oidc_providers: config: localpart_template: "{{ user.preferred_username }}" display_name_template: "{{ user.name }}" + backchannel_logout_enabled: true # Optional ``` + ### Auth0 [Auth0][auth0] is a hosted SaaS IdP solution. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 97fb505a5f..44358faf59 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3021,6 +3021,15 @@ Options for each entry include: which is set to the claims returned by the UserInfo Endpoint and/or in the ID Token. +* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications. + Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`. + Defaults to `false`. + +* `backchannel_logout_ignore_sub`: by default, the OIDC Back-Channel Logout feature checks that the + `sub` claim matches the subject claim received during login. This check can be disabled by setting + this to `true`. Defaults to `false`. + + You might want to disable this if the `subject_claim` returned by the mapping provider is not `sub`. It is possible to configure Synapse to only allow logins if certain attributes match particular values in the OIDC userinfo. The requirements can be listed under diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index 5418a332da..0bd83f4010 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -123,6 +123,8 @@ OIDC_PROVIDER_CONFIG_SCHEMA = { "userinfo_endpoint": {"type": "string"}, "jwks_uri": {"type": "string"}, "skip_verification": {"type": "boolean"}, + "backchannel_logout_enabled": {"type": "boolean"}, + "backchannel_logout_ignore_sub": {"type": "boolean"}, "user_profile_method": { "type": "string", "enum": ["auto", "userinfo_endpoint"], @@ -292,6 +294,10 @@ def _parse_oidc_config_dict( token_endpoint=oidc_config.get("token_endpoint"), userinfo_endpoint=oidc_config.get("userinfo_endpoint"), jwks_uri=oidc_config.get("jwks_uri"), + backchannel_logout_enabled=oidc_config.get("backchannel_logout_enabled", False), + backchannel_logout_ignore_sub=oidc_config.get( + "backchannel_logout_ignore_sub", False + ), skip_verification=oidc_config.get("skip_verification", False), user_profile_method=oidc_config.get("user_profile_method", "auto"), allow_existing_users=oidc_config.get("allow_existing_users", False), @@ -368,6 +374,12 @@ class OidcProviderConfig: # "openid" scope is used. jwks_uri: Optional[str] + # Whether Synapse should react to backchannel logouts + backchannel_logout_enabled: bool + + # Whether Synapse should ignore the `sub` claim in backchannel logouts or not. + backchannel_logout_ignore_sub: bool + # Whether to skip metadata verification skip_verification: bool diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 9759daf043..867973dcca 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -12,14 +12,28 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import binascii import inspect +import json import logging -from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + List, + Optional, + Type, + TypeVar, + Union, +) from urllib.parse import urlencode, urlparse import attr +import unpaddedbase64 from authlib.common.security import generate_token -from authlib.jose import JsonWebToken, jwt +from authlib.jose import JsonWebToken, JWTClaims +from authlib.jose.errors import InvalidClaimError, JoseError, MissingClaimError from authlib.oauth2.auth import ClientAuth from authlib.oauth2.rfc6749.parameters import prepare_grant_uri from authlib.oidc.core import CodeIDToken, UserInfo @@ -35,9 +49,12 @@ from typing_extensions import TypedDict from twisted.web.client import readBody from twisted.web.http_headers import Headers +from synapse.api.errors import SynapseError from synapse.config import ConfigError from synapse.config.oidc import OidcProviderClientSecretJwtKey, OidcProviderConfig from synapse.handlers.sso import MappingException, UserAttributes +from synapse.http.server import finish_request +from synapse.http.servlet import parse_string from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart @@ -88,6 +105,8 @@ class Token(TypedDict): #: there is no real point of doing this in our case. JWK = Dict[str, str] +C = TypeVar("C") + #: A JWK Set, as per RFC7517 sec 5. class JWKS(TypedDict): @@ -247,6 +266,80 @@ class OidcHandler: await oidc_provider.handle_oidc_callback(request, session_data, code) + async def handle_backchannel_logout(self, request: SynapseRequest) -> None: + """Handle an incoming request to /_synapse/client/oidc/backchannel_logout + + This extracts the logout_token from the request and tries to figure out + which OpenID Provider it is coming from. This works by matching the iss claim + with the issuer and the aud claim with the client_id. + + Since at this point we don't know who signed the JWT, we can't just + decode it using authlib since it always verifies the signature. We + have to decode it manually without validating the signature. The actual JWT + verification is done in the `OidcProvider.handle_backchannel_logout` method, + once we have figured out which provider sent the request. + + Args: + request: the incoming request from the browser. + """ + logout_token = parse_string(request, "logout_token") + if logout_token is None: + raise SynapseError(400, "Missing logout_token in request") + + # A JWT looks like this: + # header.payload.signature + # where all parts are encoded with urlsafe base64. + # The aud and iss claims we care about are in the payload part, which + # is a JSON object. + try: + # By destructuring the list after splitting, we ensure that we have + # exactly 3 segments + _, payload, _ = logout_token.split(".") + except ValueError: + raise SynapseError(400, "Invalid logout_token in request") + + try: + payload_bytes = unpaddedbase64.decode_base64(payload) + claims = json_decoder.decode(payload_bytes.decode("utf-8")) + except (json.JSONDecodeError, binascii.Error, UnicodeError): + raise SynapseError(400, "Invalid logout_token payload in request") + + try: + # Let's extract the iss and aud claims + iss = claims["iss"] + aud = claims["aud"] + # The aud claim can be either a string or a list of strings. Here we + # normalize it as a list of strings.
+ if isinstance(aud, str): + aud = [aud] + + # Check that we have the right types for the aud and the iss claims + if not isinstance(iss, str) or not isinstance(aud, list): + raise TypeError() + for a in aud: + if not isinstance(a, str): + raise TypeError() + + # At this point we have properly checked both claim types + issuer: str = iss + audience: List[str] = aud + except (TypeError, KeyError): + raise SynapseError(400, "Invalid issuer/audience in logout_token") + + # Now that we know the audience and the issuer, we can figure out + # which provider it is coming from + oidc_provider: Optional[OidcProvider] = None + for provider in self._providers.values(): + if provider.issuer == issuer and provider.client_id in audience: + oidc_provider = provider + break + + if oidc_provider is None: + raise SynapseError(400, "Could not find the OP that issued this event") + + # Ask the provider to handle the logout request. + await oidc_provider.handle_backchannel_logout(request, logout_token) + class OidcError(Exception): """Used to catch errors when calling the token_endpoint""" @@ -342,6 +435,7 @@ class OidcProvider: self.idp_brand = provider.idp_brand self._sso_handler = hs.get_sso_handler() + self._device_handler = hs.get_device_handler() self._sso_handler.register_identity_provider(self) @@ -400,6 +494,41 @@ class OidcProvider: # If we're not using userinfo, we need a valid jwks to validate the ID token m.validate_jwks_uri() + if self._config.backchannel_logout_enabled: + if not m.get("backchannel_logout_supported", False): + logger.warning( + "OIDC Back-Channel Logout is enabled for issuer %r " + "but it does not advertise support for it", + self.issuer, + ) + + elif not m.get("backchannel_logout_session_supported", False): + logger.warning( + "OIDC Back-Channel Logout is enabled and supported " + "by issuer %r but it might not send a session ID with " + "logout tokens, which is required for the logouts to work", + self.issuer, + ) + + if not self._config.backchannel_logout_ignore_sub: + # If OIDC backchannel logouts are enabled, the provider's mapping provider + # should use the `sub` claim. We verify that by mapping a dummy user and + # seeing if we get back the sub claim + user = UserInfo({"sub": "thisisasubject"}) + try: + subject = self._user_mapping_provider.get_remote_user_id(user) + if subject != user["sub"]: + raise ValueError("Unexpected subject") + except Exception: + logger.warning( + f"OIDC Back-Channel Logout is enabled for issuer {self.issuer!r} " + "but it looks like the configured `user_mapping_provider` " + "does not use the `sub` claim as subject. If that is the case, " + "and you want Synapse to ignore the `sub` claim in OIDC " + "Back-Channel Logouts, set `backchannel_logout_ignore_sub` " + "to `true` in the issuer config." + ) + @property def _uses_userinfo(self) -> bool: """Returns True if the ``userinfo_endpoint`` should be used. @@ -415,6 +544,16 @@ class OidcProvider: or self._user_profile_method == "userinfo_endpoint" ) + @property + def issuer(self) -> str: + """The issuer identifying this provider.""" + return self._config.issuer + + @property + def client_id(self) -> str: + """The client_id used when interacting with this provider.""" + return self._config.client_id + async def load_metadata(self, force: bool = False) -> OpenIDProviderMetadata: """Return the provider metadata.
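To make the dispatch above easier to follow, here is a minimal standalone sketch of the unverified "peek" that `OidcHandler.handle_backchannel_logout` performs before any signature check. The helper name and token handling are illustrative only, not part of Synapse's API:

```python
# Read a logout token's iss/aud claims WITHOUT verifying the signature, so
# that the right provider (whose keys then do the real verification via
# `_verify_jwt`) can be selected. Assumes a well-formed three-segment JWT.
import json
from typing import List, Tuple

import unpaddedbase64  # the same helper library the handler imports


def peek_logout_token(logout_token: str) -> Tuple[str, List[str]]:
    # A JWT is "header.payload.signature", each segment urlsafe-base64 encoded.
    _, payload, _ = logout_token.split(".")
    claims = json.loads(unpaddedbase64.decode_base64(payload).decode("utf-8"))
    iss = claims["iss"]
    aud = claims["aud"]
    if isinstance(aud, str):
        aud = [aud]  # `aud` may be a bare string; normalize to a list
    return iss, aud
```

A provider is then matched with `provider.issuer == iss and provider.client_id in aud`, and only that provider's JWKS is used for the actual validation.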
@@ -662,6 +801,59 @@ class OidcProvider: return UserInfo(resp) + async def _verify_jwt( + self, + alg_values: List[str], + token: str, + claims_cls: Type[C], + claims_options: Optional[dict] = None, + claims_params: Optional[dict] = None, + ) -> C: + """Decode and validate a JWT, re-fetching the JWKS as needed. + + Args: + alg_values: list of `alg` values allowed when verifying the JWT. + token: the JWT. + claims_cls: the JWTClaims class to use to validate the claims. + claims_options: dict of options passed to the `claims_cls` constructor. + claims_params: dict of params passed to the `claims_cls` constructor. + + Returns: + The decoded claims in the JWT. + """ + jwt = JsonWebToken(alg_values) + + logger.debug("Attempting to decode JWT (%s) %r", claims_cls.__name__, token) + + # Try to decode with the keys in the cache first, then retry by forcing the keys + # to be reloaded + jwk_set = await self.load_jwks() + try: + claims = jwt.decode( + token, + key=jwk_set, + claims_cls=claims_cls, + claims_options=claims_options, + claims_params=claims_params, + ) + except ValueError: + logger.info("Reloading JWKS after decode error") + jwk_set = await self.load_jwks(force=True) # try reloading the jwks + claims = jwt.decode( + token, + key=jwk_set, + claims_cls=claims_cls, + claims_options=claims_options, + claims_params=claims_params, + ) + + logger.debug("Decoded JWT (%s) %r; validating", claims_cls.__name__, claims) + + claims.validate( + now=self._clock.time(), leeway=120 + ) # allows 2 min of clock skew + return claims + async def _parse_id_token(self, token: Token, nonce: str) -> CodeIDToken: """Return an instance of CodeIDToken from token's ``id_token``. @@ -675,13 +867,13 @@ class OidcProvider: The decoded claims in the ID token. """ id_token = token.get("id_token") - logger.debug("Attempting to decode JWT id_token %r", id_token) # That has theoretically been checked by the caller, so even though # assertions are not enabled in production, it is mainly here to appease mypy assert id_token is not None metadata = await self.load_metadata() + claims_params = { "nonce": nonce, "client_id": self._client_auth.client_id, @@ -691,38 +883,17 @@ class OidcProvider: # in the `id_token` that we can check against.
claims_params["access_token"] = token["access_token"] - alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"]) - jwt = JsonWebToken(alg_values) - - claim_options = {"iss": {"values": [metadata["issuer"]]}} + claims_options = {"iss": {"values": [metadata["issuer"]]}} - # Try to decode the keys in cache first, then retry by forcing the keys - # to be reloaded - jwk_set = await self.load_jwks() - try: - claims = jwt.decode( - id_token, - key=jwk_set, - claims_cls=CodeIDToken, - claims_options=claim_options, - claims_params=claims_params, - ) - except ValueError: - logger.info("Reloading JWKS after decode error") - jwk_set = await self.load_jwks(force=True) # try reloading the jwks - claims = jwt.decode( - id_token, - key=jwk_set, - claims_cls=CodeIDToken, - claims_options=claim_options, - claims_params=claims_params, - ) - - logger.debug("Decoded id_token JWT %r; validating", claims) + alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"]) - claims.validate( - now=self._clock.time(), leeway=120 - ) # allows 2 min of clock skew + claims = await self._verify_jwt( + alg_values=alg_values, + token=id_token, + claims_cls=CodeIDToken, + claims_options=claims_options, + claims_params=claims_params, + ) return claims @@ -1043,6 +1214,146 @@ class OidcProvider: # to be strings. return str(remote_user_id) + async def handle_backchannel_logout( + self, request: SynapseRequest, logout_token: str + ) -> None: + """Handle an incoming request to /_synapse/client/oidc/backchannel_logout + + The OIDC Provider posts a logout token to this endpoint when a user + session ends. That token is a JWT signed with the same keys as + ID tokens. The OpenID Connect Back-Channel Logout draft explains how to + validate the JWT and figure out what session to end. + + Args: + request: The request to respond to + logout_token: The logout token (a JWT) extracted from the request body + """ + # Back-Channel Logout can be disabled in the config, hence this check. + # This is not that important for now since Synapse is registered + # manually to the OP, so not specifying the backchannel-logout URI is + # as effective than disabling it here. It might make more sense if we + # support dynamic registration in Synapse at some point. + if not self._config.backchannel_logout_enabled: + logger.warning( + f"Received an OIDC Back-Channel Logout request from issuer {self.issuer!r} but it is disabled in config" + ) + + # TODO: this responds with a 400 status code, which is what the OIDC + # Back-Channel Logout spec expects, but spec also suggests answering with + # a JSON object, with the `error` and `error_description` fields set, which + # we are not doing here. + # See https://openid.net/specs/openid-connect-backchannel-1_0.html#BCResponse + raise SynapseError( + 400, "OpenID Connect Back-Channel Logout is disabled for this provider" + ) + + metadata = await self.load_metadata() + + # As per OIDC Back-Channel Logout 1.0 sec. 2.4: + # A Logout Token MUST be signed and MAY also be encrypted. The same + # keys are used to sign and encrypt Logout Tokens as are used for ID + # Tokens. If the Logout Token is encrypted, it SHOULD replicate the + # iss (issuer) claim in the JWT Header Parameters, as specified in + # Section 5.3 of [JWT]. + alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"]) + + # As per sec. 2.6: + # 3. Validate the iss, aud, and iat Claims in the same way they are + # validated in ID Tokens. 
+ # Which means the audience should contain Synapse's client_id and the + # issuer should be the IdP issuer + claims_options = { + "iss": {"values": [metadata["issuer"]]}, + "aud": {"values": [self.client_id]}, + } + + try: + claims = await self._verify_jwt( + alg_values=alg_values, + token=logout_token, + claims_cls=LogoutToken, + claims_options=claims_options, + ) + except JoseError: + logger.exception("Invalid logout_token") + raise SynapseError(400, "Invalid logout_token") + + # As per sec. 2.6: + # 4. Verify that the Logout Token contains a sub Claim, a sid Claim, + # or both. + # 5. Verify that the Logout Token contains an events Claim whose + # value is a JSON object containing the member name + # http://schemas.openid.net/event/backchannel-logout. + # 6. Verify that the Logout Token does not contain a nonce Claim. + # This is all verified by the LogoutToken claims class, so at this + # point the `sid` claim exists and is a string. + sid: str = claims.get("sid") + + # If the `sub` claim was included in the logout token, we check that it + # matches the right user. We can have cases where the `sub` claim is not + # the ID saved in the database, so we let admins disable this check in config. + sub: Optional[str] = claims.get("sub") + expected_user_id: Optional[str] = None + if sub is not None and not self._config.backchannel_logout_ignore_sub: + expected_user_id = await self._store.get_user_by_external_id( + self.idp_id, sub + ) + + # Invalidate any running user-mapping sessions, in-flight login tokens and + # active devices + await self._sso_handler.revoke_sessions_for_provider_session_id( + auth_provider_id=self.idp_id, + auth_provider_session_id=sid, + expected_user_id=expected_user_id, + ) + + request.setResponseCode(200) + request.setHeader(b"Cache-Control", b"no-cache, no-store") + request.setHeader(b"Pragma", b"no-cache") + finish_request(request) + + +class LogoutToken(JWTClaims): + """ + Holds and verifies the claims of a logout token, as per + https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken + """ + + REGISTERED_CLAIMS = ["iss", "sub", "aud", "iat", "jti", "events", "sid"] + + def validate(self, now: Optional[int] = None, leeway: int = 0) -> None: + """Validate everything in claims payload.""" + super().validate(now, leeway) + self.validate_sid() + self.validate_events() + self.validate_nonce() + + def validate_sid(self) -> None: + """Ensure the sid claim is present""" + sid = self.get("sid") + if not sid: + raise MissingClaimError("sid") + + if not isinstance(sid, str): + raise InvalidClaimError("sid") + + def validate_nonce(self) -> None: + """Ensure the nonce claim is absent""" + if "nonce" in self: + raise InvalidClaimError("nonce") + + def validate_events(self) -> None: + """Ensure the events claim is present and has the right value""" + events = self.get("events") + if not events: + raise MissingClaimError("events") + + if not isinstance(events, dict): + raise InvalidClaimError("events") + + if "http://schemas.openid.net/event/backchannel-logout" not in events: + raise InvalidClaimError("events") + # number of seconds a newly-generated client secret should be valid for CLIENT_SECRET_VALIDITY_SECONDS = 3600 @@ -1112,6 +1423,7 @@ class JwtClientSecret: logger.info( "Generating new JWT for %s: %s %s", self._oauth_issuer, header, payload ) + jwt = JsonWebToken(header["alg"]) self._cached_secret = jwt.encode(header, payload, self._key.key) self._cached_secret_replacement_time = ( expires_at - CLIENT_SECRET_MIN_VALIDITY_SECONDS @@ -1126,9 +1438,6
@@ class UserAttributeDict(TypedDict): emails: List[str] -C = TypeVar("C") - - class OidcMappingProvider(Generic[C]): """A mapping provider maps a UserInfo object to user attributes. diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 5943f08e91..749d7e93b0 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -191,6 +191,7 @@ class SsoHandler: self._server_name = hs.hostname self._registration_handler = hs.get_registration_handler() self._auth_handler = hs.get_auth_handler() + self._device_handler = hs.get_device_handler() self._error_template = hs.config.sso.sso_error_template self._bad_user_template = hs.config.sso.sso_auth_bad_user_template self._profile_handler = hs.get_profile_handler() @@ -1026,6 +1027,76 @@ class SsoHandler: return True + async def revoke_sessions_for_provider_session_id( + self, + auth_provider_id: str, + auth_provider_session_id: str, + expected_user_id: Optional[str] = None, + ) -> None: + """Revoke any devices and in-flight logins tied to a provider session. + + Args: + auth_provider_id: A unique identifier for this SSO provider, e.g. + "oidc" or "saml". + auth_provider_session_id: The session ID from the provider to log out + expected_user_id: The user we're expecting to log out. If set, it will ignore + sessions belonging to other users and log an error. + """ + # Invalidate any running user-mapping sessions + to_delete = [] + for session_id, session in self._username_mapping_sessions.items(): + if ( + session.auth_provider_id == auth_provider_id + and session.auth_provider_session_id == auth_provider_session_id + ): + to_delete.append(session_id) + + for session_id in to_delete: + logger.info("Revoking mapping session %s", session_id) + del self._username_mapping_sessions[session_id] + + # Invalidate any in-flight login tokens + await self._store.invalidate_login_tokens_by_session_id( + auth_provider_id=auth_provider_id, + auth_provider_session_id=auth_provider_session_id, + ) + + # Fetch any device(s) in the store associated with the session ID. + devices = await self._store.get_devices_by_auth_provider_session_id( + auth_provider_id=auth_provider_id, + auth_provider_session_id=auth_provider_session_id, + ) + + # We have no guarantee that all the devices of that session are for the same + # `user_id`. Hence, we have to iterate over the list of devices and log them out + # one by one. + for device in devices: + user_id = device["user_id"] + device_id = device["device_id"] + + # If the user_id associated with that device/session is not the one we got + # out of the `sub` claim, skip that device and log an error. + if expected_user_id is not None and user_id != expected_user_id: + logger.error( + "Received a logout notification from SSO provider " + f"{auth_provider_id!r} for the user {expected_user_id!r}, but with " + f"a session ID ({auth_provider_session_id!r}) which belongs to " + f"{user_id!r}. This may happen when the SSO provider user mapper " + "uses something other than the standard attribute as mapping ID. " + "For OIDC providers, set `backchannel_logout_ignore_sub` to `true` " + "in the provider config if that is the case."
+ ) + continue + + logger.info( + "Logging out %r (device %r) via SSO (%r) logout notification (session %r).", + user_id, + device_id, + auth_provider_id, + auth_provider_session_id, + ) + await self._device_handler.delete_devices(user_id, [device_id]) + def get_username_mapping_session_cookie_from_request(request: IRequest) -> str: """Extract the session ID from the cookie diff --git a/synapse/rest/synapse/client/oidc/__init__.py b/synapse/rest/synapse/client/oidc/__init__.py index 81fec39659..e4b28ce3df 100644 --- a/synapse/rest/synapse/client/oidc/__init__.py +++ b/synapse/rest/synapse/client/oidc/__init__.py @@ -17,6 +17,9 @@ from typing import TYPE_CHECKING from twisted.web.resource import Resource +from synapse.rest.synapse.client.oidc.backchannel_logout_resource import ( + OIDCBackchannelLogoutResource, +) from synapse.rest.synapse.client.oidc.callback_resource import OIDCCallbackResource if TYPE_CHECKING: @@ -29,6 +32,7 @@ class OIDCResource(Resource): def __init__(self, hs: "HomeServer"): Resource.__init__(self) self.putChild(b"callback", OIDCCallbackResource(hs)) + self.putChild(b"backchannel_logout", OIDCBackchannelLogoutResource(hs)) __all__ = ["OIDCResource"] diff --git a/synapse/rest/synapse/client/oidc/backchannel_logout_resource.py b/synapse/rest/synapse/client/oidc/backchannel_logout_resource.py new file mode 100644 index 0000000000..e07e76855a --- /dev/null +++ b/synapse/rest/synapse/client/oidc/backchannel_logout_resource.py @@ -0,0 +1,35 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import TYPE_CHECKING + +from synapse.http.server import DirectServeJsonResource +from synapse.http.site import SynapseRequest + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class OIDCBackchannelLogoutResource(DirectServeJsonResource): + isLeaf = 1 + + def __init__(self, hs: "HomeServer"): + super().__init__() + self._oidc_handler = hs.get_oidc_handler() + + async def _async_render_POST(self, request: SynapseRequest) -> None: + await self._oidc_handler.handle_backchannel_logout(request) diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 0255295317..5167089e03 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1920,6 +1920,27 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): self._clock.time_msec(), ) + async def invalidate_login_tokens_by_session_id( + self, auth_provider_id: str, auth_provider_session_id: str + ) -> None: + """Invalidate login tokens with the given IdP session ID. 
+ + Args: + auth_provider_id: The SSO Identity Provider that the user authenticated with + to get this token + auth_provider_session_id: The session ID advertised by the SSO Identity + Provider + """ + await self.db_pool.simple_update( + table="login_tokens", + keyvalues={ + "auth_provider_id": auth_provider_id, + "auth_provider_session_id": auth_provider_session_id, + }, + updatevalues={"used_ts": self._clock.time_msec()}, + desc="invalidate_login_tokens_by_session_id", + ) + @cached() async def is_guest(self, user_id: str) -> bool: res = await self.db_pool.simple_select_one_onecol( diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py index ebf653d018..847294dc8e 100644 --- a/tests/rest/client/test_auth.py +++ b/tests/rest/client/test_auth.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import re from http import HTTPStatus from typing import Any, Dict, List, Optional, Tuple, Union @@ -21,7 +22,7 @@ from twisted.web.resource import Resource import synapse.rest.admin from synapse.api.constants import ApprovalNoticeMedium, LoginType -from synapse.api.errors import Codes +from synapse.api.errors import Codes, SynapseError from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker from synapse.rest.client import account, auth, devices, login, logout, register from synapse.rest.synapse.client import build_synapse_client_resource_tree @@ -32,8 +33,8 @@ from synapse.util import Clock from tests import unittest from tests.handlers.test_oidc import HAS_OIDC -from tests.rest.client.utils import TEST_OIDC_CONFIG -from tests.server import FakeChannel +from tests.rest.client.utils import TEST_OIDC_CONFIG, TEST_OIDC_ISSUER +from tests.server import FakeChannel, make_request from tests.unittest import override_config, skip_unless @@ -638,19 +639,6 @@ class RefreshAuthTests(unittest.HomeserverTestCase): {"refresh_token": refresh_token}, ) - def is_access_token_valid(self, access_token: str) -> bool: - """ - Checks whether an access token is valid, returning whether it is or not. - """ - code = self.make_request( - "GET", "/_matrix/client/v3/account/whoami", access_token=access_token - ).code - - # Either 200 or 401 is what we get back; anything else is a bug. - assert code in {HTTPStatus.OK, HTTPStatus.UNAUTHORIZED} - - return code == HTTPStatus.OK - def test_login_issue_refresh_token(self) -> None: """ A login response should include a refresh_token only if asked. @@ -847,29 +835,37 @@ class RefreshAuthTests(unittest.HomeserverTestCase): self.reactor.advance(59.0) # Both tokens should still be valid. - self.assertTrue(self.is_access_token_valid(refreshable_access_token)) - self.assertTrue(self.is_access_token_valid(nonrefreshable_access_token)) + self.helper.whoami(refreshable_access_token, expect_code=HTTPStatus.OK) + self.helper.whoami(nonrefreshable_access_token, expect_code=HTTPStatus.OK) # Advance to 61 s (just past 1 minute, the time of expiry) self.reactor.advance(2.0) # Only the non-refreshable token is still valid. 
- self.assertFalse(self.is_access_token_valid(refreshable_access_token)) - self.assertTrue(self.is_access_token_valid(nonrefreshable_access_token)) + self.helper.whoami( + refreshable_access_token, expect_code=HTTPStatus.UNAUTHORIZED + ) + self.helper.whoami(nonrefreshable_access_token, expect_code=HTTPStatus.OK) # Advance to 599 s (just shy of 10 minutes, the time of expiry) self.reactor.advance(599.0 - 61.0) # It's still the case that only the non-refreshable token is still valid. - self.assertFalse(self.is_access_token_valid(refreshable_access_token)) - self.assertTrue(self.is_access_token_valid(nonrefreshable_access_token)) + self.helper.whoami( + refreshable_access_token, expect_code=HTTPStatus.UNAUTHORIZED + ) + self.helper.whoami(nonrefreshable_access_token, expect_code=HTTPStatus.OK) # Advance to 601 s (just past 10 minutes, the time of expiry) self.reactor.advance(2.0) # Now neither token is valid. - self.assertFalse(self.is_access_token_valid(refreshable_access_token)) - self.assertFalse(self.is_access_token_valid(nonrefreshable_access_token)) + self.helper.whoami( + refreshable_access_token, expect_code=HTTPStatus.UNAUTHORIZED + ) + self.helper.whoami( + nonrefreshable_access_token, expect_code=HTTPStatus.UNAUTHORIZED + ) @override_config( {"refreshable_access_token_lifetime": "1m", "refresh_token_lifetime": "2m"} @@ -1165,3 +1161,349 @@ class RefreshAuthTests(unittest.HomeserverTestCase): # and no refresh token self.assertEqual(_table_length("access_tokens"), 0) self.assertEqual(_table_length("refresh_tokens"), 0) + + +def oidc_config( + id: str, with_localpart_template: bool, **kwargs: Any +) -> Dict[str, Any]: + """Sample OIDC provider config used in backchannel logout tests. + + Args: + id: IDP ID for this provider + with_localpart_template: Set to `true` to have a default localpart_template in + the `user_mapping_provider` config and skip the user mapping session + **kwargs: rest of the config + + Returns: + A dict suitable for the `oidc_config` or the `oidc_providers[]` parts of + the HS config + """ + config: Dict[str, Any] = { + "idp_id": id, + "idp_name": id, + "issuer": TEST_OIDC_ISSUER, + "client_id": "test-client-id", + "client_secret": "test-client-secret", + "scopes": ["openid"], + } + + if with_localpart_template: + config["user_mapping_provider"] = { + "config": {"localpart_template": "{{ user.sub }}"} + } + else: + config["user_mapping_provider"] = {"config": {}} + + config.update(kwargs) + + return config + + +@skip_unless(HAS_OIDC, "Requires OIDC") +class OidcBackchannelLogoutTests(unittest.HomeserverTestCase): + servlets = [ + account.register_servlets, + login.register_servlets, + ] + + def default_config(self) -> Dict[str, Any]: + config = super().default_config() + + # public_baseurl uses an http:// scheme because FakeChannel.isSecure() returns + # False, so synapse will see the requested uri as http://..., so using http in + # the public_baseurl stops Synapse trying to redirect to https. 
+ config["public_baseurl"] = "http://synapse.test" + + return config + + def create_resource_dict(self) -> Dict[str, Resource]: + resource_dict = super().create_resource_dict() + resource_dict.update(build_synapse_client_resource_tree(self.hs)) + return resource_dict + + def submit_logout_token(self, logout_token: str) -> FakeChannel: + return self.make_request( + "POST", + "/_synapse/client/oidc/backchannel_logout", + content=f"logout_token={logout_token}", + content_is_form=True, + ) + + @override_config( + { + "oidc_providers": [ + oidc_config( + id="oidc", + with_localpart_template=True, + backchannel_logout_enabled=True, + ) + ] + } + ) + def test_simple_logout(self) -> None: + """ + Receiving a logout token should logout the user + """ + fake_oidc_server = self.helper.fake_oidc_server() + user = "john" + + login_resp, first_grant = self.helper.login_via_oidc( + fake_oidc_server, user, with_sid=True + ) + first_access_token: str = login_resp["access_token"] + self.helper.whoami(first_access_token, expect_code=HTTPStatus.OK) + + login_resp, second_grant = self.helper.login_via_oidc( + fake_oidc_server, user, with_sid=True + ) + second_access_token: str = login_resp["access_token"] + self.helper.whoami(second_access_token, expect_code=HTTPStatus.OK) + + self.assertNotEqual(first_grant.sid, second_grant.sid) + self.assertEqual(first_grant.userinfo["sub"], second_grant.userinfo["sub"]) + + # Logging out of the first session + logout_token = fake_oidc_server.generate_logout_token(first_grant) + channel = self.submit_logout_token(logout_token) + self.assertEqual(channel.code, 200) + + self.helper.whoami(first_access_token, expect_code=HTTPStatus.UNAUTHORIZED) + self.helper.whoami(second_access_token, expect_code=HTTPStatus.OK) + + # Logging out of the second session + logout_token = fake_oidc_server.generate_logout_token(second_grant) + channel = self.submit_logout_token(logout_token) + self.assertEqual(channel.code, 200) + + @override_config( + { + "oidc_providers": [ + oidc_config( + id="oidc", + with_localpart_template=True, + backchannel_logout_enabled=True, + ) + ] + } + ) + def test_logout_during_login(self) -> None: + """ + It should revoke login tokens when receiving a logout token + """ + fake_oidc_server = self.helper.fake_oidc_server() + user = "john" + + # Get an authentication, and logout before submitting the logout token + client_redirect_url = "https://x" + userinfo = {"sub": user} + channel, grant = self.helper.auth_via_oidc( + fake_oidc_server, + userinfo, + client_redirect_url, + with_sid=True, + ) + + # expect a confirmation page + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + + # fish the matrix login token out of the body of the confirmation page + m = re.search( + 'a href="%s.*loginToken=([^"]*)"' % (client_redirect_url,), + channel.text_body, + ) + assert m, channel.text_body + login_token = m.group(1) + + # Submit a logout + logout_token = fake_oidc_server.generate_logout_token(grant) + channel = self.submit_logout_token(logout_token) + self.assertEqual(channel.code, 200) + + # Now try to exchange the login token + channel = make_request( + self.hs.get_reactor(), + self.site, + "POST", + "/login", + content={"type": "m.login.token", "token": login_token}, + ) + # It should have failed + self.assertEqual(channel.code, 403) + + @override_config( + { + "oidc_providers": [ + oidc_config( + id="oidc", + with_localpart_template=False, + backchannel_logout_enabled=True, + ) + ] + } + ) + def test_logout_during_mapping(self) -> None: + """ + It should stop 
ongoing user mapping session when receiving a logout token + """ + fake_oidc_server = self.helper.fake_oidc_server() + user = "john" + + # Perform an authentication; a logout token will be submitted before the mapping is completed + client_redirect_url = "https://x" + userinfo = {"sub": user} + channel, grant = self.helper.auth_via_oidc( + fake_oidc_server, + userinfo, + client_redirect_url, + with_sid=True, + ) + + # Expect a user mapping page + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) + + # We should have a user_mapping_session cookie + cookie_headers = channel.headers.getRawHeaders("Set-Cookie") + assert cookie_headers + cookies: Dict[str, str] = {} + for h in cookie_headers: + key, value = h.split(";")[0].split("=", maxsplit=1) + cookies[key] = value + + user_mapping_session_id = cookies["username_mapping_session"] + + # Getting that session should not raise + session = self.hs.get_sso_handler().get_mapping_session(user_mapping_session_id) + self.assertIsNotNone(session) + + # Submit a logout + logout_token = fake_oidc_server.generate_logout_token(grant) + channel = self.submit_logout_token(logout_token) + self.assertEqual(channel.code, 200) + + # Now it should raise + with self.assertRaises(SynapseError): + self.hs.get_sso_handler().get_mapping_session(user_mapping_session_id) + + @override_config( + { + "oidc_providers": [ + oidc_config( + id="oidc", + with_localpart_template=True, + backchannel_logout_enabled=False, + ) + ] + } + ) + def test_disabled(self) -> None: + """ + Receiving a logout token should do nothing if it is disabled in the config + """ + fake_oidc_server = self.helper.fake_oidc_server() + user = "john" + + login_resp, grant = self.helper.login_via_oidc( + fake_oidc_server, user, with_sid=True + ) + access_token: str = login_resp["access_token"] + self.helper.whoami(access_token, expect_code=HTTPStatus.OK) + + # Logging out shouldn't work + logout_token = fake_oidc_server.generate_logout_token(grant) + channel = self.submit_logout_token(logout_token) + self.assertEqual(channel.code, 400) + + # And the token should still be valid + self.helper.whoami(access_token, expect_code=HTTPStatus.OK) + + @override_config( + { + "oidc_providers": [ + oidc_config( + id="oidc", + with_localpart_template=True, + backchannel_logout_enabled=True, + ) + ] + } + ) + def test_no_sid(self) -> None: + """ + Receiving a logout token without `sid` during the login should do nothing + """ + fake_oidc_server = self.helper.fake_oidc_server() + user = "john" + + login_resp, grant = self.helper.login_via_oidc( + fake_oidc_server, user, with_sid=False + ) + access_token: str = login_resp["access_token"] + self.helper.whoami(access_token, expect_code=HTTPStatus.OK) + + # Logging out shouldn't work + logout_token = fake_oidc_server.generate_logout_token(grant) + channel = self.submit_logout_token(logout_token) + self.assertEqual(channel.code, 400) + + # And the token should still be valid + self.helper.whoami(access_token, expect_code=HTTPStatus.OK) + + @override_config( + { + "oidc_providers": [ + oidc_config( + "first", + issuer="https://first-issuer.com/", + with_localpart_template=True, + backchannel_logout_enabled=True, + ), + oidc_config( + "second", + issuer="https://second-issuer.com/", + with_localpart_template=True, + backchannel_logout_enabled=True, + ), + ] + } + ) + def test_multiple_providers(self) -> None: + """ + It should be able to distinguish logout tokens from two different IdPs + """ + first_server = self.helper.fake_oidc_server(issuer="https://first-issuer.com/") +
second_server = self.helper.fake_oidc_server( + issuer="https://second-issuer.com/" + ) + user = "john" + + login_resp, first_grant = self.helper.login_via_oidc( + first_server, user, with_sid=True, idp_id="oidc-first" + ) + first_access_token: str = login_resp["access_token"] + self.helper.whoami(first_access_token, expect_code=HTTPStatus.OK) + + login_resp, second_grant = self.helper.login_via_oidc( + second_server, user, with_sid=True, idp_id="oidc-second" + ) + second_access_token: str = login_resp["access_token"] + self.helper.whoami(second_access_token, expect_code=HTTPStatus.OK) + + # `sid` values in the fake providers are generated by a counter, so the first grant of + # each provider should give the same SID + self.assertEqual(first_grant.sid, second_grant.sid) + self.assertEqual(first_grant.userinfo["sub"], second_grant.userinfo["sub"]) + + # Logging out of the first session + logout_token = first_server.generate_logout_token(first_grant) + channel = self.submit_logout_token(logout_token) + self.assertEqual(channel.code, 200) + + self.helper.whoami(first_access_token, expect_code=HTTPStatus.UNAUTHORIZED) + self.helper.whoami(second_access_token, expect_code=HTTPStatus.OK) + + # Logging out of the second session + logout_token = second_server.generate_logout_token(second_grant) + channel = self.submit_logout_token(logout_token) + self.assertEqual(channel.code, 200) + + self.helper.whoami(second_access_token, expect_code=HTTPStatus.UNAUTHORIZED) diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 967d229223..706399fae5 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -553,6 +553,34 @@ class RestHelper: return channel.json_body + def whoami( + self, + access_token: str, + expect_code: Literal[HTTPStatus.OK, HTTPStatus.UNAUTHORIZED] = HTTPStatus.OK, + ) -> JsonDict: + """Perform a 'whoami' request, which can be a quick way to check for access + token validity + + Args: + access_token: The user token to use during the request + expect_code: The return code to expect from attempting the whoami request + """ + channel = make_request( + self.hs.get_reactor(), + self.site, + "GET", + "account/whoami", + access_token=access_token, + ) + + assert channel.code == expect_code, "Expected: %d, got %d, resp: %r" % ( + expect_code, + channel.code, + channel.result["body"], + ) + + return channel.json_body + def fake_oidc_server(self, issuer: str = TEST_OIDC_ISSUER) -> FakeOidcServer: """Create a ``FakeOidcServer``. @@ -572,6 +600,7 @@ class RestHelper: fake_server: FakeOidcServer, remote_user_id: str, with_sid: bool = False, + idp_id: Optional[str] = None, expected_status: int = 200, ) -> Tuple[JsonDict, FakeAuthorizationGrant]: """Log in (as a new user) via OIDC @@ -588,7 +617,11 @@ class RestHelper: client_redirect_url = "https://x" userinfo = {"sub": remote_user_id} channel, grant = self.auth_via_oidc( - fake_server, userinfo, client_redirect_url, with_sid=with_sid + fake_server, + userinfo, + client_redirect_url, + with_sid=with_sid, + idp_id=idp_id, ) # expect a confirmation page @@ -623,6 +656,7 @@ class RestHelper: client_redirect_url: Optional[str] = None, ui_auth_session_id: Optional[str] = None, with_sid: bool = False, + idp_id: Optional[str] = None, ) -> Tuple[FakeChannel, FakeAuthorizationGrant]: """Perform an OIDC authentication flow via a mock OIDC provider. @@ -648,6 +682,7 @@ class RestHelper: ui_auth_session_id: if set, we will perform a UI Auth flow. The session id of the UI auth.
with_sid: if True, generates a random `sid` (OIDC session ID) + idp_id: if set, explicitly chooses one specific IDP Returns: A FakeChannel containing the result of calling the OIDC callback endpoint. @@ -665,7 +700,9 @@ oauth_uri = self.initiate_sso_ui_auth(ui_auth_session_id, cookies) else: # otherwise, hit the login redirect endpoint - oauth_uri = self.initiate_sso_login(client_redirect_url, cookies) + oauth_uri = self.initiate_sso_login( + client_redirect_url, cookies, idp_id=idp_id + ) # we now have a URI for the OIDC IdP, but we skip that and go straight # back to synapse's OIDC callback resource. However, we do need the "state" @@ -742,7 +779,10 @@ return channel, grant def initiate_sso_login( - self, client_redirect_url: Optional[str], cookies: MutableMapping[str, str] + self, + client_redirect_url: Optional[str], + cookies: MutableMapping[str, str], + idp_id: Optional[str] = None, ) -> str: """Make a request to the login-via-sso redirect endpoint, and return the target @@ -753,6 +793,7 @@ client_redirect_url: the client redirect URL to pass to the login redirect endpoint cookies: any cookies returned will be added to this dict + idp_id: if set, explicitly chooses one specific IDP Returns: the URI that the client gets redirected to (ie, the SSO server) @@ -761,6 +802,12 @@ if client_redirect_url: params["redirectUrl"] = client_redirect_url + uri = "/_matrix/client/r0/login/sso/redirect" + if idp_id is not None: + uri = f"{uri}/{idp_id}" + + uri = f"{uri}?{urllib.parse.urlencode(params)}" + # hit the redirect url (which should redirect back to the redirect url. This # is the easiest way of figuring out what the Host header ought to be set to # to keep Synapse happy. @@ -768,7 +815,7 @@ self.hs.get_reactor(), self.site, "GET", - "/_matrix/client/r0/login/sso/redirect?" + urllib.parse.urlencode(params), + uri, ) assert channel.code == 302 diff --git a/tests/server.py b/tests/server.py index 8b1d186219..b1730fcc8d 100644 --- a/tests/server.py +++ b/tests/server.py @@ -362,6 +362,12 @@ def make_request( # Twisted expects to be at the end of the content when parsing the request.
req.content.seek(0, SEEK_END) + # Old versions of Twisted (<20.3.0) have issues with parsing x-www-form-urlencoded + # bodies if the Content-Length header is missing + req.requestHeaders.addRawHeader( + b"Content-Length", str(len(content)).encode("ascii") + ) + if access_token: req.requestHeaders.addRawHeader( b"Authorization", b"Bearer " + access_token.encode("ascii") diff --git a/tests/test_utils/oidc.py b/tests/test_utils/oidc.py index de134bbc89..1461d23ee8 100644 --- a/tests/test_utils/oidc.py +++ b/tests/test_utils/oidc.py @@ -51,6 +51,8 @@ class FakeOidcServer: get_userinfo_handler: Mock post_token_handler: Mock + sid_counter: int = 0 + def __init__(self, clock: Clock, issuer: str): from authlib.jose import ECKey, KeySet @@ -146,7 +148,7 @@ class FakeOidcServer: return jws.serialize_compact(protected, json_payload, self._key).decode("utf-8") def generate_id_token(self, grant: FakeAuthorizationGrant) -> str: - now = self._clock.time() + now = int(self._clock.time()) id_token = { **grant.userinfo, "iss": self.issuer, @@ -166,6 +168,26 @@ class FakeOidcServer: return self._sign(id_token) + def generate_logout_token(self, grant: FakeAuthorizationGrant) -> str: + now = int(self._clock.time()) + logout_token = { + "iss": self.issuer, + "aud": grant.client_id, + "iat": now, + "jti": random_string(10), + "events": { + "http://schemas.openid.net/event/backchannel-logout": {}, + }, + } + + if grant.sid is not None: + logout_token["sid"] = grant.sid + + if "sub" in grant.userinfo: + logout_token["sub"] = grant.userinfo["sub"] + + return self._sign(logout_token) + def id_token_override(self, overrides: dict): """Temporarily patch the ID token generated by the token endpoint.""" return patch.object(self, "_id_token_overrides", overrides) @@ -183,7 +205,8 @@ class FakeOidcServer: code = random_string(10) sid = None if with_sid: - sid = random_string(10) + sid = str(self.sid_counter) + self.sid_counter += 1 grant = FakeAuthorizationGrant( userinfo=userinfo, -- cgit 1.5.1 From dbfc9b803ee32f7b31c2b5ccbc53a1bfcaa95983 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 31 Oct 2022 20:31:43 +0000 Subject: Fix dehydrated device REST checks (#14336) --- changelog.d/14336.bugfix | 1 + synapse/rest/client/devices.py | 5 ++--- tests/rest/client/test_devices.py | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14336.bugfix diff --git a/changelog.d/14336.bugfix b/changelog.d/14336.bugfix new file mode 100644 index 0000000000..d44ff1bbc7 --- /dev/null +++ b/changelog.d/14336.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.70 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697).
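The fix below has two parts, both visible in the `devices.py` hunk: the request body model wrongly required a `device_id` (which clients do not send; the server generates one and returns it), and the parsed pydantic model was handed to the storage layer where a plain dict is expected. A minimal sketch of the corrected parsing, assuming pydantic v1 semantics and hypothetical `device_data` fields (the blob is free-form):

```python
# Sketch of the corrected PUT body handling; not Synapse's exact models.
from typing import Optional

from pydantic import BaseModel, Extra, StrictStr


class DehydratedDeviceDataModel(BaseModel):
    class Config:
        extra = Extra.allow  # assumption: tolerate arbitrary extra keys

    algorithm: StrictStr


class PutBody(BaseModel):
    # No `device_id` field: requiring one (as the removed line did) made
    # every well-formed client PUT fail validation.
    device_data: DehydratedDeviceDataModel
    initial_device_display_name: Optional[StrictStr]


body = PutBody.parse_obj(
    {
        "device_data": {
            "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm",
            "account": "dehydrated_device",
        }
    }
)
# Storage expects primitives, hence `.dict()` rather than the model itself.
assert isinstance(body.device_data.dict(), dict)
```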
diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 90828c95c4..8f3cbd4ea2 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -231,7 +231,7 @@ class DehydratedDeviceServlet(RestServlet): } } - PUT /org.matrix.msc2697/dehydrated_device + PUT /org.matrix.msc2697.v2/dehydrated_device Content-Type: application/json { @@ -271,7 +271,6 @@ class DehydratedDeviceServlet(RestServlet): raise errors.NotFoundError("No dehydrated device available") class PutBody(RequestBodyModel): - device_id: StrictStr device_data: DehydratedDeviceDataModel initial_device_display_name: Optional[StrictStr] @@ -281,7 +280,7 @@ class DehydratedDeviceServlet(RestServlet): device_id = await self.device_handler.store_dehydrated_device( requester.user.to_string(), - submission.device_data, + submission.device_data.dict(), submission.initial_device_display_name, ) return 200, {"device_id": device_id} diff --git a/tests/rest/client/test_devices.py b/tests/rest/client/test_devices.py index aa98222434..d80eea17d3 100644 --- a/tests/rest/client/test_devices.py +++ b/tests/rest/client/test_devices.py @@ -200,3 +200,37 @@ class DevicesTestCase(unittest.HomeserverTestCase): self.reactor.advance(43200) self.get_success(self.handler.get_device(user_id, "abc")) self.get_failure(self.handler.get_device(user_id, "def"), NotFoundError) + + +class DehydratedDeviceTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + register.register_servlets, + devices.register_servlets, + ] + + def test_PUT(self) -> None: + """Sanity-check that we can PUT a dehydrated device. + + Detects https://github.com/matrix-org/synapse/issues/14334. + """ + alice = self.register_user("alice", "correcthorse") + token = self.login(alice, "correcthorse") + + # Have alice PUT a dehydrated device + channel = self.make_request( + "PUT", + "_matrix/client/unstable/org.matrix.msc2697.v2/dehydrated_device", + { + "device_data": { + "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm", + "account": "dehydrated_device", + } + }, + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + device_id = channel.json_body.get("device_id") + self.assertIsInstance(device_id, str) -- cgit 1.5.1 From b922b54b6143f13c0786a18fcbb5f55724ea72fc Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 1 Nov 2022 10:30:43 +0000 Subject: Fix type annotation causing import time error in the Complement forking launcher. (#14084) Co-authored-by: David Robertson --- changelog.d/14084.misc | 1 + synapse/app/complement_fork_starter.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14084.misc diff --git a/changelog.d/14084.misc b/changelog.d/14084.misc new file mode 100644 index 0000000000..988e55f437 --- /dev/null +++ b/changelog.d/14084.misc @@ -0,0 +1 @@ +Fix type annotation causing import time error in the Complement forking launcher.
\ No newline at end of file diff --git a/synapse/app/complement_fork_starter.py b/synapse/app/complement_fork_starter.py index b22f315453..8c0f4a57e7 100644 --- a/synapse/app/complement_fork_starter.py +++ b/synapse/app/complement_fork_starter.py @@ -55,13 +55,13 @@ import os import signal import sys from types import FrameType -from typing import Any, Callable, List, Optional +from typing import Any, Callable, Dict, List, Optional from twisted.internet.main import installReactor # a list of the original signal handlers, before we installed our custom ones. # We restore these in our child processes. -_original_signal_handlers: dict[int, Any] = {} +_original_signal_handlers: Dict[int, Any] = {} class ProxiedReactor: -- cgit 1.5.1 From 9473ebb9e7db9e3f71b341f72ae004db3a0144b8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Nov 2022 11:47:09 +0000 Subject: Revert "Fix event size checks (#13710)" This reverts commit fab495a9e1442d99e922367f65f41de5eaa488eb. As noted in https://github.com/matrix-org/synapse/pull/13710#issuecomment-1298396007: > We want to see this change land for the protocol's sake (and plan to un-revert it) but want to give this a little more time before releasing this. --- changelog.d/13710.bugfix | 1 - synapse/event_auth.py | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) delete mode 100644 changelog.d/13710.bugfix diff --git a/changelog.d/13710.bugfix b/changelog.d/13710.bugfix deleted file mode 100644 index 4c318d15f5..0000000000 --- a/changelog.d/13710.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where Synapse would count codepoints instead of bytes when validating the size of some fields. diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 5036604036..bab31e33c5 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -342,15 +342,15 @@ def check_state_dependent_auth_rules( def _check_size_limits(event: "EventBase") -> None: - if len(event.user_id.encode("utf-8")) > 255: + if len(event.user_id) > 255: raise EventSizeError("'user_id' too large") - if len(event.room_id.encode("utf-8")) > 255: + if len(event.room_id) > 255: raise EventSizeError("'room_id' too large") - if event.is_state() and len(event.state_key.encode("utf-8")) > 255: + if event.is_state() and len(event.state_key) > 255: raise EventSizeError("'state_key' too large") - if len(event.type.encode("utf-8")) > 255: + if len(event.type) > 255: raise EventSizeError("'type' too large") - if len(event.event_id.encode("utf-8")) > 255: + if len(event.event_id) > 255: raise EventSizeError("'event_id' too large") if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE: raise EventSizeError("event too large") -- cgit 1.5.1 From ddbba28d52796346de7e96ede32730e4cc5fcdbb Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Nov 2022 12:10:51 +0000 Subject: 1.71.0rc1 --- CHANGES.md | 73 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/11414.feature | 1 - changelog.d/11635.feature | 1 - changelog.d/11804.feature | 1 - changelog.d/13422.bugfix | 1 - changelog.d/13595.misc | 1 - changelog.d/13652.feature | 1 - changelog.d/13844.misc | 1 - changelog.d/13897.feature | 1 - changelog.d/13910.misc | 1 - changelog.d/13927.bugfix | 1 - changelog.d/13958.bugfix | 1 - changelog.d/13999.doc | 1 - changelog.d/14042.misc | 1 - changelog.d/14064.bugfix | 1 - changelog.d/14084.misc | 1 - changelog.d/14086.doc | 1 - changelog.d/14096.misc | 1 - changelog.d/14110.doc | 1 - changelog.d/14133.misc | 1 - changelog.d/14191.doc | 1 - 
changelog.d/14205.feature | 1 - changelog.d/14228.misc | 1 - changelog.d/14229.misc | 1 - changelog.d/14230.misc | 1 - changelog.d/14243.bugfix | 1 - changelog.d/14249.misc | 1 - changelog.d/14258.bugfix | 2 -- changelog.d/14262.misc | 1 - changelog.d/14268.misc | 1 - changelog.d/14270.misc | 1 - changelog.d/14271.misc | 1 - changelog.d/14272.misc | 1 - changelog.d/14273.misc | 1 - changelog.d/14274.misc | 1 - changelog.d/14275.misc | 1 - changelog.d/14276.misc | 1 - changelog.d/14277.misc | 1 - changelog.d/14278.misc | 1 - changelog.d/14279.misc | 1 - changelog.d/14287.misc | 1 - changelog.d/14291.bugfix | 1 - changelog.d/14303.misc | 1 - changelog.d/14304.bugfix | 1 - changelog.d/14305.misc | 1 - changelog.d/14310.feature | 1 - changelog.d/14311.feature | 1 - changelog.d/14323.bugfix | 1 - changelog.d/14328.misc | 1 - changelog.d/14330.misc | 1 - changelog.d/14331.misc | 1 - changelog.d/14332.misc | 1 - changelog.d/14336.bugfix | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 55 files changed, 80 insertions(+), 54 deletions(-) delete mode 100644 changelog.d/11414.feature delete mode 100644 changelog.d/11635.feature delete mode 100644 changelog.d/11804.feature delete mode 100644 changelog.d/13422.bugfix delete mode 100644 changelog.d/13595.misc delete mode 100644 changelog.d/13652.feature delete mode 100644 changelog.d/13844.misc delete mode 100644 changelog.d/13897.feature delete mode 100644 changelog.d/13910.misc delete mode 100644 changelog.d/13927.bugfix delete mode 100644 changelog.d/13958.bugfix delete mode 100644 changelog.d/13999.doc delete mode 100644 changelog.d/14042.misc delete mode 100644 changelog.d/14064.bugfix delete mode 100644 changelog.d/14084.misc delete mode 100644 changelog.d/14086.doc delete mode 100644 changelog.d/14096.misc delete mode 100644 changelog.d/14110.doc delete mode 100644 changelog.d/14133.misc delete mode 100644 changelog.d/14191.doc delete mode 100644 changelog.d/14205.feature delete mode 100644 changelog.d/14228.misc delete mode 100644 changelog.d/14229.misc delete mode 100644 changelog.d/14230.misc delete mode 100644 changelog.d/14243.bugfix delete mode 100644 changelog.d/14249.misc delete mode 100644 changelog.d/14258.bugfix delete mode 100644 changelog.d/14262.misc delete mode 100644 changelog.d/14268.misc delete mode 100644 changelog.d/14270.misc delete mode 100644 changelog.d/14271.misc delete mode 100644 changelog.d/14272.misc delete mode 100644 changelog.d/14273.misc delete mode 100644 changelog.d/14274.misc delete mode 100644 changelog.d/14275.misc delete mode 100644 changelog.d/14276.misc delete mode 100644 changelog.d/14277.misc delete mode 100644 changelog.d/14278.misc delete mode 100644 changelog.d/14279.misc delete mode 100644 changelog.d/14287.misc delete mode 100644 changelog.d/14291.bugfix delete mode 100644 changelog.d/14303.misc delete mode 100644 changelog.d/14304.bugfix delete mode 100644 changelog.d/14305.misc delete mode 100644 changelog.d/14310.feature delete mode 100644 changelog.d/14311.feature delete mode 100644 changelog.d/14323.bugfix delete mode 100644 changelog.d/14328.misc delete mode 100644 changelog.d/14330.misc delete mode 100644 changelog.d/14331.misc delete mode 100644 changelog.d/14332.misc delete mode 100644 changelog.d/14336.bugfix diff --git a/CHANGES.md b/CHANGES.md index d34fd09024..27cf8f5bae 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,76 @@ +Synapse 1.71.0rc1 (2022-11-01) +============================== + +Features +-------- + +- Support back-channel logouts from OpenID Connect providers. 
([\#11414](https://github.com/matrix-org/synapse/issues/11414)) +- Allow use of postgres and sqllite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311)) +- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664). Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804)) +- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652)) +- Enable Write-Ahead Logging for SQLite installs. Contributed by [asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897)) +- Show erasure status when listing users in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205)) + + +Bugfixes +-------- + +- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422)) +- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927)) +- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958)) +- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064)) +- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243)) +- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal, and debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258)) +- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291)) +- Fix a bug introduced in 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304)) +- Fix a bug introduced in Synapse 0.34.0rc2 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323)) +- Fix a bug introduced in Synapse 1.70 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697).
([\#14336](https://github.com/matrix-org/synapse/issues/14336)) + + +Improved Documentation +---------------------- + +- Explain how to disable the use of `trusted_key_servers`. ([\#13999](https://github.com/matrix-org/synapse/issues/13999)) +- Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086)) +- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110)) +- Update docstrings of `SynapseError` and `FederationError` to better describe what they are used for and the effects of using them. ([\#14191](https://github.com/matrix-org/synapse/issues/14191)) + + +Internal Changes +---------------- + +- Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595)) +- Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844)) +- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910)) +- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042)) +- Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084)) +- Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096)) +- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133)) +- Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228)) +- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229)) +- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI.
([\#14275](https://github.com/matrix-org/synapse/issues/14275)) +- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276)) +- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277)) +- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278)) +- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279)) +- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287)) +- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303)) +- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305)) +- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328)) +- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330)) +- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331)) +- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332)) + + Synapse 1.70.1 (2022-10-28) =========================== diff --git a/changelog.d/11414.feature b/changelog.d/11414.feature deleted file mode 100644 index fc035e50a7..0000000000 --- a/changelog.d/11414.feature +++ /dev/null @@ -1 +0,0 @@ -Support back-channel logouts from OpenID Connect providers. diff --git a/changelog.d/11635.feature b/changelog.d/11635.feature deleted file mode 100644 index 94c8a83212..0000000000 --- a/changelog.d/11635.feature +++ /dev/null @@ -1 +0,0 @@ -Allow use of postgres and sqllite full-text search operators in search queries. \ No newline at end of file diff --git a/changelog.d/11804.feature b/changelog.d/11804.feature deleted file mode 100644 index 6420393541..0000000000 --- a/changelog.d/11804.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664). Contributed by Nico. diff --git a/changelog.d/13422.bugfix b/changelog.d/13422.bugfix deleted file mode 100644 index 3a099acbe6..0000000000 --- a/changelog.d/13422.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. \ No newline at end of file diff --git a/changelog.d/13595.misc b/changelog.d/13595.misc deleted file mode 100644 index 71959a6ee7..0000000000 --- a/changelog.d/13595.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused `@lru_cache` decorator. diff --git a/changelog.d/13652.feature b/changelog.d/13652.feature deleted file mode 100644 index bc7f2926dc..0000000000 --- a/changelog.d/13652.feature +++ /dev/null @@ -1 +0,0 @@ -Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. \ No newline at end of file diff --git a/changelog.d/13844.misc b/changelog.d/13844.misc deleted file mode 100644 index 66f4414df7..0000000000 --- a/changelog.d/13844.misc +++ /dev/null @@ -1 +0,0 @@ -Save login tokens in database and prevent login token reuse. diff --git a/changelog.d/13897.feature b/changelog.d/13897.feature deleted file mode 100644 index d46fdf9fa5..0000000000 --- a/changelog.d/13897.feature +++ /dev/null @@ -1 +0,0 @@ -Enable Write-Ahead Logging for SQLite installs. 
Contributed by [asymmetric](https://github.com/asymmetric). diff --git a/changelog.d/13910.misc b/changelog.d/13910.misc deleted file mode 100644 index e906952aab..0000000000 --- a/changelog.d/13910.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor OIDC tests to better mimic an actual OIDC provider. diff --git a/changelog.d/13927.bugfix b/changelog.d/13927.bugfix deleted file mode 100644 index 119cd128e7..0000000000 --- a/changelog.d/13927.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. diff --git a/changelog.d/13958.bugfix b/changelog.d/13958.bugfix deleted file mode 100644 index f9f651bfdc..0000000000 --- a/changelog.d/13958.bugfix +++ /dev/null @@ -1 +0,0 @@ -Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). diff --git a/changelog.d/13999.doc b/changelog.d/13999.doc deleted file mode 100644 index 26538049e4..0000000000 --- a/changelog.d/13999.doc +++ /dev/null @@ -1 +0,0 @@ -Explain how to disable the use of `trusted_key_servers`. \ No newline at end of file diff --git a/changelog.d/14042.misc b/changelog.d/14042.misc deleted file mode 100644 index 868d55e76a..0000000000 --- a/changelog.d/14042.misc +++ /dev/null @@ -1 +0,0 @@ -Bump flake8-bugbear from 21.3.2 to 22.9.23. diff --git a/changelog.d/14064.bugfix b/changelog.d/14064.bugfix deleted file mode 100644 index cce6ef3b71..0000000000 --- a/changelog.d/14064.bugfix +++ /dev/null @@ -1 +0,0 @@ - Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). \ No newline at end of file diff --git a/changelog.d/14084.misc b/changelog.d/14084.misc deleted file mode 100644 index 988e55f437..0000000000 --- a/changelog.d/14084.misc +++ /dev/null @@ -1 +0,0 @@ -Fix type annotation causing import time error in the Complement forking launcher. \ No newline at end of file diff --git a/changelog.d/14086.doc b/changelog.d/14086.doc deleted file mode 100644 index 5b4b938759..0000000000 --- a/changelog.d/14086.doc +++ /dev/null @@ -1 +0,0 @@ -Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). \ No newline at end of file diff --git a/changelog.d/14096.misc b/changelog.d/14096.misc deleted file mode 100644 index 2c07dc673b..0000000000 --- a/changelog.d/14096.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. diff --git a/changelog.d/14110.doc b/changelog.d/14110.doc deleted file mode 100644 index 0927e31e4f..0000000000 --- a/changelog.d/14110.doc +++ /dev/null @@ -1 +0,0 @@ -Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). 
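As background to the #14084 fix collected above: a module-level variable annotation is evaluated when the module is imported, and subscripting the built-in `dict` only works at runtime from Python 3.9 onwards (PEP 585), so an annotation like the original one crashes at import time on the older interpreters Synapse still supported. A minimal sketch of the failure mode and of the fix, runnable on any supported Python version (the variable name is taken from the diff earlier in this series):

    from typing import Any, Dict

    # On Python 3.8 and older, the following (commented-out) annotation
    # raises "TypeError: 'type' object is not subscriptable" as soon as the
    # module is imported, because the built-in ``dict`` cannot be
    # parameterised before PEP 585:
    #
    #     _original_signal_handlers: dict[int, Any] = {}
    #
    # Spelling the annotation with typing.Dict is valid on every version:
    _original_signal_handlers: Dict[int, Any] = {}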
diff --git a/changelog.d/14133.misc b/changelog.d/14133.misc deleted file mode 100644 index 5180019c68..0000000000 --- a/changelog.d/14133.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-opentracing from 2.4.7 to 2.4.10. diff --git a/changelog.d/14191.doc b/changelog.d/14191.doc deleted file mode 100644 index 6b0eeb1ae1..0000000000 --- a/changelog.d/14191.doc +++ /dev/null @@ -1 +0,0 @@ -Update docstrings of `SynapseError` and `FederationError` to bettter describe what they are used for and the effects of using them are. diff --git a/changelog.d/14205.feature b/changelog.d/14205.feature deleted file mode 100644 index 6692063352..0000000000 --- a/changelog.d/14205.feature +++ /dev/null @@ -1 +0,0 @@ -Show erasure status when listing users in the Admin API. diff --git a/changelog.d/14228.misc b/changelog.d/14228.misc deleted file mode 100644 index 14fe31a8bc..0000000000 --- a/changelog.d/14228.misc +++ /dev/null @@ -1 +0,0 @@ -Add initial power level event to batch of bulk persisted events when creating a new room. diff --git a/changelog.d/14229.misc b/changelog.d/14229.misc deleted file mode 100644 index b9cd9a34d5..0000000000 --- a/changelog.d/14229.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `/key/` endpoints to use `RestServlet` classes. diff --git a/changelog.d/14230.misc b/changelog.d/14230.misc deleted file mode 100644 index c9b4b112cb..0000000000 --- a/changelog.d/14230.misc +++ /dev/null @@ -1 +0,0 @@ -Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. diff --git a/changelog.d/14243.bugfix b/changelog.d/14243.bugfix deleted file mode 100644 index ac0b21c2c5..0000000000 --- a/changelog.d/14243.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. diff --git a/changelog.d/14249.misc b/changelog.d/14249.misc deleted file mode 100644 index 3a2ce1e0c2..0000000000 --- a/changelog.d/14249.misc +++ /dev/null @@ -1 +0,0 @@ -Build wheels on macos 11, not 10.15. diff --git a/changelog.d/14258.bugfix b/changelog.d/14258.bugfix deleted file mode 100644 index de97945844..0000000000 --- a/changelog.d/14258.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal, and debug logging was enabled. - diff --git a/changelog.d/14262.misc b/changelog.d/14262.misc deleted file mode 100644 index c1d23bc67d..0000000000 --- a/changelog.d/14262.misc +++ /dev/null @@ -1 +0,0 @@ -Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. diff --git a/changelog.d/14268.misc b/changelog.d/14268.misc deleted file mode 100644 index 894b1e1d4c..0000000000 --- a/changelog.d/14268.misc +++ /dev/null @@ -1 +0,0 @@ -Add debugging to help diagnose lost device-list-update. diff --git a/changelog.d/14270.misc b/changelog.d/14270.misc deleted file mode 100644 index 526c8ac85a..0000000000 --- a/changelog.d/14270.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pysaml2 from 7.1.2 to 7.2.1. diff --git a/changelog.d/14271.misc b/changelog.d/14271.misc deleted file mode 100644 index 09a011bc8c..0000000000 --- a/changelog.d/14271.misc +++ /dev/null @@ -1 +0,0 @@ -Bump jinja2 from 3.0.3 to 3.1.2. diff --git a/changelog.d/14272.misc b/changelog.d/14272.misc deleted file mode 100644 index 2ae88cc50c..0000000000 --- a/changelog.d/14272.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-requests from 2.28.11 to 2.28.11.2. 
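As background to the #13710 size-check change reverted earlier in this series: the protocol's 255 limit on these identifiers is a byte limit, while Python's `len()` on a `str` counts codepoints, so the two checks disagree exactly when an identifier contains multi-byte UTF-8 characters. A quick illustration, using a made-up user ID:

    # Each U+1F600 emoji is one codepoint but four bytes in UTF-8.
    user_id = "@" + "\U0001F600" * 100 + ":example.com"

    assert len(user_id) == 113                  # codepoints: 1 + 100 + 12
    assert len(user_id.encode("utf-8")) == 413  # bytes: 1 + 400 + 12

    # A codepoint-based check accepts this ID (113 <= 255) even though it
    # exceeds the 255-byte limit that the byte-based check enforces.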
diff --git a/changelog.d/14273.misc b/changelog.d/14273.misc deleted file mode 100644 index d29dc8510d..0000000000 --- a/changelog.d/14273.misc +++ /dev/null @@ -1 +0,0 @@ -Bump setuptools-rust from 1.5.1 to 1.5.2. diff --git a/changelog.d/14274.misc b/changelog.d/14274.misc deleted file mode 100644 index e2077dc0ad..0000000000 --- a/changelog.d/14274.misc +++ /dev/null @@ -1 +0,0 @@ -Bump prometheus-client from 0.14.0 to 0.15.0. diff --git a/changelog.d/14275.misc b/changelog.d/14275.misc deleted file mode 100644 index bcda89ff92..0000000000 --- a/changelog.d/14275.misc +++ /dev/null @@ -1 +0,0 @@ -Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. diff --git a/changelog.d/14276.misc b/changelog.d/14276.misc deleted file mode 100644 index 689e9f3728..0000000000 --- a/changelog.d/14276.misc +++ /dev/null @@ -1 +0,0 @@ -Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. diff --git a/changelog.d/14277.misc b/changelog.d/14277.misc deleted file mode 100644 index 0eebf3f961..0000000000 --- a/changelog.d/14277.misc +++ /dev/null @@ -1 +0,0 @@ -Bump serde from 1.0.145 to 1.0.147. diff --git a/changelog.d/14278.misc b/changelog.d/14278.misc deleted file mode 100644 index 4b9dd89957..0000000000 --- a/changelog.d/14278.misc +++ /dev/null @@ -1 +0,0 @@ -Bump anyhow from 1.0.65 to 1.0.66. diff --git a/changelog.d/14279.misc b/changelog.d/14279.misc deleted file mode 100644 index af7e3265c0..0000000000 --- a/changelog.d/14279.misc +++ /dev/null @@ -1 +0,0 @@ -Bump serde_json from 1.0.86 to 1.0.87. diff --git a/changelog.d/14287.misc b/changelog.d/14287.misc deleted file mode 100644 index 85c6dc2765..0000000000 --- a/changelog.d/14287.misc +++ /dev/null @@ -1 +0,0 @@ -Add Rust cache to CI for `trial` runs. diff --git a/changelog.d/14291.bugfix b/changelog.d/14291.bugfix deleted file mode 100644 index bac5065e94..0000000000 --- a/changelog.d/14291.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during an partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). diff --git a/changelog.d/14303.misc b/changelog.d/14303.misc deleted file mode 100644 index 24ce238223..0000000000 --- a/changelog.d/14303.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hinting of `RawHeaders`. diff --git a/changelog.d/14304.bugfix b/changelog.d/14304.bugfix deleted file mode 100644 index b8d4d91034..0000000000 --- a/changelog.d/14304.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. \ No newline at end of file diff --git a/changelog.d/14305.misc b/changelog.d/14305.misc deleted file mode 100644 index a77bd947a1..0000000000 --- a/changelog.d/14305.misc +++ /dev/null @@ -1 +0,0 @@ -Use Poetry 1.2.0 in the Twisted Trunk CI job. diff --git a/changelog.d/14310.feature b/changelog.d/14310.feature deleted file mode 100644 index 94c8a83212..0000000000 --- a/changelog.d/14310.feature +++ /dev/null @@ -1 +0,0 @@ -Allow use of postgres and sqllite full-text search operators in search queries. \ No newline at end of file diff --git a/changelog.d/14311.feature b/changelog.d/14311.feature deleted file mode 100644 index 94c8a83212..0000000000 --- a/changelog.d/14311.feature +++ /dev/null @@ -1 +0,0 @@ -Allow use of postgres and sqllite full-text search operators in search queries. 
\ No newline at end of file diff --git a/changelog.d/14323.bugfix b/changelog.d/14323.bugfix deleted file mode 100644 index da39bc020c..0000000000 --- a/changelog.d/14323.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 0.34.0rc2 where logs could include error spam when background processes are measured as taking a negative amount of time. diff --git a/changelog.d/14328.misc b/changelog.d/14328.misc deleted file mode 100644 index c88583d1eb..0000000000 --- a/changelog.d/14328.misc +++ /dev/null @@ -1 +0,0 @@ -Bump black from 22.3.0 to 22.10.0. diff --git a/changelog.d/14330.misc b/changelog.d/14330.misc deleted file mode 100644 index e980cfc38f..0000000000 --- a/changelog.d/14330.misc +++ /dev/null @@ -1 +0,0 @@ -Bump sentry-sdk from 1.5.11 to 1.10.1. diff --git a/changelog.d/14331.misc b/changelog.d/14331.misc deleted file mode 100644 index be1c89245c..0000000000 --- a/changelog.d/14331.misc +++ /dev/null @@ -1 +0,0 @@ -Bump psycopg2 from 2.9.4 to 2.9.5. diff --git a/changelog.d/14332.misc b/changelog.d/14332.misc deleted file mode 100644 index 8d883fe38f..0000000000 --- a/changelog.d/14332.misc +++ /dev/null @@ -1 +0,0 @@ -Bump twine from 3.8.0 to 4.0.1. diff --git a/changelog.d/14336.bugfix b/changelog.d/14336.bugfix deleted file mode 100644 index d44ff1bbc7..0000000000 --- a/changelog.d/14336.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.70 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). diff --git a/debian/changelog b/debian/changelog index b9b48e5fbd..e07fa06b7a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.71.0~rc1) stable; urgency=medium + + * New Synapse release 1.71.0rc1. + + -- Synapse Packaging team Tue, 01 Nov 2022 12:10:17 +0000 + matrix-synapse-py3 (1.70.1) stable; urgency=medium * New Synapse release 1.70.1. diff --git a/pyproject.toml b/pyproject.toml index 4c1c02e8e4..5d96328067 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.70.1" +version = "1.71.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From 051402d1dff6e10a2b76cc26bee8e2ca0206c895 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Nov 2022 12:33:19 +0000 Subject: Adjust changelog --- CHANGES.md | 59 ++++++++++++++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 27cf8f5bae..113ad0d1ee 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,11 +5,12 @@ Features -------- - Support back-channel logouts from OpenID Connect providers. ([\#11414](https://github.com/matrix-org/synapse/issues/11414)) -- Allow use of postgres and sqllite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311)) -- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664). Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804)) +- Allow use of Postgres and SQLite full-text search operators in search queries.
([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311)) +- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), Pushrules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804)) - Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652)) -- Enable Write-Ahead Logging for SQLite installs. Contributed by [asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897)) -- Show erasure status when listing users in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205)) +- Enable write-ahead logging for SQLite installations. Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897)) +- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205)) +- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262)) Bugfixes @@ -20,17 +21,17 @@ Bugfixes - Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958)) - Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064)) - Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243)) -- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal, and debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258)) +- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258)) - Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291)) -- Fix a bug introduced in 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304)) -- Fix a bug introduced in Synapse 0.34.0rc2 where logs could include error spam when background processes are measured as taking a negative amount of time.
([\#14323](https://github.com/matrix-org/synapse/issues/14323)) -- Fix a bug introduced in Synapse 1.70 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336)) +- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304)) +- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323)) +- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336)) Improved Documentation ---------------------- -- Explain how to disable the use of `trusted_key_servers`. ([\#13999](https://github.com/matrix-org/synapse/issues/13999)) +- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999)) - Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086)) - Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110)) - Update docstrings of `SynapseError` and `FederationError` to better describe what they are used for and the effects of using them. ([\#14191](https://github.com/matrix-org/synapse/issues/14191)) @@ -42,34 +43,42 @@ Internal Changes - Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595)) - Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844)) - Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910)) -- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042)) - Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084)) - Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096)) -- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133)) - Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228)) - Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229)) - Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI.
([\#14230](https://github.com/matrix-org/synapse/issues/14230)) - Build wheels on macos 11, not 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249)) -- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262)) -- Add debugging to help diagnose lost device-list-update. ([\#14268](https://github.com/matrix-org/synapse/issues/14268)) -- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270)) +- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268)) +- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287)) +- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303)) +- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305)) + +
+Dependency updates + +Runtime: + +- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278)) - Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271)) -- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272)) -- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273)) - Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274)) -- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275)) -- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276)) +- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331)) +- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270)) +- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330)) - Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277)) -- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278)) - Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279)) -- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287)) -- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303)) -- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305)) + +Tooling and CI: + - Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328)) -- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330)) -- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331)) +- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042)) +- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276)) +- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275)) +- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273)) - Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332)) - +- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133)) +- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272)) +
Synapse 1.70.1 (2022-10-28) =========================== -- cgit 1.5.1 From 5905ba12d0c652b77ffc677196c7e31addb0eedf Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Nov 2022 13:07:54 +0000 Subject: Run trial tests against Python 3.11 (#13812) --- .ci/scripts/calculate_jobs.py | 10 +- .github/workflows/tests.yml | 2 +- changelog.d/13812.misc | 1 + poetry.lock | 222 ++++++++++++++++++------------ tests/federation/transport/test_client.py | 11 ++ 5 files changed, 149 insertions(+), 97 deletions(-) create mode 100644 changelog.d/13812.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index c2198e0dd4..f82eec231a 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -33,7 +33,7 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/") trial_sqlite_tests = [ { - "python-version": "3.7", + "python-version": "3.11", "database": "sqlite", "extras": "all", } @@ -46,13 +46,13 @@ if not IS_PR: "database": "sqlite", "extras": "all", } - for version in ("3.8", "3.9", "3.10") + for version in ("3.8", "3.9", "3.10", "3.11") ) trial_postgres_tests = [ { - "python-version": "3.7", + "python-version": "3.11", "database": "postgres", "postgres-version": "10", "extras": "all", @@ -62,7 +62,7 @@ trial_postgres_tests = [ if not IS_PR: trial_postgres_tests.append( { - "python-version": "3.10", + "python-version": "3.11", "database": "postgres", "postgres-version": "14", "extras": "all", @@ -71,7 +71,7 @@ if not IS_PR: trial_no_extra_tests = [ { - "python-version": "3.7", + "python-version": "3.11", "database": "sqlite", "extras": "", } diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index ff5cf0c534..27fef6b5bd 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -393,7 +393,7 @@ jobs: - python-version: "3.7" postgres-version: "10" - - python-version: "3.10" + - python-version: "3.11" postgres-version: "14" services: diff --git a/changelog.d/13812.misc b/changelog.d/13812.misc new file mode 100644 index 0000000000..667fdee2b7 --- /dev/null +++ b/changelog.d/13812.misc @@ -0,0 +1 @@ +Run unit tests against Python 3.11. diff --git a/poetry.lock b/poetry.lock index c01cfcfa58..ebabd3b833 100644 --- a/poetry.lock +++ b/poetry.lock @@ -25,7 +25,7 @@ cryptography = ">=3.2" [[package]] name = "automat" -version = "20.2.0" +version = "22.10.0" description = "Self-service finite-state machines for the programmer on the go." category = "main" optional = false @@ -671,12 +671,16 @@ python-versions = "*" [[package]] name = "pillow" -version = "9.0.1" +version = "9.2.0" description = "Python Imaging Library (Fork)" category = "main" optional = false python-versions = ">=3.7" +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] + [[package]] name = "pkginfo" version = "1.8.2" @@ -1534,7 +1538,7 @@ python-versions = "*" [[package]] name = "wrapt" -version = "1.13.3" +version = "1.14.1" description = "Module for decorators, wrappers and monkey patching." 
category = "dev" optional = false @@ -1646,8 +1650,8 @@ Authlib = [ {file = "Authlib-1.1.0.tar.gz", hash = "sha256:0a270c91409fc2b7b0fbee6996e09f2ee3187358762111a9a4225c874b94e891"}, ] automat = [ - {file = "Automat-20.2.0-py2.py3-none-any.whl", hash = "sha256:b6feb6455337df834f6c9962d6ccf771515b7d939bca142b29c20c2376bc6111"}, - {file = "Automat-20.2.0.tar.gz", hash = "sha256:7979803c74610e11ef0c0d68a2942b152df52da55336e0c9d58daf1831cbdf33"}, + {file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"}, + {file = "Automat-22.10.0.tar.gz", hash = "sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e"}, ] bcrypt = [ {file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"}, @@ -2251,41 +2255,64 @@ phonenumbers = [ {file = "phonenumbers-8.12.56.tar.gz", hash = "sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868"}, ] pillow = [ - {file = "Pillow-9.0.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a5d24e1d674dd9d72c66ad3ea9131322819ff86250b30dc5821cbafcfa0b96b4"}, - {file = "Pillow-9.0.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2632d0f846b7c7600edf53c48f8f9f1e13e62f66a6dbc15191029d950bfed976"}, - {file = "Pillow-9.0.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9618823bd237c0d2575283f2939655f54d51b4527ec3972907a927acbcc5bfc"}, - {file = "Pillow-9.0.1-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd"}, - {file = "Pillow-9.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f"}, - {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a"}, - {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049"}, - {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a"}, - {file = "Pillow-9.0.1-cp310-cp310-win32.whl", hash = "sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e"}, - {file = "Pillow-9.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b"}, - {file = "Pillow-9.0.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e"}, - {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360"}, - {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b"}, - {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030"}, - {file = "Pillow-9.0.1-cp37-cp37m-win32.whl", hash = "sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669"}, - {file = "Pillow-9.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092"}, - {file = "Pillow-9.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = 
"sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204"}, - {file = "Pillow-9.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e"}, - {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c"}, - {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5"}, - {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae"}, - {file = "Pillow-9.0.1-cp38-cp38-win32.whl", hash = "sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c"}, - {file = "Pillow-9.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00"}, - {file = "Pillow-9.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838"}, - {file = "Pillow-9.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28"}, - {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c"}, - {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b"}, - {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7"}, - {file = "Pillow-9.0.1-cp39-cp39-win32.whl", hash = "sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7"}, - {file = "Pillow-9.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac"}, - {file = "Pillow-9.0.1-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97"}, - {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56"}, - {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e"}, - {file = "Pillow-9.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70"}, - {file = "Pillow-9.0.1.tar.gz", hash = "sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa"}, + {file = "Pillow-9.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:a9c9bc489f8ab30906d7a85afac4b4944a572a7432e00698a7239f44a44e6efb"}, + {file = "Pillow-9.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:510cef4a3f401c246cfd8227b300828715dd055463cdca6176c2e4036df8bd4f"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7888310f6214f19ab2b6df90f3f06afa3df7ef7355fc025e78a3044737fab1f5"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831e648102c82f152e14c1a0938689dbb22480c548c8d4b8b248b3e50967b88c"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cc1d2451e8a3b4bfdb9caf745b58e6c7a77d2e469159b0d527a4554d73694d1"}, 
+ {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:136659638f61a251e8ed3b331fc6ccd124590eeff539de57c5f80ef3a9594e58"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6e8c66f70fb539301e064f6478d7453e820d8a2c631da948a23384865cd95544"}, + {file = "Pillow-9.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:37ff6b522a26d0538b753f0b4e8e164fdada12db6c6f00f62145d732d8a3152e"}, + {file = "Pillow-9.2.0-cp310-cp310-win32.whl", hash = "sha256:c79698d4cd9318d9481d89a77e2d3fcaeff5486be641e60a4b49f3d2ecca4e28"}, + {file = "Pillow-9.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:254164c57bab4b459f14c64e93df11eff5ded575192c294a0c49270f22c5d93d"}, + {file = "Pillow-9.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:adabc0bce035467fb537ef3e5e74f2847c8af217ee0be0455d4fec8adc0462fc"}, + {file = "Pillow-9.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:336b9036127eab855beec9662ac3ea13a4544a523ae273cbf108b228ecac8437"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50dff9cc21826d2977ef2d2a205504034e3a4563ca6f5db739b0d1026658e004"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb6259196a589123d755380b65127ddc60f4c64b21fc3bb46ce3a6ea663659b0"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b0554af24df2bf96618dac71ddada02420f946be943b181108cac55a7a2dcd4"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:15928f824870535c85dbf949c09d6ae7d3d6ac2d6efec80f3227f73eefba741c"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:bdd0de2d64688ecae88dd8935012c4a72681e5df632af903a1dca8c5e7aa871a"}, + {file = "Pillow-9.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5b87da55a08acb586bad5c3aa3b86505f559b84f39035b233d5bf844b0834b1"}, + {file = "Pillow-9.2.0-cp311-cp311-win32.whl", hash = "sha256:b6d5e92df2b77665e07ddb2e4dbd6d644b78e4c0d2e9272a852627cdba0d75cf"}, + {file = "Pillow-9.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6bf088c1ce160f50ea40764f825ec9b72ed9da25346216b91361eef8ad1b8f8c"}, + {file = "Pillow-9.2.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:2c58b24e3a63efd22554c676d81b0e57f80e0a7d3a5874a7e14ce90ec40d3069"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eef7592281f7c174d3d6cbfbb7ee5984a671fcd77e3fc78e973d492e9bf0eb3f"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dcd7b9c7139dc8258d164b55696ecd16c04607f1cc33ba7af86613881ffe4ac8"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a138441e95562b3c078746a22f8fca8ff1c22c014f856278bdbdd89ca36cff1b"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:93689632949aff41199090eff5474f3990b6823404e45d66a5d44304e9cdc467"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:f3fac744f9b540148fa7715a435d2283b71f68bfb6d4aae24482a890aed18b59"}, + {file = "Pillow-9.2.0-cp37-cp37m-win32.whl", hash = "sha256:fa768eff5f9f958270b081bb33581b4b569faabf8774726b283edb06617101dc"}, + {file = "Pillow-9.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:69bd1a15d7ba3694631e00df8de65a8cb031911ca11f44929c97fe05eb9b6c1d"}, + {file = "Pillow-9.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:030e3460861488e249731c3e7ab59b07c7853838ff3b8e16aac9561bb345da14"}, + {file 
= "Pillow-9.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:74a04183e6e64930b667d321524e3c5361094bb4af9083db5c301db64cd341f3"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d33a11f601213dcd5718109c09a52c2a1c893e7461f0be2d6febc2879ec2402"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fd6f5e3c0e4697fa7eb45b6e93996299f3feee73a3175fa451f49a74d092b9f"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a647c0d4478b995c5e54615a2e5360ccedd2f85e70ab57fbe817ca613d5e63b8"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:4134d3f1ba5f15027ff5c04296f13328fecd46921424084516bdb1b2548e66ff"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:bc431b065722a5ad1dfb4df354fb9333b7a582a5ee39a90e6ffff688d72f27a1"}, + {file = "Pillow-9.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1536ad017a9f789430fb6b8be8bf99d2f214c76502becc196c6f2d9a75b01b76"}, + {file = "Pillow-9.2.0-cp38-cp38-win32.whl", hash = "sha256:2ad0d4df0f5ef2247e27fc790d5c9b5a0af8ade9ba340db4a73bb1a4a3e5fb4f"}, + {file = "Pillow-9.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:ec52c351b35ca269cb1f8069d610fc45c5bd38c3e91f9ab4cbbf0aebc136d9c8"}, + {file = "Pillow-9.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ed2c4ef2451de908c90436d6e8092e13a43992f1860275b4d8082667fbb2ffc"}, + {file = "Pillow-9.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ad2f835e0ad81d1689f1b7e3fbac7b01bb8777d5a985c8962bedee0cc6d43da"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea98f633d45f7e815db648fd7ff0f19e328302ac36427343e4432c84432e7ff4"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7761afe0126d046974a01e030ae7529ed0ca6a196de3ec6937c11df0df1bc91c"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a54614049a18a2d6fe156e68e188da02a046a4a93cf24f373bffd977e943421"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:5aed7dde98403cd91d86a1115c78d8145c83078e864c1de1064f52e6feb61b20"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:13b725463f32df1bfeacbf3dd197fb358ae8ebcd8c5548faa75126ea425ccb60"}, + {file = "Pillow-9.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:808add66ea764ed97d44dda1ac4f2cfec4c1867d9efb16a33d158be79f32b8a4"}, + {file = "Pillow-9.2.0-cp39-cp39-win32.whl", hash = "sha256:337a74fd2f291c607d220c793a8135273c4c2ab001b03e601c36766005f36885"}, + {file = "Pillow-9.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:fac2d65901fb0fdf20363fbd345c01958a742f2dc62a8dd4495af66e3ff502a4"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ad2277b185ebce47a63f4dc6302e30f05762b688f8dc3de55dbae4651872cdf3"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c7b502bc34f6e32ba022b4a209638f9e097d7a9098104ae420eb8186217ebbb"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d1f14f5f691f55e1b47f824ca4fdcb4b19b4323fe43cc7bb105988cad7496be"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:dfe4c1fedfde4e2fbc009d5ad420647f7730d719786388b7de0999bf32c0d9fd"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = 
"sha256:f07f1f00e22b231dd3d9b9208692042e29792d6bd4f6639415d2f23158a80013"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1802f34298f5ba11d55e5bb09c31997dc0c6aed919658dfdf0198a2fe75d5490"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17d4cafe22f050b46d983b71c707162d63d796a1235cdf8b9d7a112e97b15bac"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:96b5e6874431df16aee0c1ba237574cb6dff1dcb173798faa6a9d8b399a05d0e"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:0030fdbd926fb85844b8b92e2f9449ba89607231d3dd597a21ae72dc7fe26927"}, + {file = "Pillow-9.2.0.tar.gz", hash = "sha256:75e636fd3e0fb872693f23ccb8a5ff2cd578801251f3a4f6854c6a5d437d3c04"}, ] pkginfo = [ {file = "pkginfo-1.8.2-py2.py3-none-any.whl", hash = "sha256:c24c487c6a7f72c66e816ab1796b96ac6c3d14d49338293d2141664330b55ffc"}, @@ -2814,57 +2841,70 @@ webencodings = [ {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, ] wrapt = [ - {file = "wrapt-1.13.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:e05e60ff3b2b0342153be4d1b597bbcfd8330890056b9619f4ad6b8d5c96a81a"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:85148f4225287b6a0665eef08a178c15097366d46b210574a658c1ff5b377489"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:2dded5496e8f1592ec27079b28b6ad2a1ef0b9296d270f77b8e4a3a796cf6909"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:e94b7d9deaa4cc7bac9198a58a7240aaf87fe56c6277ee25fa5b3aa1edebd229"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:498e6217523111d07cd67e87a791f5e9ee769f9241fcf8a379696e25806965af"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:ec7e20258ecc5174029a0f391e1b948bf2906cd64c198a9b8b281b811cbc04de"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:87883690cae293541e08ba2da22cacaae0a092e0ed56bbba8d018cc486fbafbb"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:f99c0489258086308aad4ae57da9e8ecf9e1f3f30fa35d5e170b4d4896554d80"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6a03d9917aee887690aa3f1747ce634e610f6db6f6b332b35c2dd89412912bca"}, - {file = "wrapt-1.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:936503cb0a6ed28dbfa87e8fcd0a56458822144e9d11a49ccee6d9a8adb2ac44"}, - {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f9c51d9af9abb899bd34ace878fbec8bf357b3194a10c4e8e0a25512826ef056"}, - {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:220a869982ea9023e163ba915077816ca439489de6d2c09089b219f4e11b6785"}, - {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0877fe981fd76b183711d767500e6b3111378ed2043c145e21816ee589d91096"}, - {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:43e69ffe47e3609a6aec0fe723001c60c65305784d964f5007d5b4fb1bc6bf33"}, - {file = "wrapt-1.13.3-cp310-cp310-win32.whl", hash = "sha256:78dea98c81915bbf510eb6a3c9c24915e4660302937b9ae05a0947164248020f"}, - {file = "wrapt-1.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:ea3e746e29d4000cd98d572f3ee2a6050a4f784bb536f4ac1f035987fc1ed83e"}, - {file = 
"wrapt-1.13.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:8c73c1a2ec7c98d7eaded149f6d225a692caa1bd7b2401a14125446e9e90410d"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:086218a72ec7d986a3eddb7707c8c4526d677c7b35e355875a0fe2918b059179"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:e92d0d4fa68ea0c02d39f1e2f9cb5bc4b4a71e8c442207433d8db47ee79d7aa3"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:d4a5f6146cfa5c7ba0134249665acd322a70d1ea61732723c7d3e8cc0fa80755"}, - {file = "wrapt-1.13.3-cp35-cp35m-win32.whl", hash = "sha256:8aab36778fa9bba1a8f06a4919556f9f8c7b33102bd71b3ab307bb3fecb21851"}, - {file = "wrapt-1.13.3-cp35-cp35m-win_amd64.whl", hash = "sha256:944b180f61f5e36c0634d3202ba8509b986b5fbaf57db3e94df11abee244ba13"}, - {file = "wrapt-1.13.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2ebdde19cd3c8cdf8df3fc165bc7827334bc4e353465048b36f7deeae8ee0918"}, - {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:610f5f83dd1e0ad40254c306f4764fcdc846641f120c3cf424ff57a19d5f7ade"}, - {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5601f44a0f38fed36cc07db004f0eedeaadbdcec90e4e90509480e7e6060a5bc"}, - {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:e6906d6f48437dfd80464f7d7af1740eadc572b9f7a4301e7dd3d65db285cacf"}, - {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:766b32c762e07e26f50d8a3468e3b4228b3736c805018e4b0ec8cc01ecd88125"}, - {file = "wrapt-1.13.3-cp36-cp36m-win32.whl", hash = "sha256:5f223101f21cfd41deec8ce3889dc59f88a59b409db028c469c9b20cfeefbe36"}, - {file = "wrapt-1.13.3-cp36-cp36m-win_amd64.whl", hash = "sha256:f122ccd12fdc69628786d0c947bdd9cb2733be8f800d88b5a37c57f1f1d73c10"}, - {file = "wrapt-1.13.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:46f7f3af321a573fc0c3586612db4decb7eb37172af1bc6173d81f5b66c2e068"}, - {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:778fd096ee96890c10ce96187c76b3e99b2da44e08c9e24d5652f356873f6709"}, - {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0cb23d36ed03bf46b894cfec777eec754146d68429c30431c99ef28482b5c1df"}, - {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:96b81ae75591a795d8c90edc0bfaab44d3d41ffc1aae4d994c5aa21d9b8e19a2"}, - {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7dd215e4e8514004c8d810a73e342c536547038fb130205ec4bba9f5de35d45b"}, - {file = "wrapt-1.13.3-cp37-cp37m-win32.whl", hash = "sha256:47f0a183743e7f71f29e4e21574ad3fa95676136f45b91afcf83f6a050914829"}, - {file = "wrapt-1.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fd76c47f20984b43d93de9a82011bb6e5f8325df6c9ed4d8310029a55fa361ea"}, - {file = "wrapt-1.13.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b73d4b78807bd299b38e4598b8e7bd34ed55d480160d2e7fdaabd9931afa65f9"}, - {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ec9465dd69d5657b5d2fa6133b3e1e989ae27d29471a672416fd729b429eb554"}, - {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:dd91006848eb55af2159375134d724032a2d1d13bcc6f81cd8d3ed9f2b8e846c"}, - {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ae9de71eb60940e58207f8e71fe113c639da42adb02fb2bcbcaccc1ccecd092b"}, - {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:51799ca950cfee9396a87f4a1240622ac38973b6df5ef7a41e7f0b98797099ce"}, - {file = "wrapt-1.13.3-cp38-cp38-win32.whl", hash = "sha256:4b9c458732450ec42578b5642ac53e312092acf8c0bfce140ada5ca1ac556f79"}, - {file = "wrapt-1.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:7dde79d007cd6dfa65afe404766057c2409316135cb892be4b1c768e3f3a11cb"}, - {file = "wrapt-1.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:981da26722bebb9247a0601e2922cedf8bb7a600e89c852d063313102de6f2cb"}, - {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:705e2af1f7be4707e49ced9153f8d72131090e52be9278b5dbb1498c749a1e32"}, - {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:25b1b1d5df495d82be1c9d2fad408f7ce5ca8a38085e2da41bb63c914baadff7"}, - {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:77416e6b17926d953b5c666a3cb718d5945df63ecf922af0ee576206d7033b5e"}, - {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:865c0b50003616f05858b22174c40ffc27a38e67359fa1495605f96125f76640"}, - {file = "wrapt-1.13.3-cp39-cp39-win32.whl", hash = "sha256:0a017a667d1f7411816e4bf214646d0ad5b1da2c1ea13dec6c162736ff25a374"}, - {file = "wrapt-1.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:81bd7c90d28a4b2e1df135bfbd7c23aee3050078ca6441bead44c42483f9ebfb"}, - {file = "wrapt-1.13.3.tar.gz", hash = "sha256:1fea9cd438686e6682271d36f3481a9f3636195578bab9ca3382e2f5f01fc185"}, + {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"}, + {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"}, + {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, + {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, + {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"}, + {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"}, + {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"}, + {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"}, + {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"}, + {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"}, + {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"}, + {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"}, + {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"}, + {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"}, + {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"}, + {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"}, + {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"}, + {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"}, + {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"}, + {file = 
"wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"}, + {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"}, + {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"}, + {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"}, ] xmlschema = [ {file = "xmlschema-1.10.0-py3-none-any.whl", hash = "sha256:dbd68bded2fef00c19cf37110ca0565eca34cf0b6c9e1d3b62ad0de8cbb582ca"}, diff --git a/tests/federation/transport/test_client.py b/tests/federation/transport/test_client.py index dd4d1b56de..b84c74fc0e 100644 --- a/tests/federation/transport/test_client.py +++ b/tests/federation/transport/test_client.py @@ -15,6 +15,8 @@ import json from unittest.mock import Mock +import ijson.common + from synapse.api.room_versions import RoomVersions from synapse.federation.transport.client import SendJoinParser from synapse.util import ExceptionBundle @@ -117,8 +119,17 @@ class SendJoinParserTestCase(TestCase): coro_3 = Mock() coro_3.close = Mock(side_effect=RuntimeError("Couldn't close coro 3")) + original_coros = parser._coros parser._coros = [coro_1, coro_2, coro_3] + # Close the original coroutines. If we don't, when we garbage collect them + # they will throw, failing the test. (Oddly, this only started in CPython 3.11). + for coro in original_coros: + try: + coro.close() + except ijson.common.IncompleteJSONError: + pass + # Send half of the data to the parser parser.write(serialisation[: len(serialisation) // 2]) -- cgit 1.5.1 From efdcb243287ae6c91e2806f0bfe24a9add2b488e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Nov 2022 13:12:22 +0000 Subject: Revert a testing commit from #13812 It (4f5d492cd6a9438de03d1b768f4c220cb662ac06) should have been reverted before the merge to develop. 

From efdcb243287ae6c91e2806f0bfe24a9add2b488e Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 1 Nov 2022 13:12:22 +0000
Subject: Revert a testing commit from #13812

It (4f5d492cd6a9438de03d1b768f4c220cb662ac06) should have been reverted
before the merge to develop.
---
 .ci/scripts/calculate_jobs.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py
index f82eec231a..c53d4d5ff1 100755
--- a/.ci/scripts/calculate_jobs.py
+++ b/.ci/scripts/calculate_jobs.py
@@ -33,7 +33,7 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
 
 trial_sqlite_tests = [
     {
-        "python-version": "3.11",
+        "python-version": "3.7",
         "database": "sqlite",
         "extras": "all",
     }
@@ -52,7 +52,7 @@ if not IS_PR:
 
 trial_postgres_tests = [
     {
-        "python-version": "3.11",
+        "python-version": "3.7",
        "database": "postgres",
         "postgres-version": "10",
         "extras": "all",
     }
@@ -71,7 +71,7 @@ if not IS_PR:
 
 trial_no_extra_tests = [
     {
-        "python-version": "3.11",
+        "python-version": "3.7",
         "database": "sqlite",
         "extras": "",
     }
-- cgit 1.5.1

From a62c796f633516c2f24ba2506fde2f7c99676422 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 1 Nov 2022 13:58:39 +0000
Subject: Deal with another batch of GHA warning messages (#14313)

---
 .github/workflows/tests.yml | 54 +++++++++++++++++++++++++++++++--------------
 changelog.d/14313.misc      |  1 +
 2 files changed, 39 insertions(+), 16 deletions(-)
 create mode 100644 changelog.d/14313.misc

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 27fef6b5bd..fea33abd12 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -33,6 +33,8 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
       - uses: matrix-org/setup-python-poetry@v1
         with:
           extras: "all"
@@ -44,6 +46,8 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
       - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
       - run: scripts-dev/check_schema_delta.py --force-colors
 
@@ -68,6 +72,8 @@ jobs:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
       - uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
       - run: "pip install 'towncrier>=18.6.0rc1'"
       - run: scripts-dev/check-newsfragment.sh
         env:
@@ -93,10 +99,12 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
           toolchain: 1.58.1
-          override: true
           components: clippy
       - uses: Swatinem/rust-cache@v2
 
@@ -111,11 +119,13 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
-          toolchain: 1.58.1
-          override: true
-          components: rustfmt
+            toolchain: 1.58.1
+            components: rustfmt
       - uses: Swatinem/rust-cache@v2
 
       - run: cargo fmt --check
@@ -143,6 +153,8 @@ jobs:
     steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
      - id: get-matrix
        run: .ci/scripts/calculate_jobs.py
    outputs:
@@ -169,10 +181,12 @@ jobs:
             postgres:${{ matrix.job.postgres-version }}
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
           toolchain: 1.58.1
-          override: true
       - uses: Swatinem/rust-cache@v2
 
       - uses: matrix-org/setup-python-poetry@v1
@@ -211,10 +225,12 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
           toolchain: 1.58.1
-          override: true
       - uses: Swatinem/rust-cache@v2
 
       # There aren't wheels for some of the older deps, so we need to install
@@ -327,10 +343,12 @@ jobs:
         run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
           toolchain: 1.58.1
-          override: true
       - uses: Swatinem/rust-cache@v2
 
       - name: Run SyTest
@@ -459,10 +477,12 @@ jobs:
           path: synapse
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
           toolchain: 1.58.1
-          override: true
       - uses: Swatinem/rust-cache@v2
 
       - name: Prepare Complement's Prerequisites
@@ -485,10 +505,12 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
           toolchain: 1.58.1
-          override: true
       - uses: Swatinem/rust-cache@v2
 
       - run: cargo test
diff --git a/changelog.d/14313.misc b/changelog.d/14313.misc
new file mode 100644
index 0000000000..755527daef
--- /dev/null
+++ b/changelog.d/14313.misc
@@ -0,0 +1 @@
+Switch to a maintained action for installing Rust in CI.
-- cgit 1.5.1

From 1dd16e96c86848a4d58950344e1d058c23bd5373 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 1 Nov 2022 14:31:12 +0000
Subject: Bump twisted from 22.8.0 to 22.10.0 (#14340)

* Bump twisted from 22.8.0 to 22.10.0

Bumps [twisted](https://github.com/twisted/twisted) from 22.8.0 to 22.10.0.
- [Release notes](https://github.com/twisted/twisted/releases)
- [Changelog](https://github.com/twisted/twisted/blob/trunk/NEWS.rst)
- [Commits](https://github.com/twisted/twisted/compare/twisted-22.8.0...twisted-22.10.0)

---
updated-dependencies:
- dependency-name: twisted
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]

* Changelog

Signed-off-by: dependabot[bot]

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: GitHub Actions
---
 changelog.d/14340.misc |  1 +
 poetry.lock            | 24 ++++++++++++------------
 2 files changed, 13 insertions(+), 12 deletions(-)
 create mode 100644 changelog.d/14340.misc

diff --git a/changelog.d/14340.misc b/changelog.d/14340.misc
new file mode 100644
index 0000000000..4e01bf777c
--- /dev/null
+++ b/changelog.d/14340.misc
@@ -0,0 +1 @@
+Bump twisted from 22.8.0 to 22.10.0.
diff --git a/poetry.lock b/poetry.lock
index ebabd3b833..b945463299 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1313,7 +1313,7 @@ urllib3 = ">=1.26.0"
 
 [[package]]
 name = "twisted"
-version = "22.8.0"
+version = "22.10.0"
 description = "An asynchronous networking framework written in Python"
 category = "main"
 optional = false
@@ -1333,21 +1333,21 @@ typing-extensions = ">=3.6.5"
 "zope.interface" = ">=4.4.2"
 
 [package.extras]
-all-non-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+all-non-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
 conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
 conch-nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
 contextvars = ["contextvars (>=2.4,<3)"]
-dev = ["coverage (>=6b1,<7)", "pydoctor (>=22.7.0,<22.8.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)"]
-dev-release = ["pydoctor (>=22.7.0,<22.8.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)"]
-gtk-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pygobject", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+dev = ["coverage (>=6b1,<7)", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)"]
+dev-release = ["pydoctor (>=22.9.0,<22.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)"]
+gtk-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pygobject", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
 http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
-macos-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
-mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=22.7.0,<22.8.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"]
-osx-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+macos-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"]
+osx-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
 serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
-test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)"]
+test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.0,<7.0)"]
 tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"]
-windows-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+windows-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
 
 [[package]]
 name = "twisted-iocpsupport"
@@ -2721,8 +2721,8 @@ twine = [
     {file = "twine-4.0.1.tar.gz", hash = "sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0"},
 ]
 twisted = [
-    {file = "Twisted-22.8.0-py3-none-any.whl", hash = "sha256:8d4718d1e48dcc28933f8beb48dc71cfe77a125e37ad1eb7a3d0acc49baf6c99"},
-    {file = "Twisted-22.8.0.tar.gz", hash = "sha256:e5b60de39f2d1da153fbe1874d885fe3fcbdb21fcc446fa759a53e8fc3513bed"},
+    {file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"},
+    {file = "Twisted-22.10.0.tar.gz", hash = "sha256:32acbd40a94f5f46e7b42c109bfae2b302250945561783a8b7a059048f2d4d31"},
 ]
 twisted-iocpsupport = [
     {file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"},
-- cgit 1.5.1

From 2bd7f3eeab1a4818359c9f585b660ff3f3d8bc6c Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 1 Nov 2022 15:02:39 +0000
Subject: Allow PUT/GET of aliases during faster join (#14292)

without blocking on full state.
---
 changelog.d/14292.bugfix      | 1 +
 synapse/handlers/directory.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/14292.bugfix

diff --git a/changelog.d/14292.bugfix b/changelog.d/14292.bugfix
new file mode 100644
index 0000000000..4ed92f5cf2
--- /dev/null
+++ b/changelog.d/14292.bugfix
@@ -0,0 +1 @@
+Faster joins: do not block creation of or queries for room aliases during the resync.
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index d52ebada6b..2ea52257cb 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -85,7 +85,7 @@ class DirectoryHandler:
 
         # TODO(erikj): Add transactions.
         # TODO(erikj): Check if there is a current association.
         if not servers:
-            servers = await self._storage_controllers.state.get_current_hosts_in_room(
+            servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
                 room_id
             )
 
@@ -290,7 +290,7 @@ class DirectoryHandler:
                 Codes.NOT_FOUND,
             )
 
-        extra_servers = await self._storage_controllers.state.get_current_hosts_in_room(
+        extra_servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
            room_id
        )
        servers_set = set(extra_servers) | set(servers)
-- cgit 1.5.1

From 59ca73006c84a74f1bf4934f1c109202ff82408e Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 1 Nov 2022 13:26:28 -0400
Subject: Enable testing MSC3874 in complement. (#14339)

---
 changelog.d/14339.misc                              | 1 +
 docker/complement/conf/workers-shared-extra.yaml.j2 | 4 ++--
 scripts-dev/complement.sh                           | 2 +-
 3 files changed, 4 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/14339.misc

diff --git a/changelog.d/14339.misc b/changelog.d/14339.misc
new file mode 100644
index 0000000000..3761d453a8
--- /dev/null
+++ b/changelog.d/14339.misc
@@ -0,0 +1 @@
+Enable testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in complement.
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index c651645115..883a87159c 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -92,8 +92,6 @@ allow_device_name_lookup_over_federation: true
 ## Experimental Features ##
 
 experimental_features:
-  # Enable spaces support
-  spaces_enabled: true
   # Enable history backfilling support
   msc2716_enabled: true
   # server-side support for partial state in /send_join responses
@@ -104,6 +102,8 @@ experimental_features:
 {% endif %}
   # Enable jump to date endpoint
   msc3030_enabled: true
+  # Filtering /messages by relation type.
+  msc3874_enabled: true
 
 server_notices:
   system_mxid_localpart: _server
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index a7b1e1e3a8..76c859694f 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -126,7 +126,7 @@ export COMPLEMENT_BASE_IMAGE=complement-synapse
 
 extra_test_args=()
 
-test_tags="synapse_blacklist,msc3787"
+test_tags="synapse_blacklist,msc3787,msc3874"
 
 # All environment variables starting with PASS_ will be shared.
 # (The prefix is stripped off before reaching the container.)
-- cgit 1.5.1

From d4fac8a3e27ab3e133c5e5ac603c8d937a1fd86c Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 1 Nov 2022 19:20:35 +0000
Subject: Fix typo in #13320 which could cause log spam (#14347)

---
 changelog.d/14347.bugfix                | 1 +
 synapse/federation/federation_client.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/14347.bugfix

diff --git a/changelog.d/14347.bugfix b/changelog.d/14347.bugfix
new file mode 100644
index 0000000000..91975757ae
--- /dev/null
+++ b/changelog.d/14347.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index fa225182be..c4c0bc7315 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -465,7 +465,7 @@ class FederationClient(FederationBase):
                 pdu_attempts[destination] = now
 
                 logger.info(
-                    "get_pdu(event_id=): Failed to get PDU from %s because %s",
+                    "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
                     event_id,
                     destination,
                     e,
-- cgit 1.5.1
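
Why a missing `%s` produces log spam rather than a crash (a minimal sketch of standard-library behaviour, not code from the patch): the stdlib logging module catches the formatting error caused by the surplus argument and prints a "--- Logging error ---" traceback to stderr on every call, instead of the intended message.

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # Two placeholders but three arguments: each call emits a logging
    # error report instead of the message, i.e. noisy logs on every retry.
    logger.info(
        "get_pdu(event_id=): Failed to get PDU from %s because %s",
        "$some_event",          # illustrative event ID
        "remote.example.org",   # illustrative destination
        "timeout",              # illustrative error
    )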

From 19a57f4a3710d6c3f7cc9d031e0e59bc2ed3b052 Mon Sep 17 00:00:00 2001
From: Kat Gerasimova
Date: Tue, 1 Nov 2022 19:26:15 +0000
Subject: Fix issue automation for Needs-Info (#14343)

Run when an issue is labelled with X-Needs-Info only. Add to triage board.

Use itemId which is output by actions/add-to-project to run the mutation to
update the field value (i.e. move to the right column).
---
 .github/workflows/triage_labelled.yml | 54 +++++++++++++++++------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml
index fbd55de17f..d1ac4357b1 100644
--- a/.github/workflows/triage_labelled.yml
+++ b/.github/workflows/triage_labelled.yml
@@ -11,34 +11,34 @@ jobs:
     if: >
       contains(github.event.issue.labels.*.name, 'X-Needs-Info')
     steps:
-      - uses: octokit/graphql-action@v2.x
-        id: add_to_project
+      - uses: actions/add-to-project@main
+        id: add_project
         with:
-          headers: '{"GraphQL-Features": "projects_next_graphql"}'
-          query: |
-            mutation {
-              updateProjectV2ItemFieldValue(
-                input: {
-                  projectId: $projectid
-                  itemId: $contentid
-                  fieldId: $fieldid
-                  value: {
-                    singleSelectOptionId: "Todo"
+          project-url: "https://github.com/orgs/matrix-org/projects/67"
+          github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
+      - name: Set status
+        env:
+          GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
+        run: |
+          gh api graphql -f query='
+            mutation(
+              $project: ID!
+              $item: ID!
+              $fieldid: ID!
+              $columnid: String!
+            ) {
+              updateProjectV2ItemFieldValue(
+                input: {
+                  projectId: $project
+                  itemId: $item
+                  fieldId: $fieldid
+                  value: {
+                    singleSelectOptionId: $columnid
                   }
-                }
-              ) {
-                projectV2Item {
-                  id
-                }
+              }
+            ) {
+              projectV2Item {
+                id
               }
             }
-
-        projectid: ${{ env.PROJECT_ID }}
-        contentid: ${{ github.event.issue.node_id }}
-        fieldid: ${{ env.FIELD_ID }}
-        optionid: ${{ env.OPTION_ID }}
-      env:
-        PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
-        GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
-        FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
-        OPTION_ID: "ba22e43c"
+          }' -f project="PVT_kwDOAIB0Bs4AFDdZ" -f item=${{ steps.add_project.outputs.itemId }} -f fieldid="PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" -f columnid=ba22e43c --silent
-- cgit 1.5.1

From 6546308c1e7d3eff316631a5909151dc6c7a9e1e Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Wed, 2 Nov 2022 17:33:45 +0000
Subject: Disable legacy Prometheus metric names by default. They can still be
 re-enabled for now, but they will be removed altogether in Synapse 1.73.0.
 (#14353)

---
 CHANGES.md                                       |  9 +++++++++
 changelog.d/14353.removal                        |  1 +
 docs/upgrade.md                                  | 16 ++++++++++++++++
 docs/usage/configuration/config_documentation.md |  4 ++--
 synapse/config/metrics.py                        |  2 +-
 5 files changed, 29 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/14353.removal

diff --git a/CHANGES.md b/CHANGES.md
index 113ad0d1ee..6bafdd3fad 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,12 @@
+Synapse (Next) (2022-11-01)
+=========================
+
+Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
+They will be removed altogether in Synapse 1.73.0.
+If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.
+
+
 Synapse 1.71.0rc1 (2022-11-01)
 ==============================
 
diff --git a/changelog.d/14353.removal b/changelog.d/14353.removal
new file mode 100644
index 0000000000..fc42aa9106
--- /dev/null
+++ b/changelog.d/14353.removal
@@ -0,0 +1 @@
+Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0.
\ No newline at end of file
diff --git a/docs/upgrade.md b/docs/upgrade.md
index f095bbc3a6..41b06cc253 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -116,6 +116,22 @@ local users and some remote users
 is why the spec was changed/clarified and this caveat is no longer
 supported.
 
+## Legacy Prometheus metric names are now disabled by default
+
+Synapse v1.71.0 disables legacy Prometheus metric names by default.
+For administrators that still rely on them and have not yet had chance to update their
+uses of the metrics, it's still possible to specify `enable_legacy_metrics: true` in
+the configuration to re-enable them temporarily.
+
+Synapse v1.73.0 will **remove legacy metric names altogether** and at that point,
+it will no longer be possible to re-enable them.
+
+If you do not use metrics or you have already updated your Grafana dashboard(s),
+Prometheus console(s) and alerting rule(s), there is no action needed.
+
+See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names).
+
+
 # Upgrading to v1.69.0
 
 ## Changes to the receipts replication streams
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 44358faf59..9a6bd08d01 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -2441,8 +2441,8 @@ enable_metrics: true
 Set to `true` to publish both legacy and non-legacy Prometheus metric names,
 or to `false` to only publish non-legacy Prometheus metric names.
-Defaults to `true`. Has no effect if `enable_metrics` is `false`.
-**In Synapse v1.71.0, this will default to `false` before being removed in Synapse v1.73.0.**
+Defaults to `false`. Has no effect if `enable_metrics` is `false`.
+**In Synapse v1.67.0 up to and including Synapse v1.70.1, this defaulted to `true`.**
 
 Legacy metric names include:
 - metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index bb065f9f2f..6034a0346e 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -43,7 +43,7 @@ class MetricsConfig(Config):
     def read_config(self, config: JsonDict, **kwargs: Any) -> None:
         self.enable_metrics = config.get("enable_metrics", False)
-        self.enable_legacy_metrics = config.get("enable_legacy_metrics", True)
+        self.enable_legacy_metrics = config.get("enable_legacy_metrics", False)
 
         self.report_stats = config.get("report_stats", None)
         self.report_stats_endpoint = config.get(
-- cgit 1.5.1
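
For administrators who need the legacy names until Synapse 1.73.0, the relevant homeserver.yaml fragment (using exactly the two keys documented in the diff above) is:

    enable_metrics: true
    enable_legacy_metrics: true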

From e5cd278f3f6e9fa3008addc064841e6448f18bbd Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Wed, 2 Nov 2022 23:19:57 +0000
Subject: Use maintained action to install Rust in latest deps/twisted trunk jobs (#14351)

* Use maintained action to install Rust

Part of #14203. Like the changes in #14313.

* Changelog
---
 .github/workflows/latest_deps.yml   | 15 ++++++---------
 .github/workflows/twisted_trunk.yml | 15 ++++++---------
 changelog.d/14351.misc              |  1 +
 3 files changed, 13 insertions(+), 18 deletions(-)
 create mode 100644 changelog.d/14351.misc

diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index b1e45ee648..c6f481cdaa 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -27,10 +27,9 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
-          toolchain: stable
-          override: true
+            toolchain: stable
       - uses: Swatinem/rust-cache@v2
 
       # The dev dependencies aren't exposed in the wheel metadata (at least with current
@@ -62,10 +61,9 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
-          toolchain: stable
-          override: true
+            toolchain: stable
       - uses: Swatinem/rust-cache@v2
 
       - run: sudo apt-get -qq install xmlsec1
@@ -136,10 +134,9 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
-          toolchain: stable
-          override: true
+            toolchain: stable
       - uses: Swatinem/rust-cache@v2
 
       - name: Ensure sytest runs `pip install`
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index f592e632c1..6a047193f6 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -18,10 +18,9 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
-          toolchain: stable
-          override: true
+            toolchain: stable
       - uses: Swatinem/rust-cache@v2
 
       - uses: matrix-org/setup-python-poetry@v1
@@ -44,10 +43,9 @@ jobs:
       - run: sudo apt-get -qq install xmlsec1
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
-          toolchain: stable
-          override: true
+            toolchain: stable
       - uses: Swatinem/rust-cache@v2
 
       - uses: matrix-org/setup-python-poetry@v1
@@ -84,10 +82,9 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Install Rust
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
         with:
-          toolchain: stable
-          override: true
+            toolchain: stable
       - uses: Swatinem/rust-cache@v2
 
       - name: Patch dependencies
diff --git a/changelog.d/14351.misc b/changelog.d/14351.misc
new file mode 100644
index 0000000000..4a3e053c08
--- /dev/null
+++ b/changelog.d/14351.misc
@@ -0,0 +1 @@
+Use a maintained Github action to install Rust.
-- cgit 1.5.1

From 86c5a710d8b4212f8a8a668d7d4a79c0bb371508 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Thu, 3 Nov 2022 16:21:31 +0000
Subject: Implement MSC3912: Relation-based redactions (#14260)

Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com>
---
 changelog.d/14260.feature                   |   1 +
 synapse/api/constants.py                    |   2 +
 synapse/config/experimental.py              |   3 +
 synapse/handlers/message.py                 |  47 ++++-
 synapse/handlers/relations.py               |  56 +++++-
 synapse/rest/client/room.py                 |  57 ++++--
 synapse/rest/client/versions.py             |   2 +
 synapse/storage/databases/main/relations.py |  36 ++++
 tests/rest/client/test_redactions.py        | 273 +++++++++++++++++++++++++++-
 tests/rest/client/utils.py                  |  37 ++++
 10 files changed, 486 insertions(+), 28 deletions(-)
 create mode 100644 changelog.d/14260.feature

diff --git a/changelog.d/14260.feature b/changelog.d/14260.feature
new file mode 100644
index 0000000000..102dc7b3e0
--- /dev/null
+++ b/changelog.d/14260.feature
@@ -0,0 +1 @@
+Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions.
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 44c5ffc6a5..bc04a0755b 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -125,6 +125,8 @@ class EventTypes:
     MSC2716_BATCH: Final = "org.matrix.msc2716.batch"
     MSC2716_MARKER: Final = "org.matrix.msc2716.marker"
 
+    Reaction: Final = "m.reaction"
+
 
 class ToDeviceEventTypes:
     RoomKeyRequest: Final = "m.room_key_request"
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index d9bdd66d55..d4b71d1673 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -128,3 +128,6 @@ class ExperimentalConfig(Config):
         self.msc3886_endpoint: Optional[str] = experimental.get(
             "msc3886_endpoint", None
         )
+
+        # MSC3912: Relation-based redactions.
+        self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 468900a07f..4cf593cfdc 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -877,6 +877,36 @@ class EventCreationHandler:
                 return prev_event
         return None
 
+    async def get_event_from_transaction(
+        self,
+        requester: Requester,
+        txn_id: str,
+        room_id: str,
+    ) -> Optional[EventBase]:
+        """For the given transaction ID and room ID, check if there is a matching event.
+        If so, fetch it and return it.
+
+        Args:
+            requester: The requester making the request in the context of which we want
+                to fetch the event.
+            txn_id: The transaction ID.
+            room_id: The room ID.
+
+        Returns:
+            An event if one could be found, None otherwise.
+        """
+        if requester.access_token_id:
+            existing_event_id = await self.store.get_event_id_from_transaction_id(
+                room_id,
+                requester.user.to_string(),
+                requester.access_token_id,
+                txn_id,
+            )
+            if existing_event_id:
+                return await self.store.get_event(existing_event_id)
+
+        return None
+
     async def create_and_send_nonmember_event(
         self,
         requester: Requester,
@@ -956,18 +986,17 @@ class EventCreationHandler:
         # extremities to pile up, which in turn leads to state resolution
         # taking longer.
         async with self.limiter.queue(event_dict["room_id"]):
-            if txn_id and requester.access_token_id:
-                existing_event_id = await self.store.get_event_id_from_transaction_id(
-                    event_dict["room_id"],
-                    requester.user.to_string(),
-                    requester.access_token_id,
-                    txn_id,
+            if txn_id:
+                event = await self.get_event_from_transaction(
+                    requester, txn_id, event_dict["room_id"]
                 )
-                if existing_event_id:
-                    event = await self.store.get_event(existing_event_id)
+                if event:
                     # we know it was persisted, so must have a stream ordering
                     assert event.internal_metadata.stream_ordering
-                    return event, event.internal_metadata.stream_ordering
+                    return (
+                        event,
+                        event.internal_metadata.stream_ordering,
+                    )
 
             event, context = await self.create_event(
                 requester,
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index 0a0c6d938e..8e71dda970 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -17,7 +17,7 @@ from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Tup
 
 import attr
 
-from synapse.api.constants import RelationTypes
+from synapse.api.constants import EventTypes, RelationTypes
 from synapse.api.errors import SynapseError
 from synapse.events import EventBase, relation_from_event
 from synapse.logging.opentracing import trace
@@ -75,6 +75,7 @@ class RelationsHandler:
         self._clock = hs.get_clock()
         self._event_handler = hs.get_event_handler()
         self._event_serializer = hs.get_event_client_serializer()
+        self._event_creation_handler = hs.get_event_creation_handler()
 
     async def get_relations(
         self,
@@ -205,6 +206,59 @@ class RelationsHandler:
 
         return related_events, next_token
 
+    async def redact_events_related_to(
+        self,
+        requester: Requester,
+        event_id: str,
+        initial_redaction_event: EventBase,
+        relation_types: List[str],
+    ) -> None:
+        """Redacts all events related to the given event ID with one of the given
+        relation types.
+
+        This method is expected to be called when redacting the event referred to by
+        the given event ID.
+
+        If an event cannot be redacted (e.g. because of insufficient permissions), log
+        the error and try to redact the next one.
+
+        Args:
+            requester: The requester to redact events on behalf of.
+            event_id: The ID of the event whose related events to look for and redact.
+            initial_redaction_event: The redaction for the event referred to by
+                event_id.
+            relation_types: The types of relations to look for.
+
+        Raises:
+            ShadowBanError if the requester is shadow-banned
+        """
+        related_event_ids = (
+            await self._main_store.get_all_relations_for_event_with_types(
+                event_id, relation_types
+            )
+        )
+
+        for related_event_id in related_event_ids:
+            try:
+                await self._event_creation_handler.create_and_send_nonmember_event(
+                    requester,
+                    {
+                        "type": EventTypes.Redaction,
+                        "content": initial_redaction_event.content,
+                        "room_id": initial_redaction_event.room_id,
+                        "sender": requester.user.to_string(),
+                        "redacts": related_event_id,
+                    },
+                    ratelimit=False,
+                )
+            except SynapseError as e:
+                logger.warning(
+                    "Failed to redact event %s (related to event %s): %s",
+                    related_event_id,
+                    event_id,
+                    e.msg,
+                )
+
     async def get_annotations_for_event(
         self,
         event_id: str,
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 01e5079963..91cb791139 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -52,6 +52,7 @@ from synapse.http.servlet import (
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.logging.opentracing import set_tag
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.rest.client._base import client_patterns
 from synapse.rest.client.transactions import HttpTransactionCache
 from synapse.storage.state import StateFilter
@@ -1029,6 +1030,8 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
         super().__init__(hs)
         self.event_creation_handler = hs.get_event_creation_handler()
         self.auth = hs.get_auth()
+        self._relation_handler = hs.get_relations_handler()
+        self._msc3912_enabled = hs.config.experimental.msc3912_enabled
 
     def register(self, http_server: HttpServer) -> None:
         PATTERNS = "/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)"
@@ -1045,20 +1048,46 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
         content = parse_json_object_from_request(request)
 
         try:
-            (
-                event,
-                _,
-            ) = await self.event_creation_handler.create_and_send_nonmember_event(
-                requester,
-                {
-                    "type": EventTypes.Redaction,
-                    "content": content,
-                    "room_id": room_id,
-                    "sender": requester.user.to_string(),
-                    "redacts": event_id,
-                },
-                txn_id=txn_id,
-            )
+            with_relations = None
+            if self._msc3912_enabled and "org.matrix.msc3912.with_relations" in content:
+                with_relations = content["org.matrix.msc3912.with_relations"]
+                del content["org.matrix.msc3912.with_relations"]
+
+            # Check if there's an existing event for this transaction now (even though
+            # create_and_send_nonmember_event also does it) because, if there's one,
+            # then we want to skip the call to redact_events_related_to.
+            event = None
+            if txn_id:
+                event = await self.event_creation_handler.get_event_from_transaction(
+                    requester, txn_id, room_id
+                )
+
+            if event is None:
+                (
+                    event,
+                    _,
+                ) = await self.event_creation_handler.create_and_send_nonmember_event(
+                    requester,
+                    {
+                        "type": EventTypes.Redaction,
+                        "content": content,
+                        "room_id": room_id,
+                        "sender": requester.user.to_string(),
+                        "redacts": event_id,
+                    },
+                    txn_id=txn_id,
+                )
+
+            if with_relations:
+                run_as_background_process(
+                    "redact_related_events",
+                    self._relation_handler.redact_events_related_to,
+                    requester=requester,
+                    event_id=event_id,
+                    initial_redaction_event=event,
+                    relation_types=with_relations,
+                )
+
             event_id = event.event_id
         except ShadowBanError:
             event_id = "$" + random_string(43)
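
For illustration, a client opting into the new behaviour would redact with something like the following (hypothetical room, event and transaction IDs; the content key and the relation type names are taken from the code above):

    PUT /_matrix/client/v3/rooms/!room:example.org/redact/$event_id/txn1
    {
      "reason": "spam",
      "org.matrix.msc3912.with_relations": ["m.replace", "m.thread"]
    }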
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 9b1b72c68a..180a11ef88 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -119,6 +119,8 @@ class VersionsRestServlet(RestServlet):
                     # Adds support for simple HTTP rendezvous as per MSC3886
                     "org.matrix.msc3886": self.config.experimental.msc3886_endpoint
                     is not None,
+                    # Adds support for relation-based redactions as per MSC3912.
+                    "org.matrix.msc3912": self.config.experimental.msc3912_enabled,
                 },
             },
         )
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index c022510e76..ca431002c8 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -295,6 +295,42 @@ class RelationsWorkerStore(SQLBaseStore):
             "get_recent_references_for_event", _get_recent_references_for_event_txn
         )
 
+    async def get_all_relations_for_event_with_types(
+        self,
+        event_id: str,
+        relation_types: List[str],
+    ) -> List[str]:
+        """Get the event IDs of all events that have a relation to the given event with
+        one of the given relation types.
+
+        Args:
+            event_id: The event for which to look for related events.
+            relation_types: The types of relations to look for.
+
+        Returns:
+            A list of the IDs of the events that relate to the given event with one of
+            the given relation types.
+        """
+
+        def get_all_relation_ids_for_event_with_types_txn(
+            txn: LoggingTransaction,
+        ) -> List[str]:
+            rows = self.db_pool.simple_select_many_txn(
+                txn=txn,
+                table="event_relations",
+                column="relation_type",
+                iterable=relation_types,
+                keyvalues={"relates_to_id": event_id},
+                retcols=["event_id"],
+            )
+
+            return [row["event_id"] for row in rows]
+
+        return await self.db_pool.runInteraction(
+            desc="get_all_relation_ids_for_event_with_types",
+            func=get_all_relation_ids_for_event_with_types_txn,
+        )
+
     async def event_includes_relation(self, event_id: str) -> bool:
         """Check if the given event relates to another event.
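
The simple_select_many_txn call above boils down to roughly this query (a sketch; the actual SQL, including the IN-list, is generated by Synapse's database helper, with the table and column names taken from the diff):

    SELECT event_id FROM event_relations
    WHERE relates_to_id = ? AND relation_type IN (?, ?);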
-from typing import List
+from typing import List, Optional
 
 from twisted.test.proto_helpers import MemoryReactor
 
+from synapse.api.constants import EventTypes, RelationTypes
 from synapse.rest import admin
 from synapse.rest.client import login, room, sync
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util import Clock
 
-from tests.unittest import HomeserverTestCase
+from tests.unittest import HomeserverTestCase, override_config
 
 
 class RedactionsTestCase(HomeserverTestCase):
@@ -67,7 +68,12 @@ class RedactionsTestCase(HomeserverTestCase):
         )
 
     def _redact_event(
-        self, access_token: str, room_id: str, event_id: str, expect_code: int = 200
+        self,
+        access_token: str,
+        room_id: str,
+        event_id: str,
+        expect_code: int = 200,
+        with_relations: Optional[List[str]] = None,
     ) -> JsonDict:
         """Helper function to send a redaction event.
 
@@ -75,7 +81,13 @@ class RedactionsTestCase(HomeserverTestCase):
         """
         path = "/_matrix/client/r0/rooms/%s/redact/%s" % (room_id, event_id)
 
-        channel = self.make_request("POST", path, content={}, access_token=access_token)
+        request_content = {}
+        if with_relations:
+            request_content["org.matrix.msc3912.with_relations"] = with_relations
+
+        channel = self.make_request(
+            "POST", path, request_content, access_token=access_token
+        )
         self.assertEqual(channel.code, expect_code)
         return channel.json_body
 
@@ -201,3 +213,256 @@ class RedactionsTestCase(HomeserverTestCase):
         # These should all succeed, even though this would be denied by
         # the standard message ratelimiter
         self._redact_event(self.mod_access_token, self.room_id, msg_id)
+
+    @override_config({"experimental_features": {"msc3912_enabled": True}})
+    def test_redact_relations(self) -> None:
+        """Tests that we can redact the relations of an event at the same time as the
+        event itself.
+        """
+        # Send a root event.
+        res = self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={"msgtype": "m.text", "body": "hello"},
+            tok=self.mod_access_token,
+        )
+        root_event_id = res["event_id"]
+
+        # Send an edit to this root event.
+        res = self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={
+                "body": " * hello world",
+                "m.new_content": {
+                    "body": "hello world",
+                    "msgtype": "m.text",
+                },
+                "m.relates_to": {
+                    "event_id": root_event_id,
+                    "rel_type": RelationTypes.REPLACE,
+                },
+                "msgtype": "m.text",
+            },
+            tok=self.mod_access_token,
+        )
+        edit_event_id = res["event_id"]
+
+        # Also send a threaded message that relates to the same root event as the edit.
+        res = self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={
+                "msgtype": "m.text",
+                "body": "message 1",
+                "m.relates_to": {
+                    "event_id": root_event_id,
+                    "rel_type": RelationTypes.THREAD,
+                },
+            },
+            tok=self.mod_access_token,
+        )
+        threaded_event_id = res["event_id"]
+
+        # Also send a reaction, again with the same root.
+        res = self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Reaction,
+            content={
+                "m.relates_to": {
+                    "rel_type": RelationTypes.ANNOTATION,
+                    "event_id": root_event_id,
+                    "key": "👍",
+                }
+            },
+            tok=self.mod_access_token,
+        )
+        reaction_event_id = res["event_id"]
+
+        # Redact the root event, specifying that we also want to delete events that
+        # relate to it with m.replace or m.thread.
+        self._redact_event(
+            self.mod_access_token,
+            self.room_id,
+            root_event_id,
+            with_relations=[
+                RelationTypes.REPLACE,
+                RelationTypes.THREAD,
+            ],
+        )
+
+        # Check that the root event got redacted.
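+        # (A redacted event is served back with its content stripped out and a
+        # "redacted_because" reference to the redaction event, which is what the
+        # assertions below check for.)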
+        event_dict = self.helper.get_event(
+            self.room_id, root_event_id, self.mod_access_token
+        )
+        self.assertIn("redacted_because", event_dict, event_dict)
+
+        # Check that the edit got redacted.
+        event_dict = self.helper.get_event(
+            self.room_id, edit_event_id, self.mod_access_token
+        )
+        self.assertIn("redacted_because", event_dict, event_dict)
+
+        # Check that the threaded message got redacted.
+        event_dict = self.helper.get_event(
+            self.room_id, threaded_event_id, self.mod_access_token
+        )
+        self.assertIn("redacted_because", event_dict, event_dict)
+
+        # Check that the reaction did not get redacted.
+        event_dict = self.helper.get_event(
+            self.room_id, reaction_event_id, self.mod_access_token
+        )
+        self.assertNotIn("redacted_because", event_dict, event_dict)
+
+    @override_config({"experimental_features": {"msc3912_enabled": True}})
+    def test_redact_relations_no_perms(self) -> None:
+        """Tests that, when redacting a message along with its relations, if not all
+        the related messages can be redacted because of insufficient permissions, the
+        server still redacts all the ones that can be.
+        """
+        # Send a root event.
+        res = self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={
+                "msgtype": "m.text",
+                "body": "root",
+            },
+            tok=self.other_access_token,
+        )
+        root_event_id = res["event_id"]
+
+        # Send a first threaded message, this one from the moderator. We make this the
+        # first message with the m.thread relation (rather than the last one) to ensure
+        # that, when the server fails to redact it, it doesn't stop there and instead
+        # goes on to redact the other one.
+        res = self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={
+                "msgtype": "m.text",
+                "body": "message 1",
+                "m.relates_to": {
+                    "event_id": root_event_id,
+                    "rel_type": RelationTypes.THREAD,
+                },
+            },
+            tok=self.mod_access_token,
+        )
+        first_threaded_event_id = res["event_id"]
+
+        # Send a second threaded message, this time from the user who'll perform the
+        # redaction.
+        res = self.helper.send_event(
+            room_id=self.room_id,
+            type=EventTypes.Message,
+            content={
+                "msgtype": "m.text",
+                "body": "message 2",
+                "m.relates_to": {
+                    "event_id": root_event_id,
+                    "rel_type": RelationTypes.THREAD,
+                },
+            },
+            tok=self.other_access_token,
+        )
+        second_threaded_event_id = res["event_id"]
+
+        # Redact the thread's root, and request that all threaded messages are also
+        # redacted. Send that request from the non-mod user, so that the first threaded
+        # event cannot be redacted.
+        self._redact_event(
+            self.other_access_token,
+            self.room_id,
+            root_event_id,
+            with_relations=[RelationTypes.THREAD],
+        )
+
+        # Check that the thread root got redacted.
+        event_dict = self.helper.get_event(
+            self.room_id, root_event_id, self.other_access_token
+        )
+        self.assertIn("redacted_because", event_dict, event_dict)
+
+        # Check that the last message in the thread got redacted, even though the
+        # server failed to redact the one before it.
+        event_dict = self.helper.get_event(
+            self.room_id, second_threaded_event_id, self.other_access_token
+        )
+        self.assertIn("redacted_because", event_dict, event_dict)
+
+        # Check that the message that was sent into the thread by the mod user is not
+        # redacted.
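+        # (The redaction request came from the unprivileged user, who doesn't have
+        # permission to redact the moderator's message.)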
+ event_dict = self.helper.get_event( + self.room_id, first_threaded_event_id, self.other_access_token + ) + self.assertIn("body", event_dict["content"], event_dict) + self.assertEqual("message 1", event_dict["content"]["body"]) + + @override_config({"experimental_features": {"msc3912_enabled": True}}) + def test_redact_relations_txn_id_reuse(self) -> None: + """Tests that redacting a message using a transaction ID, then reusing the same + transaction ID but providing an additional list of relations to redact, is + effectively a no-op. + """ + # Send a root event. + res = self.helper.send_event( + room_id=self.room_id, + type=EventTypes.Message, + content={ + "msgtype": "m.text", + "body": "root", + }, + tok=self.mod_access_token, + ) + root_event_id = res["event_id"] + + # Send a first threaded message. + res = self.helper.send_event( + room_id=self.room_id, + type=EventTypes.Message, + content={ + "msgtype": "m.text", + "body": "I'm in a thread!", + "m.relates_to": { + "event_id": root_event_id, + "rel_type": RelationTypes.THREAD, + }, + }, + tok=self.mod_access_token, + ) + threaded_event_id = res["event_id"] + + # Send a first redaction request which redacts only the root event. + channel = self.make_request( + method="PUT", + path=f"/rooms/{self.room_id}/redact/{root_event_id}/foo", + content={}, + access_token=self.mod_access_token, + ) + self.assertEqual(channel.code, 200) + + # Send a second redaction request which redacts the root event as well as + # threaded messages. + channel = self.make_request( + method="PUT", + path=f"/rooms/{self.room_id}/redact/{root_event_id}/foo", + content={"org.matrix.msc3912.with_relations": [RelationTypes.THREAD]}, + access_token=self.mod_access_token, + ) + self.assertEqual(channel.code, 200) + + # Check that the root event got redacted. + event_dict = self.helper.get_event( + self.room_id, root_event_id, self.mod_access_token + ) + self.assertIn("redacted_because", event_dict) + + # Check that the threaded message didn't get redacted (since that wasn't part of + # the original redaction). + event_dict = self.helper.get_event( + self.room_id, threaded_event_id, self.mod_access_token + ) + self.assertIn("body", event_dict["content"], event_dict) + self.assertEqual("I'm in a thread!", event_dict["content"]["body"]) diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 706399fae5..8d6f2b6ff9 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -410,6 +410,43 @@ class RestHelper: return channel.json_body + def get_event( + self, + room_id: str, + event_id: str, + tok: Optional[str] = None, + expect_code: int = HTTPStatus.OK, + ) -> JsonDict: + """Request a specific event from the server. + + Args: + room_id: the room in which the event was sent. + event_id: the event's ID. + tok: the token to request the event with. + expect_code: the expected HTTP status for the response. + + Returns: + The event as a dict. 
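+
+        Raises:
+            AssertionError: if the response status code doesn't match `expect_code`.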
+ """ + path = f"/_matrix/client/v3/rooms/{room_id}/event/{event_id}" + if tok: + path = path + f"?access_token={tok}" + + channel = make_request( + self.hs.get_reactor(), + self.site, + "GET", + path, + ) + + assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % ( + expect_code, + channel.code, + channel.result["body"], + ) + + return channel.json_body + def _read_write_state( self, room_id: str, -- cgit 1.5.1 From b1379a7ca8ecb82920e8ab2096e42dbd63ec5474 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Thu, 3 Nov 2022 20:47:20 +0000 Subject: Update legacy `synapse_admin_mau:` metric names in docs (#14358) * Rename legacy metrics in MAU docs * changelog --- changelog.d/14358.doc | 1 + docs/usage/administration/monthly_active_users.md | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14358.doc diff --git a/changelog.d/14358.doc b/changelog.d/14358.doc new file mode 100644 index 0000000000..a9b910878d --- /dev/null +++ b/changelog.d/14358.doc @@ -0,0 +1 @@ +Replace references to legacy metrics `synapse_admin_mau` with new metric names. diff --git a/docs/usage/administration/monthly_active_users.md b/docs/usage/administration/monthly_active_users.md index d4e9037284..b1da6f17c2 100644 --- a/docs/usage/administration/monthly_active_users.md +++ b/docs/usage/administration/monthly_active_users.md @@ -73,12 +73,12 @@ When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMI Synapse records several different prometheus metrics for MAU. -`synapse_admin_mau:current` records the current MAU figure for native (non-application-service) users. +`synapse_admin_mau_current` records the current MAU figure for native (non-application-service) users. -`synapse_admin_mau:max` records the maximum MAU as dictated by the `max_mau_value` config value. +`synapse_admin_mau_max` records the maximum MAU as dictated by the `max_mau_value` config value. `synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native` . -`synapse_admin_mau:registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have +`synapse_admin_mau_registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have registered accounts on the homeserver. -- cgit 1.5.1 From 2e2cffe1a27c2d819ab7eec188a4a7c4f64dabcc Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Nov 2022 13:07:54 +0000 Subject: Cherry-pick "Run trial tests against Python 3.11 (#13812)" and fixup commit 4f5d492cd6a9438de03d1b768f4c220cb662ac06 The release branch CI is failing because poetry seems unable to install wrapt 1.13.3 when run under CPython 3.11. Develop has already bumped wrapt for 3.11 compatibility. Cherry-pick that commit here to try and get CI going again. 
--- .ci/scripts/calculate_jobs.py | 4 +- .github/workflows/tests.yml | 2 +- changelog.d/13812.misc | 1 + poetry.lock | 222 ++++++++++++++++++------------ tests/federation/transport/test_client.py | 11 ++ 5 files changed, 146 insertions(+), 94 deletions(-) create mode 100644 changelog.d/13812.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index c2198e0dd4..c53d4d5ff1 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -46,7 +46,7 @@ if not IS_PR: "database": "sqlite", "extras": "all", } - for version in ("3.8", "3.9", "3.10") + for version in ("3.8", "3.9", "3.10", "3.11") ) @@ -62,7 +62,7 @@ trial_postgres_tests = [ if not IS_PR: trial_postgres_tests.append( { - "python-version": "3.10", + "python-version": "3.11", "database": "postgres", "postgres-version": "14", "extras": "all", diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index ff5cf0c534..27fef6b5bd 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -393,7 +393,7 @@ jobs: - python-version: "3.7" postgres-version: "10" - - python-version: "3.10" + - python-version: "3.11" postgres-version: "14" services: diff --git a/changelog.d/13812.misc b/changelog.d/13812.misc new file mode 100644 index 0000000000..667fdee2b7 --- /dev/null +++ b/changelog.d/13812.misc @@ -0,0 +1 @@ +Run unit tests against Python 3.11. diff --git a/poetry.lock b/poetry.lock index c01cfcfa58..ebabd3b833 100644 --- a/poetry.lock +++ b/poetry.lock @@ -25,7 +25,7 @@ cryptography = ">=3.2" [[package]] name = "automat" -version = "20.2.0" +version = "22.10.0" description = "Self-service finite-state machines for the programmer on the go." category = "main" optional = false @@ -671,12 +671,16 @@ python-versions = "*" [[package]] name = "pillow" -version = "9.0.1" +version = "9.2.0" description = "Python Imaging Library (Fork)" category = "main" optional = false python-versions = ">=3.7" +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] + [[package]] name = "pkginfo" version = "1.8.2" @@ -1534,7 +1538,7 @@ python-versions = "*" [[package]] name = "wrapt" -version = "1.13.3" +version = "1.14.1" description = "Module for decorators, wrappers and monkey patching." 
category = "dev" optional = false @@ -1646,8 +1650,8 @@ Authlib = [ {file = "Authlib-1.1.0.tar.gz", hash = "sha256:0a270c91409fc2b7b0fbee6996e09f2ee3187358762111a9a4225c874b94e891"}, ] automat = [ - {file = "Automat-20.2.0-py2.py3-none-any.whl", hash = "sha256:b6feb6455337df834f6c9962d6ccf771515b7d939bca142b29c20c2376bc6111"}, - {file = "Automat-20.2.0.tar.gz", hash = "sha256:7979803c74610e11ef0c0d68a2942b152df52da55336e0c9d58daf1831cbdf33"}, + {file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"}, + {file = "Automat-22.10.0.tar.gz", hash = "sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e"}, ] bcrypt = [ {file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"}, @@ -2251,41 +2255,64 @@ phonenumbers = [ {file = "phonenumbers-8.12.56.tar.gz", hash = "sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868"}, ] pillow = [ - {file = "Pillow-9.0.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a5d24e1d674dd9d72c66ad3ea9131322819ff86250b30dc5821cbafcfa0b96b4"}, - {file = "Pillow-9.0.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2632d0f846b7c7600edf53c48f8f9f1e13e62f66a6dbc15191029d950bfed976"}, - {file = "Pillow-9.0.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9618823bd237c0d2575283f2939655f54d51b4527ec3972907a927acbcc5bfc"}, - {file = "Pillow-9.0.1-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd"}, - {file = "Pillow-9.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f"}, - {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a"}, - {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049"}, - {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a"}, - {file = "Pillow-9.0.1-cp310-cp310-win32.whl", hash = "sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e"}, - {file = "Pillow-9.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b"}, - {file = "Pillow-9.0.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e"}, - {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360"}, - {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b"}, - {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030"}, - {file = "Pillow-9.0.1-cp37-cp37m-win32.whl", hash = "sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669"}, - {file = "Pillow-9.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092"}, - {file = "Pillow-9.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = 
"sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204"}, - {file = "Pillow-9.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e"}, - {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c"}, - {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5"}, - {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae"}, - {file = "Pillow-9.0.1-cp38-cp38-win32.whl", hash = "sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c"}, - {file = "Pillow-9.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00"}, - {file = "Pillow-9.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838"}, - {file = "Pillow-9.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28"}, - {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c"}, - {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b"}, - {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7"}, - {file = "Pillow-9.0.1-cp39-cp39-win32.whl", hash = "sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7"}, - {file = "Pillow-9.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac"}, - {file = "Pillow-9.0.1-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97"}, - {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56"}, - {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e"}, - {file = "Pillow-9.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70"}, - {file = "Pillow-9.0.1.tar.gz", hash = "sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa"}, + {file = "Pillow-9.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:a9c9bc489f8ab30906d7a85afac4b4944a572a7432e00698a7239f44a44e6efb"}, + {file = "Pillow-9.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:510cef4a3f401c246cfd8227b300828715dd055463cdca6176c2e4036df8bd4f"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7888310f6214f19ab2b6df90f3f06afa3df7ef7355fc025e78a3044737fab1f5"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831e648102c82f152e14c1a0938689dbb22480c548c8d4b8b248b3e50967b88c"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cc1d2451e8a3b4bfdb9caf745b58e6c7a77d2e469159b0d527a4554d73694d1"}, 
+ {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:136659638f61a251e8ed3b331fc6ccd124590eeff539de57c5f80ef3a9594e58"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6e8c66f70fb539301e064f6478d7453e820d8a2c631da948a23384865cd95544"}, + {file = "Pillow-9.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:37ff6b522a26d0538b753f0b4e8e164fdada12db6c6f00f62145d732d8a3152e"}, + {file = "Pillow-9.2.0-cp310-cp310-win32.whl", hash = "sha256:c79698d4cd9318d9481d89a77e2d3fcaeff5486be641e60a4b49f3d2ecca4e28"}, + {file = "Pillow-9.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:254164c57bab4b459f14c64e93df11eff5ded575192c294a0c49270f22c5d93d"}, + {file = "Pillow-9.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:adabc0bce035467fb537ef3e5e74f2847c8af217ee0be0455d4fec8adc0462fc"}, + {file = "Pillow-9.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:336b9036127eab855beec9662ac3ea13a4544a523ae273cbf108b228ecac8437"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50dff9cc21826d2977ef2d2a205504034e3a4563ca6f5db739b0d1026658e004"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb6259196a589123d755380b65127ddc60f4c64b21fc3bb46ce3a6ea663659b0"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b0554af24df2bf96618dac71ddada02420f946be943b181108cac55a7a2dcd4"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:15928f824870535c85dbf949c09d6ae7d3d6ac2d6efec80f3227f73eefba741c"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:bdd0de2d64688ecae88dd8935012c4a72681e5df632af903a1dca8c5e7aa871a"}, + {file = "Pillow-9.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5b87da55a08acb586bad5c3aa3b86505f559b84f39035b233d5bf844b0834b1"}, + {file = "Pillow-9.2.0-cp311-cp311-win32.whl", hash = "sha256:b6d5e92df2b77665e07ddb2e4dbd6d644b78e4c0d2e9272a852627cdba0d75cf"}, + {file = "Pillow-9.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6bf088c1ce160f50ea40764f825ec9b72ed9da25346216b91361eef8ad1b8f8c"}, + {file = "Pillow-9.2.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:2c58b24e3a63efd22554c676d81b0e57f80e0a7d3a5874a7e14ce90ec40d3069"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eef7592281f7c174d3d6cbfbb7ee5984a671fcd77e3fc78e973d492e9bf0eb3f"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dcd7b9c7139dc8258d164b55696ecd16c04607f1cc33ba7af86613881ffe4ac8"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a138441e95562b3c078746a22f8fca8ff1c22c014f856278bdbdd89ca36cff1b"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:93689632949aff41199090eff5474f3990b6823404e45d66a5d44304e9cdc467"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:f3fac744f9b540148fa7715a435d2283b71f68bfb6d4aae24482a890aed18b59"}, + {file = "Pillow-9.2.0-cp37-cp37m-win32.whl", hash = "sha256:fa768eff5f9f958270b081bb33581b4b569faabf8774726b283edb06617101dc"}, + {file = "Pillow-9.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:69bd1a15d7ba3694631e00df8de65a8cb031911ca11f44929c97fe05eb9b6c1d"}, + {file = "Pillow-9.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:030e3460861488e249731c3e7ab59b07c7853838ff3b8e16aac9561bb345da14"}, + {file 
= "Pillow-9.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:74a04183e6e64930b667d321524e3c5361094bb4af9083db5c301db64cd341f3"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d33a11f601213dcd5718109c09a52c2a1c893e7461f0be2d6febc2879ec2402"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fd6f5e3c0e4697fa7eb45b6e93996299f3feee73a3175fa451f49a74d092b9f"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a647c0d4478b995c5e54615a2e5360ccedd2f85e70ab57fbe817ca613d5e63b8"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:4134d3f1ba5f15027ff5c04296f13328fecd46921424084516bdb1b2548e66ff"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:bc431b065722a5ad1dfb4df354fb9333b7a582a5ee39a90e6ffff688d72f27a1"}, + {file = "Pillow-9.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1536ad017a9f789430fb6b8be8bf99d2f214c76502becc196c6f2d9a75b01b76"}, + {file = "Pillow-9.2.0-cp38-cp38-win32.whl", hash = "sha256:2ad0d4df0f5ef2247e27fc790d5c9b5a0af8ade9ba340db4a73bb1a4a3e5fb4f"}, + {file = "Pillow-9.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:ec52c351b35ca269cb1f8069d610fc45c5bd38c3e91f9ab4cbbf0aebc136d9c8"}, + {file = "Pillow-9.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ed2c4ef2451de908c90436d6e8092e13a43992f1860275b4d8082667fbb2ffc"}, + {file = "Pillow-9.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ad2f835e0ad81d1689f1b7e3fbac7b01bb8777d5a985c8962bedee0cc6d43da"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea98f633d45f7e815db648fd7ff0f19e328302ac36427343e4432c84432e7ff4"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7761afe0126d046974a01e030ae7529ed0ca6a196de3ec6937c11df0df1bc91c"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a54614049a18a2d6fe156e68e188da02a046a4a93cf24f373bffd977e943421"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:5aed7dde98403cd91d86a1115c78d8145c83078e864c1de1064f52e6feb61b20"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:13b725463f32df1bfeacbf3dd197fb358ae8ebcd8c5548faa75126ea425ccb60"}, + {file = "Pillow-9.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:808add66ea764ed97d44dda1ac4f2cfec4c1867d9efb16a33d158be79f32b8a4"}, + {file = "Pillow-9.2.0-cp39-cp39-win32.whl", hash = "sha256:337a74fd2f291c607d220c793a8135273c4c2ab001b03e601c36766005f36885"}, + {file = "Pillow-9.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:fac2d65901fb0fdf20363fbd345c01958a742f2dc62a8dd4495af66e3ff502a4"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ad2277b185ebce47a63f4dc6302e30f05762b688f8dc3de55dbae4651872cdf3"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c7b502bc34f6e32ba022b4a209638f9e097d7a9098104ae420eb8186217ebbb"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d1f14f5f691f55e1b47f824ca4fdcb4b19b4323fe43cc7bb105988cad7496be"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:dfe4c1fedfde4e2fbc009d5ad420647f7730d719786388b7de0999bf32c0d9fd"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = 
"sha256:f07f1f00e22b231dd3d9b9208692042e29792d6bd4f6639415d2f23158a80013"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1802f34298f5ba11d55e5bb09c31997dc0c6aed919658dfdf0198a2fe75d5490"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17d4cafe22f050b46d983b71c707162d63d796a1235cdf8b9d7a112e97b15bac"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:96b5e6874431df16aee0c1ba237574cb6dff1dcb173798faa6a9d8b399a05d0e"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:0030fdbd926fb85844b8b92e2f9449ba89607231d3dd597a21ae72dc7fe26927"}, + {file = "Pillow-9.2.0.tar.gz", hash = "sha256:75e636fd3e0fb872693f23ccb8a5ff2cd578801251f3a4f6854c6a5d437d3c04"}, ] pkginfo = [ {file = "pkginfo-1.8.2-py2.py3-none-any.whl", hash = "sha256:c24c487c6a7f72c66e816ab1796b96ac6c3d14d49338293d2141664330b55ffc"}, @@ -2814,57 +2841,70 @@ webencodings = [ {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, ] wrapt = [ - {file = "wrapt-1.13.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:e05e60ff3b2b0342153be4d1b597bbcfd8330890056b9619f4ad6b8d5c96a81a"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:85148f4225287b6a0665eef08a178c15097366d46b210574a658c1ff5b377489"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:2dded5496e8f1592ec27079b28b6ad2a1ef0b9296d270f77b8e4a3a796cf6909"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:e94b7d9deaa4cc7bac9198a58a7240aaf87fe56c6277ee25fa5b3aa1edebd229"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:498e6217523111d07cd67e87a791f5e9ee769f9241fcf8a379696e25806965af"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:ec7e20258ecc5174029a0f391e1b948bf2906cd64c198a9b8b281b811cbc04de"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:87883690cae293541e08ba2da22cacaae0a092e0ed56bbba8d018cc486fbafbb"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:f99c0489258086308aad4ae57da9e8ecf9e1f3f30fa35d5e170b4d4896554d80"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6a03d9917aee887690aa3f1747ce634e610f6db6f6b332b35c2dd89412912bca"}, - {file = "wrapt-1.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:936503cb0a6ed28dbfa87e8fcd0a56458822144e9d11a49ccee6d9a8adb2ac44"}, - {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f9c51d9af9abb899bd34ace878fbec8bf357b3194a10c4e8e0a25512826ef056"}, - {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:220a869982ea9023e163ba915077816ca439489de6d2c09089b219f4e11b6785"}, - {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0877fe981fd76b183711d767500e6b3111378ed2043c145e21816ee589d91096"}, - {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:43e69ffe47e3609a6aec0fe723001c60c65305784d964f5007d5b4fb1bc6bf33"}, - {file = "wrapt-1.13.3-cp310-cp310-win32.whl", hash = "sha256:78dea98c81915bbf510eb6a3c9c24915e4660302937b9ae05a0947164248020f"}, - {file = "wrapt-1.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:ea3e746e29d4000cd98d572f3ee2a6050a4f784bb536f4ac1f035987fc1ed83e"}, - {file = 
"wrapt-1.13.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:8c73c1a2ec7c98d7eaded149f6d225a692caa1bd7b2401a14125446e9e90410d"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:086218a72ec7d986a3eddb7707c8c4526d677c7b35e355875a0fe2918b059179"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:e92d0d4fa68ea0c02d39f1e2f9cb5bc4b4a71e8c442207433d8db47ee79d7aa3"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:d4a5f6146cfa5c7ba0134249665acd322a70d1ea61732723c7d3e8cc0fa80755"}, - {file = "wrapt-1.13.3-cp35-cp35m-win32.whl", hash = "sha256:8aab36778fa9bba1a8f06a4919556f9f8c7b33102bd71b3ab307bb3fecb21851"}, - {file = "wrapt-1.13.3-cp35-cp35m-win_amd64.whl", hash = "sha256:944b180f61f5e36c0634d3202ba8509b986b5fbaf57db3e94df11abee244ba13"}, - {file = "wrapt-1.13.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2ebdde19cd3c8cdf8df3fc165bc7827334bc4e353465048b36f7deeae8ee0918"}, - {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:610f5f83dd1e0ad40254c306f4764fcdc846641f120c3cf424ff57a19d5f7ade"}, - {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5601f44a0f38fed36cc07db004f0eedeaadbdcec90e4e90509480e7e6060a5bc"}, - {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:e6906d6f48437dfd80464f7d7af1740eadc572b9f7a4301e7dd3d65db285cacf"}, - {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:766b32c762e07e26f50d8a3468e3b4228b3736c805018e4b0ec8cc01ecd88125"}, - {file = "wrapt-1.13.3-cp36-cp36m-win32.whl", hash = "sha256:5f223101f21cfd41deec8ce3889dc59f88a59b409db028c469c9b20cfeefbe36"}, - {file = "wrapt-1.13.3-cp36-cp36m-win_amd64.whl", hash = "sha256:f122ccd12fdc69628786d0c947bdd9cb2733be8f800d88b5a37c57f1f1d73c10"}, - {file = "wrapt-1.13.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:46f7f3af321a573fc0c3586612db4decb7eb37172af1bc6173d81f5b66c2e068"}, - {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:778fd096ee96890c10ce96187c76b3e99b2da44e08c9e24d5652f356873f6709"}, - {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0cb23d36ed03bf46b894cfec777eec754146d68429c30431c99ef28482b5c1df"}, - {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:96b81ae75591a795d8c90edc0bfaab44d3d41ffc1aae4d994c5aa21d9b8e19a2"}, - {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7dd215e4e8514004c8d810a73e342c536547038fb130205ec4bba9f5de35d45b"}, - {file = "wrapt-1.13.3-cp37-cp37m-win32.whl", hash = "sha256:47f0a183743e7f71f29e4e21574ad3fa95676136f45b91afcf83f6a050914829"}, - {file = "wrapt-1.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fd76c47f20984b43d93de9a82011bb6e5f8325df6c9ed4d8310029a55fa361ea"}, - {file = "wrapt-1.13.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b73d4b78807bd299b38e4598b8e7bd34ed55d480160d2e7fdaabd9931afa65f9"}, - {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ec9465dd69d5657b5d2fa6133b3e1e989ae27d29471a672416fd729b429eb554"}, - {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:dd91006848eb55af2159375134d724032a2d1d13bcc6f81cd8d3ed9f2b8e846c"}, - {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ae9de71eb60940e58207f8e71fe113c639da42adb02fb2bcbcaccc1ccecd092b"}, - {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:51799ca950cfee9396a87f4a1240622ac38973b6df5ef7a41e7f0b98797099ce"}, - {file = "wrapt-1.13.3-cp38-cp38-win32.whl", hash = "sha256:4b9c458732450ec42578b5642ac53e312092acf8c0bfce140ada5ca1ac556f79"}, - {file = "wrapt-1.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:7dde79d007cd6dfa65afe404766057c2409316135cb892be4b1c768e3f3a11cb"}, - {file = "wrapt-1.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:981da26722bebb9247a0601e2922cedf8bb7a600e89c852d063313102de6f2cb"}, - {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:705e2af1f7be4707e49ced9153f8d72131090e52be9278b5dbb1498c749a1e32"}, - {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:25b1b1d5df495d82be1c9d2fad408f7ce5ca8a38085e2da41bb63c914baadff7"}, - {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:77416e6b17926d953b5c666a3cb718d5945df63ecf922af0ee576206d7033b5e"}, - {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:865c0b50003616f05858b22174c40ffc27a38e67359fa1495605f96125f76640"}, - {file = "wrapt-1.13.3-cp39-cp39-win32.whl", hash = "sha256:0a017a667d1f7411816e4bf214646d0ad5b1da2c1ea13dec6c162736ff25a374"}, - {file = "wrapt-1.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:81bd7c90d28a4b2e1df135bfbd7c23aee3050078ca6441bead44c42483f9ebfb"}, - {file = "wrapt-1.13.3.tar.gz", hash = "sha256:1fea9cd438686e6682271d36f3481a9f3636195578bab9ca3382e2f5f01fc185"}, + {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"}, + {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"}, + {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, + {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, + {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"}, + {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"}, + {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"}, + {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"}, + {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"}, + {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"}, + {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"}, + {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"}, + {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"}, + {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"}, + {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"}, + {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"}, + {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"}, + {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"}, + {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"}, + {file = 
"wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"}, + {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"}, + {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"}, + {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"}, ] xmlschema = [ {file = "xmlschema-1.10.0-py3-none-any.whl", hash = "sha256:dbd68bded2fef00c19cf37110ca0565eca34cf0b6c9e1d3b62ad0de8cbb582ca"}, diff --git a/tests/federation/transport/test_client.py b/tests/federation/transport/test_client.py index dd4d1b56de..b84c74fc0e 100644 --- a/tests/federation/transport/test_client.py +++ b/tests/federation/transport/test_client.py @@ -15,6 +15,8 @@ import json from unittest.mock import Mock +import ijson.common + from synapse.api.room_versions import RoomVersions from synapse.federation.transport.client import SendJoinParser from synapse.util import ExceptionBundle @@ -117,8 +119,17 @@ class SendJoinParserTestCase(TestCase): coro_3 = Mock() coro_3.close = Mock(side_effect=RuntimeError("Couldn't close coro 3")) + original_coros = parser._coros parser._coros = [coro_1, coro_2, coro_3] + # Close the original coroutines. If we don't, when we garbage collect them + # they will throw, failing the test. (Oddly, this only started in CPython 3.11). + for coro in original_coros: + try: + coro.close() + except ijson.common.IncompleteJSONError: + pass + # Send half of the data to the parser parser.write(serialisation[: len(serialisation) // 2]) -- cgit 1.5.1 From 78909f50286a857205269103b149be13e6cb95d1 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 4 Nov 2022 10:45:01 +0000 Subject: Include monthly active user metrics in the list of legacy metrics names (#14360) --- changelog.d/14360.doc | 1 + docs/metrics-howto.md | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/14360.doc diff --git a/changelog.d/14360.doc b/changelog.d/14360.doc new file mode 100644 index 0000000000..8a8dee3b8d --- /dev/null +++ b/changelog.d/14360.doc @@ -0,0 +1 @@ +Include monthly active user metrics in the list of legacy metrics names. 
diff --git a/docs/metrics-howto.md b/docs/metrics-howto.md index 8f1f11f2b2..16e4368f35 100644 --- a/docs/metrics-howto.md +++ b/docs/metrics-howto.md @@ -209,6 +209,9 @@ altogether in Synapse v1.73.0.** | synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed | | synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed | | synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed | +| synapse_admin_mau_current | synapse_admin_mau:current | +| synapse_admin_mau_max | synapse_admin_mau:max | +| synapse_admin_mau_registered_reserved_users | synapse_admin_mau:registered_reserved_users | Removal of deprecated metrics & time based counters becoming histograms in 0.31.0 --------------------------------------------------------------------------------- -- cgit 1.5.1 From b00294b8b1f289de10f57e4704d3f6f32943bd57 Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Fri, 4 Nov 2022 12:01:17 +0000 Subject: 1.71.0rc2 --- CHANGES.md | 22 ++++++++++++++++++++++ changelog.d/13812.misc | 1 - changelog.d/14353.removal | 1 - changelog.d/14358.doc | 1 - changelog.d/14360.doc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 7 files changed, 29 insertions(+), 5 deletions(-) delete mode 100644 changelog.d/13812.misc delete mode 100644 changelog.d/14353.removal delete mode 100644 changelog.d/14358.doc delete mode 100644 changelog.d/14360.doc diff --git a/CHANGES.md b/CHANGES.md index 6bafdd3fad..8f883cfc43 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,25 @@ +Synapse 1.71.0rc2 (2022-11-04) +============================== + +Improved Documentation +---------------------- + +- Replace references to legacy metrics `synapse_admin_mau` with new metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358)) +- Include monthly active user metrics in the list of legacy metrics names. ([\#14360](https://github.com/matrix-org/synapse/issues/14360)) + + +Deprecations and Removals +------------------------- + +- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353)) + + +Internal Changes +---------------- + +- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812)) + + Synapse (Next) (2022-11-01) ========================= diff --git a/changelog.d/13812.misc b/changelog.d/13812.misc deleted file mode 100644 index 667fdee2b7..0000000000 --- a/changelog.d/13812.misc +++ /dev/null @@ -1 +0,0 @@ -Run unit tests against Python 3.11. diff --git a/changelog.d/14353.removal b/changelog.d/14353.removal deleted file mode 100644 index fc42aa9106..0000000000 --- a/changelog.d/14353.removal +++ /dev/null @@ -1 +0,0 @@ -Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. \ No newline at end of file diff --git a/changelog.d/14358.doc b/changelog.d/14358.doc deleted file mode 100644 index a9b910878d..0000000000 --- a/changelog.d/14358.doc +++ /dev/null @@ -1 +0,0 @@ -Replace references to legacy metrics `synapse_admin_mau` with new metric names. diff --git a/changelog.d/14360.doc b/changelog.d/14360.doc deleted file mode 100644 index 8a8dee3b8d..0000000000 --- a/changelog.d/14360.doc +++ /dev/null @@ -1 +0,0 @@ -Include monthly active user metrics in the list of legacy metrics names. 
diff --git a/debian/changelog b/debian/changelog
index e07fa06b7a..419fc7ebc2 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.71.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.71.0rc2.
+
+ -- Synapse Packaging team <packaging@matrix.org>  Fri, 04 Nov 2022 12:00:33 +0000
+
 matrix-synapse-py3 (1.71.0~rc1) stable; urgency=medium
 
   * New Synapse release 1.71.0rc1.
diff --git a/pyproject.toml b/pyproject.toml
index 5d96328067..249b1cbedf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.71.0rc1"
+version = "1.71.0rc2"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packaging@matrix.org>"]
 license = "Apache-2.0"
-- 
cgit 1.5.1


From af592d7d4c63c6bc0c3535c62318845f449945f6 Mon Sep 17 00:00:00 2001
From: Sean Quah
Date: Fri, 4 Nov 2022 12:13:04 +0000
Subject: Update CHANGES.md

---
 CHANGES.md | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index 8f883cfc43..b6417ceb29 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,11 +1,16 @@
 Synapse 1.71.0rc2 (2022-11-04)
 ==============================
 
+Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
+They will be removed altogether in Synapse 1.73.0.
+If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.
+
+
 Improved Documentation
 ----------------------
 
-- Replace references to legacy metrics `synapse_admin_mau` with new metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358))
-- Include monthly active user metrics in the list of legacy metrics names. ([\#14360](https://github.com/matrix-org/synapse/issues/14360))
+- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))
 
 
 Deprecations and Removals
@@ -20,15 +25,6 @@ Internal Changes
 
 - Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
 
 
-Synapse (Next) (2022-11-01)
-=========================
-
-Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
-They will be removed altogether in Synapse 1.73.0.
-If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
-See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.
- - Synapse 1.71.0rc1 (2022-11-01) ============================== -- cgit 1.5.1 From a4b1f6456276e62b3f4d6b060c289b6413b8a5c2 Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Fri, 4 Nov 2022 18:43:51 +0200 Subject: Fix /refresh endpoint version (#14364) --- changelog.d/14364.bugfix | 1 + synapse/rest/client/login.py | 2 +- tests/rest/client/test_auth.py | 16 ++++++++-------- 3 files changed, 10 insertions(+), 9 deletions(-) create mode 100644 changelog.d/14364.bugfix diff --git a/changelog.d/14364.bugfix b/changelog.d/14364.bugfix new file mode 100644 index 0000000000..514bf859bb --- /dev/null +++ b/changelog.d/14364.bugfix @@ -0,0 +1 @@ +Fix refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 7774f1967d..05706b598c 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -536,7 +536,7 @@ def _get_auth_flow_dict_for_idp(idp: SsoIdentityProvider) -> JsonDict: class RefreshTokenServlet(RestServlet): - PATTERNS = (re.compile("^/_matrix/client/v1/refresh$"),) + PATTERNS = client_patterns("/refresh$") def __init__(self, hs: "HomeServer"): self._auth_handler = hs.get_auth_handler() diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py index 847294dc8e..208ec44829 100644 --- a/tests/rest/client/test_auth.py +++ b/tests/rest/client/test_auth.py @@ -635,7 +635,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): """ return self.make_request( "POST", - "/_matrix/client/v1/refresh", + "/_matrix/client/v3/refresh", {"refresh_token": refresh_token}, ) @@ -724,7 +724,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): refresh_response = self.make_request( "POST", - "/_matrix/client/v1/refresh", + "/_matrix/client/v3/refresh", {"refresh_token": login_response.json_body["refresh_token"]}, ) self.assertEqual(refresh_response.code, HTTPStatus.OK, refresh_response.result) @@ -765,7 +765,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): refresh_response = self.make_request( "POST", - "/_matrix/client/v1/refresh", + "/_matrix/client/v3/refresh", {"refresh_token": login_response.json_body["refresh_token"]}, ) self.assertEqual(refresh_response.code, HTTPStatus.OK, refresh_response.result) @@ -1002,7 +1002,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): # This first refresh should work properly first_refresh_response = self.make_request( "POST", - "/_matrix/client/v1/refresh", + "/_matrix/client/v3/refresh", {"refresh_token": login_response.json_body["refresh_token"]}, ) self.assertEqual( @@ -1012,7 +1012,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): # This one as well, since the token in the first one was never used second_refresh_response = self.make_request( "POST", - "/_matrix/client/v1/refresh", + "/_matrix/client/v3/refresh", {"refresh_token": login_response.json_body["refresh_token"]}, ) self.assertEqual( @@ -1022,7 +1022,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): # This one should not, since the token from the first refresh is not valid anymore third_refresh_response = self.make_request( "POST", - "/_matrix/client/v1/refresh", + "/_matrix/client/v3/refresh", {"refresh_token": first_refresh_response.json_body["refresh_token"]}, ) self.assertEqual( @@ -1056,7 +1056,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): # Now that the access token from the last valid refresh was used once, refreshing with the N-1 token should fail fourth_refresh_response = self.make_request( "POST", - 
"/_matrix/client/v1/refresh", + "/_matrix/client/v3/refresh", {"refresh_token": login_response.json_body["refresh_token"]}, ) self.assertEqual( @@ -1068,7 +1068,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): # But refreshing from the last valid refresh token still works fifth_refresh_response = self.make_request( "POST", - "/_matrix/client/v1/refresh", + "/_matrix/client/v3/refresh", {"refresh_token": second_refresh_response.json_body["refresh_token"]}, ) self.assertEqual( -- cgit 1.5.1 From 79b6c19321f3de78f2a8d3d40eebd320cef1a91c Mon Sep 17 00:00:00 2001 From: Michael Telatynski <7t3chguy@gmail.com> Date: Fri, 4 Nov 2022 17:08:11 +0000 Subject: Upload documentation PRs to Netlify (#12947) Signed-off-by: Michael Telatynski <7t3chguy@gmail.com> Co-authored-by: Erik Johnston Co-authored-by: David Robertson Co-authored-by: Brendan Abolivier --- .github/workflows/docs-pr-netlify.yaml | 34 ++++++++++++++++++++++++++++++++++ .github/workflows/docs-pr.yaml | 34 ++++++++++++++++++++++++++++++++++ changelog.d/12947.doc | 1 + 3 files changed, 69 insertions(+) create mode 100644 .github/workflows/docs-pr-netlify.yaml create mode 100644 .github/workflows/docs-pr.yaml create mode 100644 changelog.d/12947.doc diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml new file mode 100644 index 0000000000..7cc3b4d9be --- /dev/null +++ b/.github/workflows/docs-pr-netlify.yaml @@ -0,0 +1,34 @@ +name: Deploy documentation PR preview + +on: + workflow_run: + workflows: [ "Prepare documentation PR preview" ] + types: + - completed + +jobs: + netlify: + if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request' + runs-on: ubuntu-latest + steps: + # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action + # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess: + - name: 📥 Download artifact + uses: dawidd6/action-download-artifact@af92a8455a59214b7b932932f2662fdefbd78126 # v2.15.0 + with: + workflow: docs-pr.yaml + run_id: ${{ github.event.workflow_run.id }} + name: book + path: book + + - name: 📤 Deploy to Netlify + uses: matrix-org/netlify-pr-preview@v1 + with: + path: book + owner: ${{ github.event.workflow_run.head_repository.owner.login }} + branch: ${{ github.event.workflow_run.head_branch }} + revision: ${{ github.event.workflow_run.head_sha }} + token: ${{ secrets.NETLIFY_AUTH_TOKEN }} + site_id: ${{ secrets.NETLIFY_SITE_ID }} + desc: Documentation preview + deployment_env: PR Documentation Preview diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml new file mode 100644 index 0000000000..1910e72fe2 --- /dev/null +++ b/.github/workflows/docs-pr.yaml @@ -0,0 +1,34 @@ +name: Prepare documentation PR preview + +on: + pull_request: + paths: + - docs + +jobs: + pages: + name: GitHub Pages + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Setup mdbook + uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14 + with: + mdbook-version: '0.4.17' + + - name: Build the documentation + # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md. + # However, we're using docs/README.md for other purposes and need to pick a new page + # as the default. Let's opt for the welcome page instead. 
+ run: | + mdbook build + cp book/welcome_and_overview.html book/index.html + + - name: Upload Artifact + uses: actions/upload-artifact@v2 + with: + name: book + path: book + # We'll only use this in a workflow_run, then we're done with it + retention-days: 1 diff --git a/changelog.d/12947.doc b/changelog.d/12947.doc new file mode 100644 index 0000000000..5319e9ad4f --- /dev/null +++ b/changelog.d/12947.doc @@ -0,0 +1 @@ +Upload documentation PRs to Netlify. -- cgit 1.5.1 From bb39fc4366eb59464838d99a676b1620b879c2b8 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 4 Nov 2022 18:33:01 +0000 Subject: Fix the trigger path for deploying documentation PRs (#14370) This was missed from #12947 --- .github/workflows/docs-pr.yaml | 2 +- changelog.d/14370.doc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14370.doc diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index 1910e72fe2..ebe352f220 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -3,7 +3,7 @@ name: Prepare documentation PR preview on: pull_request: paths: - - docs + - docs/** jobs: pages: diff --git a/changelog.d/14370.doc b/changelog.d/14370.doc new file mode 100644 index 0000000000..5319e9ad4f --- /dev/null +++ b/changelog.d/14370.doc @@ -0,0 +1 @@ +Upload documentation PRs to Netlify. -- cgit 1.5.1 From 8bcdd712b8ba471b3489d41e569276677cf6c2bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Nov 2022 18:43:14 +0000 Subject: Bump flake8-bugbear from 22.9.23 to 22.10.27 (#14329) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions Co-authored-by: Olivier Wilkinson (reivilibre) --- changelog.d/14329.misc | 1 + poetry.lock | 10 +++++----- synapse/handlers/presence.py | 6 ++++-- synapse/server.py | 2 +- synapse/storage/_base.py | 2 +- 5 files changed, 12 insertions(+), 9 deletions(-) create mode 100644 changelog.d/14329.misc diff --git a/changelog.d/14329.misc b/changelog.d/14329.misc new file mode 100644 index 0000000000..2f6bbd3af7 --- /dev/null +++ b/changelog.d/14329.misc @@ -0,0 +1 @@ +Bump flake8-bugbear from 22.9.23 to 22.10.27. diff --git a/poetry.lock b/poetry.lock index b945463299..f6e462e6ae 100644 --- a/poetry.lock +++ b/poetry.lock @@ -260,18 +260,18 @@ pyflakes = ">=2.4.0,<2.5.0" [[package]] name = "flake8-bugbear" -version = "22.9.23" +version = "22.10.27" description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle." 
category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] attrs = ">=19.2.0" flake8 = ">=3.0.0" [package.extras] -dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit"] +dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit", "tox"] [[package]] name = "flake8-comprehensions" @@ -1829,8 +1829,8 @@ flake8 = [ {file = "flake8-4.0.1.tar.gz", hash = "sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"}, ] flake8-bugbear = [ - {file = "flake8-bugbear-22.9.23.tar.gz", hash = "sha256:17b9623325e6e0dcdcc80ed9e4aa811287fcc81d7e03313b8736ea5733759937"}, - {file = "flake8_bugbear-22.9.23-py3-none-any.whl", hash = "sha256:cd2779b2b7ada212d7a322814a1e5651f1868ab0d3f24cc9da66169ab8fda474"}, + {file = "flake8-bugbear-22.10.27.tar.gz", hash = "sha256:a6708608965c9e0de5fff13904fed82e0ba21ac929fe4896459226a797e11cd5"}, + {file = "flake8_bugbear-22.10.27-py3-none-any.whl", hash = "sha256:6ad0ab754507319060695e2f2be80e6d8977cfcea082293089a9226276bd825d"}, ] flake8-comprehensions = [ {file = "flake8-comprehensions-3.8.0.tar.gz", hash = "sha256:8e108707637b1d13734f38e03435984f6b7854fa6b5a4e34f93e69534be8e521"}, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 2670e561d7..0066d63987 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -256,7 +256,7 @@ class BasePresenceHandler(abc.ABC): with the app. """ - async def update_external_syncs_row( + async def update_external_syncs_row( # noqa: B027 (no-op by design) self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int ) -> None: """Update the syncing users for an external process as a delta. @@ -272,7 +272,9 @@ class BasePresenceHandler(abc.ABC): sync_time_msec: Time in ms when the user was last syncing """ - async def update_external_syncs_clear(self, process_id: str) -> None: + async def update_external_syncs_clear( # noqa: B027 (no-op by design) + self, process_id: str + ) -> None: """Marks all users that had been marked as syncing by a given process as offline. diff --git a/synapse/server.py b/synapse/server.py index df3a1cb405..c4e025af22 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -315,7 +315,7 @@ class HomeServer(metaclass=abc.ABCMeta): if self.config.worker.run_background_tasks: self.setup_background_tasks() - def start_listening(self) -> None: + def start_listening(self) -> None: # noqa: B027 (no-op by design) """Start the HTTP, manhole, metrics, etc listeners Does nothing in this base class; overridden in derived classes to start the diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index bf42aeb8d1..69abf6fa87 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -50,7 +50,7 @@ class SQLBaseStore(metaclass=ABCMeta): self.external_cached_functions: Dict[str, CachedFunction] = {} - def process_replication_rows( + def process_replication_rows( # noqa: B027 (no-op by design) self, stream_name: str, instance_name: str, -- cgit 1.5.1 From b2a1e7543192c0c391a75e8c7c93ce124eb11e48 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:27:39 +0000 Subject: Bump dawidd6/action-download-artifact from 2.15.0 to 2.24.0 (#14378) * Bump dawidd6/action-download-artifact from 2.15.0 to 2.24.0 Bumps [dawidd6/action-download-artifact](https://github.com/dawidd6/action-download-artifact) from 2.15.0 to 2.24.0. 
- [Release notes](https://github.com/dawidd6/action-download-artifact/releases) - [Commits](https://github.com/dawidd6/action-download-artifact/compare/af92a8455a59214b7b932932f2662fdefbd78126...46b4ae883bf0726f5949d025d31cb62c7a5ac70c) --- updated-dependencies: - dependency-name: dawidd6/action-download-artifact dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/docs-pr-netlify.yaml | 2 +- changelog.d/14378.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14378.misc diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml index 7cc3b4d9be..5c90cf714c 100644 --- a/.github/workflows/docs-pr-netlify.yaml +++ b/.github/workflows/docs-pr-netlify.yaml @@ -14,7 +14,7 @@ jobs: # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess: - name: 📥 Download artifact - uses: dawidd6/action-download-artifact@af92a8455a59214b7b932932f2662fdefbd78126 # v2.15.0 + uses: dawidd6/action-download-artifact@46b4ae883bf0726f5949d025d31cb62c7a5ac70c # v2.24.0 with: workflow: docs-pr.yaml run_id: ${{ github.event.workflow_run.id }} diff --git a/changelog.d/14378.misc b/changelog.d/14378.misc new file mode 100644 index 0000000000..3d397cdf00 --- /dev/null +++ b/changelog.d/14378.misc @@ -0,0 +1 @@ +Bump dawidd6/action-download-artifact from 2.15.0 to 2.24.0. -- cgit 1.5.1 From 04359f92f2cd2af6767d1857899f99e5e48afdc4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:27:52 +0000 Subject: Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0 (#14379) * Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0 Bumps [peaceiris/actions-mdbook](https://github.com/peaceiris/actions-mdbook) from 1.1.14 to 1.2.0. - [Release notes](https://github.com/peaceiris/actions-mdbook/releases) - [Changelog](https://github.com/peaceiris/actions-mdbook/blob/main/CHANGELOG.md) - [Commits](https://github.com/peaceiris/actions-mdbook/compare/v1.1.14...adeb05db28a0c0004681db83893d56c0388ea9ea) --- updated-dependencies: - dependency-name: peaceiris/actions-mdbook dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/docs-pr.yaml | 2 +- changelog.d/14379.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14379.misc diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index ebe352f220..da40fa2408 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v2 - name: Setup mdbook - uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14 + uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0 with: mdbook-version: '0.4.17' diff --git a/changelog.d/14379.misc b/changelog.d/14379.misc new file mode 100644 index 0000000000..bcda89ff92 --- /dev/null +++ b/changelog.d/14379.misc @@ -0,0 +1 @@ +Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. -- cgit 1.5.1 From 1df426062062c38d2e286736a314a4ffc36f4134 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:28:08 +0000 Subject: Bump regex from 1.6.0 to 1.7.0 (#14380) * Bump regex from 1.6.0 to 1.7.0 Bumps [regex](https://github.com/rust-lang/regex) from 1.6.0 to 1.7.0. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.6.0...1.7.0) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- Cargo.lock | 4 ++-- changelog.d/14380.misc | 1 + rust/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14380.misc diff --git a/Cargo.lock b/Cargo.lock index 1107578a20..f5d9e7b24e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -294,9 +294,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" dependencies = [ "aho-corasick", "memchr", diff --git a/changelog.d/14380.misc b/changelog.d/14380.misc new file mode 100644 index 0000000000..078fb6cf95 --- /dev/null +++ b/changelog.d/14380.misc @@ -0,0 +1 @@ +Bump regex from 1.6.0 to 1.7.0. diff --git a/rust/Cargo.toml b/rust/Cargo.toml index e26201884c..a8e9d7abb2 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -26,7 +26,7 @@ log = "0.4.17" pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] } pyo3-log = "0.7.0" pythonize = "0.17.0" -regex = "1.6.0" +regex = "1.7.0" serde = { version = "1.0.147", features = ["derive"] } serde_json = "1.0.87" -- cgit 1.5.1 From b03b5a5a4ffbf0186933ae094820eca8d460b615 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:28:19 +0000 Subject: Bump pyo3 from 0.17.2 to 0.17.3 (#14381) * Bump pyo3 from 0.17.2 to 0.17.3 Bumps [pyo3](https://github.com/pyo3/pyo3) from 0.17.2 to 0.17.3. 
- [Release notes](https://github.com/pyo3/pyo3/releases) - [Changelog](https://github.com/PyO3/pyo3/blob/main/CHANGELOG.md) - [Commits](https://github.com/pyo3/pyo3/compare/v0.17.2...v0.17.3) --- updated-dependencies: - dependency-name: pyo3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- Cargo.lock | 20 ++++++++++---------- changelog.d/14381.misc | 1 + rust/Cargo.toml | 2 +- 3 files changed, 12 insertions(+), 11 deletions(-) create mode 100644 changelog.d/14381.misc diff --git a/Cargo.lock b/Cargo.lock index f5d9e7b24e..4b12b8ef34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -194,9 +194,9 @@ dependencies = [ [[package]] name = "pyo3" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201b6887e5576bf2f945fe65172c1fcbf3fcf285b23e4d71eb171d9736e38d32" +checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543" dependencies = [ "anyhow", "cfg-if", @@ -212,9 +212,9 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf0708c9ed01692635cbf056e286008e5a2927ab1a5e48cdd3aeb1ba5a6fef47" +checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8" dependencies = [ "once_cell", "target-lexicon", @@ -222,9 +222,9 @@ dependencies = [ [[package]] name = "pyo3-ffi" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90352dea4f486932b72ddf776264d293f85b79a1d214de1d023927b41461132d" +checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc" dependencies = [ "libc", "pyo3-build-config", @@ -243,9 +243,9 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb24b804a2d9e88bfcc480a5a6dd76f006c1e3edaf064e8250423336e2cd79d" +checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28" dependencies = [ "proc-macro2", "pyo3-macros-backend", @@ -255,9 +255,9 @@ dependencies = [ [[package]] name = "pyo3-macros-backend" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f22bb49f6a7348c253d7ac67a6875f2dc65f36c2ae64a82c381d528972bea6d6" +checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f" dependencies = [ "proc-macro2", "quote", diff --git a/changelog.d/14381.misc b/changelog.d/14381.misc new file mode 100644 index 0000000000..1efeb6f378 --- /dev/null +++ b/changelog.d/14381.misc @@ -0,0 +1 @@ +Bump pyo3 from 0.17.2 to 0.17.3. 
diff --git a/rust/Cargo.toml b/rust/Cargo.toml index a8e9d7abb2..d6e72d4c00 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -23,7 +23,7 @@ name = "synapse.synapse_rust" anyhow = "1.0.66" lazy_static = "1.4.0" log = "0.4.17" -pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] } +pyo3 = { version = "0.17.3", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] } pyo3-log = "0.7.0" pythonize = "0.17.0" regex = "1.7.0" -- cgit 1.5.1 From 7deee6763c5c4840b2c84551cfbc894864a2d04b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:28:29 +0000 Subject: Bump types-setuptools from 65.5.0.1 to 65.5.0.2 (#14382) * Bump types-setuptools from 65.5.0.1 to 65.5.0.2 Bumps [types-setuptools](https://github.com/python/typeshed) from 65.5.0.1 to 65.5.0.2. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-setuptools dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14382.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14382.misc diff --git a/changelog.d/14382.misc b/changelog.d/14382.misc new file mode 100644 index 0000000000..e648f3fcc3 --- /dev/null +++ b/changelog.d/14382.misc @@ -0,0 +1 @@ +Bump types-setuptools from 65.5.0.1 to 65.5.0.2. diff --git a/poetry.lock b/poetry.lock index f6e462e6ae..c8860a3925 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1485,7 +1485,7 @@ types-urllib3 = "<1.27" [[package]] name = "types-setuptools" -version = "65.5.0.1" +version = "65.5.0.2" description = "Typing stubs for setuptools" category = "dev" optional = false @@ -2817,8 +2817,8 @@ types-requests = [ {file = "types_requests-2.28.11.2-py3-none-any.whl", hash = "sha256:14941f8023a80b16441b3b46caffcbfce5265fd14555844d6029697824b5a2ef"}, ] types-setuptools = [ - {file = "types-setuptools-65.5.0.1.tar.gz", hash = "sha256:5b297081c8f1fbd992cd8b305a97ed96ee6ffc765e9115124029597dd10b8a71"}, - {file = "types_setuptools-65.5.0.1-py3-none-any.whl", hash = "sha256:601d45b5e9979d2b931de5403aa11153626a1eadd1ce9727b21f24673ced5ceb"}, + {file = "types-setuptools-65.5.0.2.tar.gz", hash = "sha256:9847de6d7087fb1dd4a88c2a21543d1b86a6179c36744f081974303fe2f30f50"}, + {file = "types_setuptools-65.5.0.2-py3-none-any.whl", hash = "sha256:2d33e4ef0d35cd2da48a143eb02184f58398381ddb1f772eff20ccc4126fec13"}, ] types-urllib3 = [ {file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"}, -- cgit 1.5.1 From 6ac9b5c9a544749ba8864b2bca757c123de6f380 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:28:50 +0000 Subject: Bump pillow from 9.2.0 to 9.3.0 (#14383) * Bump pillow from 9.2.0 to 9.3.0 Bumps [pillow](https://github.com/python-pillow/Pillow) from 9.2.0 to 9.3.0. 
- [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/9.2.0...9.3.0) --- updated-dependencies: - dependency-name: pillow dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14383.misc | 1 + poetry.lock | 121 +++++++++++++++++++++++++------------------------ 2 files changed, 63 insertions(+), 59 deletions(-) create mode 100644 changelog.d/14383.misc diff --git a/changelog.d/14383.misc b/changelog.d/14383.misc new file mode 100644 index 0000000000..44b77488d9 --- /dev/null +++ b/changelog.d/14383.misc @@ -0,0 +1 @@ +Bump pillow from 9.2.0 to 9.3.0. diff --git a/poetry.lock b/poetry.lock index c8860a3925..83682a0147 100644 --- a/poetry.lock +++ b/poetry.lock @@ -671,7 +671,7 @@ python-versions = "*" [[package]] name = "pillow" -version = "9.2.0" +version = "9.3.0" description = "Python Imaging Library (Fork)" category = "main" optional = false @@ -2255,64 +2255,67 @@ phonenumbers = [ {file = "phonenumbers-8.12.56.tar.gz", hash = "sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868"}, ] pillow = [ - {file = "Pillow-9.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:a9c9bc489f8ab30906d7a85afac4b4944a572a7432e00698a7239f44a44e6efb"}, - {file = "Pillow-9.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:510cef4a3f401c246cfd8227b300828715dd055463cdca6176c2e4036df8bd4f"}, - {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7888310f6214f19ab2b6df90f3f06afa3df7ef7355fc025e78a3044737fab1f5"}, - {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831e648102c82f152e14c1a0938689dbb22480c548c8d4b8b248b3e50967b88c"}, - {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cc1d2451e8a3b4bfdb9caf745b58e6c7a77d2e469159b0d527a4554d73694d1"}, - {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:136659638f61a251e8ed3b331fc6ccd124590eeff539de57c5f80ef3a9594e58"}, - {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6e8c66f70fb539301e064f6478d7453e820d8a2c631da948a23384865cd95544"}, - {file = "Pillow-9.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:37ff6b522a26d0538b753f0b4e8e164fdada12db6c6f00f62145d732d8a3152e"}, - {file = "Pillow-9.2.0-cp310-cp310-win32.whl", hash = "sha256:c79698d4cd9318d9481d89a77e2d3fcaeff5486be641e60a4b49f3d2ecca4e28"}, - {file = "Pillow-9.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:254164c57bab4b459f14c64e93df11eff5ded575192c294a0c49270f22c5d93d"}, - {file = "Pillow-9.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:adabc0bce035467fb537ef3e5e74f2847c8af217ee0be0455d4fec8adc0462fc"}, - {file = "Pillow-9.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:336b9036127eab855beec9662ac3ea13a4544a523ae273cbf108b228ecac8437"}, - {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50dff9cc21826d2977ef2d2a205504034e3a4563ca6f5db739b0d1026658e004"}, - {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb6259196a589123d755380b65127ddc60f4c64b21fc3bb46ce3a6ea663659b0"}, - {file = 
"Pillow-9.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b0554af24df2bf96618dac71ddada02420f946be943b181108cac55a7a2dcd4"}, - {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:15928f824870535c85dbf949c09d6ae7d3d6ac2d6efec80f3227f73eefba741c"}, - {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:bdd0de2d64688ecae88dd8935012c4a72681e5df632af903a1dca8c5e7aa871a"}, - {file = "Pillow-9.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5b87da55a08acb586bad5c3aa3b86505f559b84f39035b233d5bf844b0834b1"}, - {file = "Pillow-9.2.0-cp311-cp311-win32.whl", hash = "sha256:b6d5e92df2b77665e07ddb2e4dbd6d644b78e4c0d2e9272a852627cdba0d75cf"}, - {file = "Pillow-9.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6bf088c1ce160f50ea40764f825ec9b72ed9da25346216b91361eef8ad1b8f8c"}, - {file = "Pillow-9.2.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:2c58b24e3a63efd22554c676d81b0e57f80e0a7d3a5874a7e14ce90ec40d3069"}, - {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eef7592281f7c174d3d6cbfbb7ee5984a671fcd77e3fc78e973d492e9bf0eb3f"}, - {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dcd7b9c7139dc8258d164b55696ecd16c04607f1cc33ba7af86613881ffe4ac8"}, - {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a138441e95562b3c078746a22f8fca8ff1c22c014f856278bdbdd89ca36cff1b"}, - {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:93689632949aff41199090eff5474f3990b6823404e45d66a5d44304e9cdc467"}, - {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:f3fac744f9b540148fa7715a435d2283b71f68bfb6d4aae24482a890aed18b59"}, - {file = "Pillow-9.2.0-cp37-cp37m-win32.whl", hash = "sha256:fa768eff5f9f958270b081bb33581b4b569faabf8774726b283edb06617101dc"}, - {file = "Pillow-9.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:69bd1a15d7ba3694631e00df8de65a8cb031911ca11f44929c97fe05eb9b6c1d"}, - {file = "Pillow-9.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:030e3460861488e249731c3e7ab59b07c7853838ff3b8e16aac9561bb345da14"}, - {file = "Pillow-9.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:74a04183e6e64930b667d321524e3c5361094bb4af9083db5c301db64cd341f3"}, - {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d33a11f601213dcd5718109c09a52c2a1c893e7461f0be2d6febc2879ec2402"}, - {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fd6f5e3c0e4697fa7eb45b6e93996299f3feee73a3175fa451f49a74d092b9f"}, - {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a647c0d4478b995c5e54615a2e5360ccedd2f85e70ab57fbe817ca613d5e63b8"}, - {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:4134d3f1ba5f15027ff5c04296f13328fecd46921424084516bdb1b2548e66ff"}, - {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:bc431b065722a5ad1dfb4df354fb9333b7a582a5ee39a90e6ffff688d72f27a1"}, - {file = "Pillow-9.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1536ad017a9f789430fb6b8be8bf99d2f214c76502becc196c6f2d9a75b01b76"}, - {file = "Pillow-9.2.0-cp38-cp38-win32.whl", hash = "sha256:2ad0d4df0f5ef2247e27fc790d5c9b5a0af8ade9ba340db4a73bb1a4a3e5fb4f"}, - {file = "Pillow-9.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:ec52c351b35ca269cb1f8069d610fc45c5bd38c3e91f9ab4cbbf0aebc136d9c8"}, - {file = 
"Pillow-9.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ed2c4ef2451de908c90436d6e8092e13a43992f1860275b4d8082667fbb2ffc"}, - {file = "Pillow-9.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ad2f835e0ad81d1689f1b7e3fbac7b01bb8777d5a985c8962bedee0cc6d43da"}, - {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea98f633d45f7e815db648fd7ff0f19e328302ac36427343e4432c84432e7ff4"}, - {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7761afe0126d046974a01e030ae7529ed0ca6a196de3ec6937c11df0df1bc91c"}, - {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a54614049a18a2d6fe156e68e188da02a046a4a93cf24f373bffd977e943421"}, - {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:5aed7dde98403cd91d86a1115c78d8145c83078e864c1de1064f52e6feb61b20"}, - {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:13b725463f32df1bfeacbf3dd197fb358ae8ebcd8c5548faa75126ea425ccb60"}, - {file = "Pillow-9.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:808add66ea764ed97d44dda1ac4f2cfec4c1867d9efb16a33d158be79f32b8a4"}, - {file = "Pillow-9.2.0-cp39-cp39-win32.whl", hash = "sha256:337a74fd2f291c607d220c793a8135273c4c2ab001b03e601c36766005f36885"}, - {file = "Pillow-9.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:fac2d65901fb0fdf20363fbd345c01958a742f2dc62a8dd4495af66e3ff502a4"}, - {file = "Pillow-9.2.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ad2277b185ebce47a63f4dc6302e30f05762b688f8dc3de55dbae4651872cdf3"}, - {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c7b502bc34f6e32ba022b4a209638f9e097d7a9098104ae420eb8186217ebbb"}, - {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d1f14f5f691f55e1b47f824ca4fdcb4b19b4323fe43cc7bb105988cad7496be"}, - {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:dfe4c1fedfde4e2fbc009d5ad420647f7730d719786388b7de0999bf32c0d9fd"}, - {file = "Pillow-9.2.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:f07f1f00e22b231dd3d9b9208692042e29792d6bd4f6639415d2f23158a80013"}, - {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1802f34298f5ba11d55e5bb09c31997dc0c6aed919658dfdf0198a2fe75d5490"}, - {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17d4cafe22f050b46d983b71c707162d63d796a1235cdf8b9d7a112e97b15bac"}, - {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:96b5e6874431df16aee0c1ba237574cb6dff1dcb173798faa6a9d8b399a05d0e"}, - {file = "Pillow-9.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:0030fdbd926fb85844b8b92e2f9449ba89607231d3dd597a21ae72dc7fe26927"}, - {file = "Pillow-9.2.0.tar.gz", hash = "sha256:75e636fd3e0fb872693f23ccb8a5ff2cd578801251f3a4f6854c6a5d437d3c04"}, + {file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"}, + {file = "Pillow-9.3.0-1-cp37-cp37m-win_amd64.whl", hash = "sha256:32a44128c4bdca7f31de5be641187367fe2a450ad83b833ef78910397db491aa"}, + {file = "Pillow-9.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:0b7257127d646ff8676ec8a15520013a698d1fdc48bc2a79ba4e53df792526f2"}, + {file = "Pillow-9.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:b90f7616ea170e92820775ed47e136208e04c967271c9ef615b6fbd08d9af0e3"}, + {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68943d632f1f9e3dce98908e873b3a090f6cba1cbb1b892a9e8d97c938871fbe"}, + {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be55f8457cd1eac957af0c3f5ece7bc3f033f89b114ef30f710882717670b2a8"}, + {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d77adcd56a42d00cc1be30843d3426aa4e660cab4a61021dc84467123f7a00c"}, + {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:829f97c8e258593b9daa80638aee3789b7df9da5cf1336035016d76f03b8860c"}, + {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:801ec82e4188e935c7f5e22e006d01611d6b41661bba9fe45b60e7ac1a8f84de"}, + {file = "Pillow-9.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:871b72c3643e516db4ecf20efe735deb27fe30ca17800e661d769faab45a18d7"}, + {file = "Pillow-9.3.0-cp310-cp310-win32.whl", hash = "sha256:655a83b0058ba47c7c52e4e2df5ecf484c1b0b0349805896dd350cbc416bdd91"}, + {file = "Pillow-9.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:9f47eabcd2ded7698106b05c2c338672d16a6f2a485e74481f524e2a23c2794b"}, + {file = "Pillow-9.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:57751894f6618fd4308ed8e0c36c333e2f5469744c34729a27532b3db106ee20"}, + {file = "Pillow-9.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7db8b751ad307d7cf238f02101e8e36a128a6cb199326e867d1398067381bff4"}, + {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3033fbe1feb1b59394615a1cafaee85e49d01b51d54de0cbf6aa8e64182518a1"}, + {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22b012ea2d065fd163ca096f4e37e47cd8b59cf4b0fd47bfca6abb93df70b34c"}, + {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a65733d103311331875c1dca05cb4606997fd33d6acfed695b1232ba1df193"}, + {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:502526a2cbfa431d9fc2a079bdd9061a2397b842bb6bc4239bb176da00993812"}, + {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90fb88843d3902fe7c9586d439d1e8c05258f41da473952aa8b328d8b907498c"}, + {file = "Pillow-9.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:89dca0ce00a2b49024df6325925555d406b14aa3efc2f752dbb5940c52c56b11"}, + {file = "Pillow-9.3.0-cp311-cp311-win32.whl", hash = "sha256:3168434d303babf495d4ba58fc22d6604f6e2afb97adc6a423e917dab828939c"}, + {file = "Pillow-9.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:18498994b29e1cf86d505edcb7edbe814d133d2232d256db8c7a8ceb34d18cef"}, + {file = "Pillow-9.3.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:772a91fc0e03eaf922c63badeca75e91baa80fe2f5f87bdaed4280662aad25c9"}, + {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa4107d1b306cdf8953edde0534562607fe8811b6c4d9a486298ad31de733b2"}, + {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4012d06c846dc2b80651b120e2cdd787b013deb39c09f407727ba90015c684f"}, + {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77ec3e7be99629898c9a6d24a09de089fa5356ee408cdffffe62d67bb75fdd72"}, + {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = 
"sha256:6c738585d7a9961d8c2821a1eb3dcb978d14e238be3d70f0a706f7fa9316946b"}, + {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:828989c45c245518065a110434246c44a56a8b2b2f6347d1409c787e6e4651ee"}, + {file = "Pillow-9.3.0-cp37-cp37m-win32.whl", hash = "sha256:82409ffe29d70fd733ff3c1025a602abb3e67405d41b9403b00b01debc4c9a29"}, + {file = "Pillow-9.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:41e0051336807468be450d52b8edd12ac60bebaa97fe10c8b660f116e50b30e4"}, + {file = "Pillow-9.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b03ae6f1a1878233ac620c98f3459f79fd77c7e3c2b20d460284e1fb370557d4"}, + {file = "Pillow-9.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4390e9ce199fc1951fcfa65795f239a8a4944117b5935a9317fb320e7767b40f"}, + {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40e1ce476a7804b0fb74bcfa80b0a2206ea6a882938eaba917f7a0f004b42502"}, + {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a06a052c5f37b4ed81c613a455a81f9a3a69429b4fd7bb913c3fa98abefc20"}, + {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040"}, + {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:15c42fb9dea42465dfd902fb0ecf584b8848ceb28b41ee2b58f866411be33f07"}, + {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:51e0e543a33ed92db9f5ef69a0356e0b1a7a6b6a71b80df99f1d181ae5875636"}, + {file = "Pillow-9.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3dd6caf940756101205dffc5367babf288a30043d35f80936f9bfb37f8355b32"}, + {file = "Pillow-9.3.0-cp38-cp38-win32.whl", hash = "sha256:f1ff2ee69f10f13a9596480335f406dd1f70c3650349e2be67ca3139280cade0"}, + {file = "Pillow-9.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:276a5ca930c913f714e372b2591a22c4bd3b81a418c0f6635ba832daec1cbcfc"}, + {file = "Pillow-9.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:73bd195e43f3fadecfc50c682f5055ec32ee2c933243cafbfdec69ab1aa87cad"}, + {file = "Pillow-9.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c7c8ae3864846fc95f4611c78129301e203aaa2af813b703c55d10cc1628535"}, + {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e0918e03aa0c72ea56edbb00d4d664294815aa11291a11504a377ea018330d3"}, + {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0915e734b33a474d76c28e07292f196cdf2a590a0d25bcc06e64e545f2d146c"}, + {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0372acb5d3598f36ec0914deed2a63f6bcdb7b606da04dc19a88d31bf0c05b"}, + {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ad58d27a5b0262c0c19b47d54c5802db9b34d38bbf886665b626aff83c74bacd"}, + {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:97aabc5c50312afa5e0a2b07c17d4ac5e865b250986f8afe2b02d772567a380c"}, + {file = "Pillow-9.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9aaa107275d8527e9d6e7670b64aabaaa36e5b6bd71a1015ddd21da0d4e06448"}, + {file = "Pillow-9.3.0-cp39-cp39-win32.whl", hash = "sha256:bac18ab8d2d1e6b4ce25e3424f709aceef668347db8637c2296bcf41acb7cf48"}, + {file = "Pillow-9.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b472b5ea442148d1c3e2209f20f1e0bb0eb556538690fa70b5e1f79fa0ba8dc2"}, + {file = "Pillow-9.3.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = 
"sha256:ab388aaa3f6ce52ac1cb8e122c4bd46657c15905904b3120a6248b5b8b0bc228"}, + {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbb8e7f2abee51cef77673be97760abff1674ed32847ce04b4af90f610144c7b"}, + {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca31dd6014cb8b0b2db1e46081b0ca7d936f856da3b39744aef499db5d84d02"}, + {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c7025dce65566eb6e89f56c9509d4f628fddcedb131d9465cacd3d8bac337e7e"}, + {file = "Pillow-9.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ebf2029c1f464c59b8bdbe5143c79fa2045a581ac53679733d3a91d400ff9efb"}, + {file = "Pillow-9.3.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b59430236b8e58840a0dfb4099a0e8717ffb779c952426a69ae435ca1f57210c"}, + {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12ce4932caf2ddf3e41d17fc9c02d67126935a44b86df6a206cf0d7161548627"}, + {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae5331c23ce118c53b172fa64a4c037eb83c9165aba3a7ba9ddd3ec9fa64a699"}, + {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0b07fffc13f474264c336298d1b4ce01d9c5a011415b79d4ee5527bb69ae6f65"}, + {file = "Pillow-9.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8"}, + {file = "Pillow-9.3.0.tar.gz", hash = "sha256:c935a22a557a560108d780f9a0fc426dd7459940dc54faa49d83249c8d3e760f"}, ] pkginfo = [ {file = "pkginfo-1.8.2-py2.py3-none-any.whl", hash = "sha256:c24c487c6a7f72c66e816ab1796b96ac6c3d14d49338293d2141664330b55ffc"}, -- cgit 1.5.1 From a2a44e53a6dd28cead8d8e4d84ded0f0a0bde54f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:29:08 +0000 Subject: Bump cryptography from 36.0.1 to 38.0.3 (#14384) * Bump cryptography from 36.0.1 to 38.0.3 Bumps [cryptography](https://github.com/pyca/cryptography) from 36.0.1 to 38.0.3. - [Release notes](https://github.com/pyca/cryptography/releases) - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/36.0.1...38.0.3) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14384.misc | 1 + poetry.lock | 54 ++++++++++++++++++++++++++++---------------------- 2 files changed, 31 insertions(+), 24 deletions(-) create mode 100644 changelog.d/14384.misc diff --git a/changelog.d/14384.misc b/changelog.d/14384.misc new file mode 100644 index 0000000000..44d3d03183 --- /dev/null +++ b/changelog.d/14384.misc @@ -0,0 +1 @@ +Bump cryptography from 36.0.1 to 38.0.3. diff --git a/poetry.lock b/poetry.lock index 83682a0147..307c631cf9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -186,7 +186,7 @@ python-versions = "*" [[package]] name = "cryptography" -version = "36.0.1" +version = "38.0.3" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
category = "main" optional = false @@ -196,12 +196,12 @@ python-versions = ">=3.6" cffi = ">=1.12" [package.extras] -docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx_rtd_theme"] +docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"] docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] -sdist = ["setuptools_rust (>=0.11.4)"] +sdist = ["setuptools-rust (>=0.11.4)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"] +test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"] [[package]] name = "defusedxml" @@ -1787,26 +1787,32 @@ constantly = [ {file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"}, ] cryptography = [ - {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:73bc2d3f2444bcfeac67dd130ff2ea598ea5f20b40e36d19821b4df8c9c5037b"}, - {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:2d87cdcb378d3cfed944dac30596da1968f88fb96d7fc34fdae30a99054b2e31"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74d6c7e80609c0f4c2434b97b80c7f8fdfaa072ca4baab7e239a15d6d70ed73a"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:6c0c021f35b421ebf5976abf2daacc47e235f8b6082d3396a2fe3ccd537ab173"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59a9d55027a8b88fd9fd2826c4392bd487d74bf628bb9d39beecc62a644c12"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a817b961b46894c5ca8a66b599c745b9a3d9f822725221f0e0fe49dc043a3a3"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:94ae132f0e40fe48f310bba63f477f14a43116f05ddb69d6fa31e93f05848ae2"}, - {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7be0eec337359c155df191d6ae00a5e8bbb63933883f4f5dffc439dac5348c3f"}, - {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e0344c14c9cb89e76eb6a060e67980c9e35b3f36691e15e1b7a9e58a0a6c6dc3"}, - {file = "cryptography-36.0.1-cp36-abi3-win32.whl", hash = "sha256:4caa4b893d8fad33cf1964d3e51842cd78ba87401ab1d2e44556826df849a8ca"}, - {file = "cryptography-36.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:391432971a66cfaf94b21c24ab465a4cc3e8bf4a939c1ca5c3e3a6e0abebdbcf"}, - {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bb5829d027ff82aa872d76158919045a7c1e91fbf241aec32cb07956e9ebd3c9"}, - {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc15b1c22e55c4d5566e3ca4db8689470a0ca2babef8e3a9ee057a8b82ce4b1"}, - {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:596f3cd67e1b950bc372c33f1a28a0692080625592ea6392987dba7f09f17a94"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:30ee1eb3ebe1644d1c3f183d115a8c04e4e603ed6ce8e394ed39eea4a98469ac"}, - {file = 
"cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec63da4e7e4a5f924b90af42eddf20b698a70e58d86a72d943857c4c6045b3ee"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca238ceb7ba0bdf6ce88c1b74a87bffcee5afbfa1e41e173b1ceb095b39add46"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:ca28641954f767f9822c24e927ad894d45d5a1e501767599647259cbf030b903"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:39bdf8e70eee6b1c7b289ec6e5d84d49a6bfa11f8b8646b5b3dfe41219153316"}, - {file = "cryptography-36.0.1.tar.gz", hash = "sha256:53e5c1dc3d7a953de055d77bef2ff607ceef7a2aac0353b5d630ab67f7423638"}, + {file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:984fe150f350a3c91e84de405fe49e688aa6092b3525f407a18b9646f6612320"}, + {file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:ed7b00096790213e09eb11c97cc6e2b757f15f3d2f85833cd2d3ec3fe37c1722"}, + {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bbf203f1a814007ce24bd4d51362991d5cb90ba0c177a9c08825f2cc304d871f"}, + {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:554bec92ee7d1e9d10ded2f7e92a5d70c1f74ba9524947c0ba0c850c7b011828"}, + {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1b52c9e5f8aa2b802d48bd693190341fae201ea51c7a167d69fc48b60e8a959"}, + {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:728f2694fa743a996d7784a6194da430f197d5c58e2f4e278612b359f455e4a2"}, + {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dfb4f4dd568de1b6af9f4cda334adf7d72cf5bc052516e1b2608b683375dd95c"}, + {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5419a127426084933076132d317911e3c6eb77568a1ce23c3ac1e12d111e61e0"}, + {file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9b24bcff7853ed18a63cfb0c2b008936a9554af24af2fb146e16d8e1aed75748"}, + {file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:25c1d1f19729fb09d42e06b4bf9895212292cb27bb50229f5aa64d039ab29146"}, + {file = "cryptography-38.0.3-cp36-abi3-win32.whl", hash = "sha256:7f836217000342d448e1c9a342e9163149e45d5b5eca76a30e84503a5a96cab0"}, + {file = "cryptography-38.0.3-cp36-abi3-win_amd64.whl", hash = "sha256:c46837ea467ed1efea562bbeb543994c2d1f6e800785bd5a2c98bc096f5cb220"}, + {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06fc3cc7b6f6cca87bd56ec80a580c88f1da5306f505876a71c8cfa7050257dd"}, + {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:65535bc550b70bd6271984d9863a37741352b4aad6fb1b3344a54e6950249b55"}, + {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:5e89468fbd2fcd733b5899333bc54d0d06c80e04cd23d8c6f3e0542358c6060b"}, + {file = "cryptography-38.0.3-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6ab9516b85bebe7aa83f309bacc5f44a61eeb90d0b4ec125d2d003ce41932d36"}, + {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:068147f32fa662c81aebab95c74679b401b12b57494872886eb5c1139250ec5d"}, + {file = 
"cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:402852a0aea73833d982cabb6d0c3bb582c15483d29fb7085ef2c42bfa7e38d7"}, + {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b1b35d9d3a65542ed2e9d90115dfd16bbc027b3f07ee3304fc83580f26e43249"}, + {file = "cryptography-38.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6addc3b6d593cd980989261dc1cce38263c76954d758c3c94de51f1e010c9a50"}, + {file = "cryptography-38.0.3-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:be243c7e2bfcf6cc4cb350c0d5cdf15ca6383bbcb2a8ef51d3c9411a9d4386f0"}, + {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78cf5eefac2b52c10398a42765bfa981ce2372cbc0457e6bf9658f41ec3c41d8"}, + {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4e269dcd9b102c5a3d72be3c45d8ce20377b8076a43cbed6f660a1afe365e436"}, + {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8d41a46251bf0634e21fac50ffd643216ccecfaf3701a063257fe0b2be1b6548"}, + {file = "cryptography-38.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:785e4056b5a8b28f05a533fab69febf5004458e20dad7e2e13a3120d8ecec75a"}, + {file = "cryptography-38.0.3.tar.gz", hash = "sha256:bfbe6ee19615b07a98b1d2287d6a6073f734735b49ee45b11324d85efc4d5cbd"}, ] defusedxml = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, -- cgit 1.5.1 From bd70fc1a3c6de5d121cc79ade5b6631591d632ef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:29:16 +0000 Subject: Bump types-pyyaml from 6.0.12 to 6.0.12.1 (#14385) * Bump types-pyyaml from 6.0.12 to 6.0.12.1 Bumps [types-pyyaml](https://github.com/python/typeshed) from 6.0.12 to 6.0.12.1. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pyyaml dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14385.misc | 1 + poetry.lock | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/14385.misc diff --git a/changelog.d/14385.misc b/changelog.d/14385.misc new file mode 100644 index 0000000000..960ad9b0cd --- /dev/null +++ b/changelog.d/14385.misc @@ -0,0 +1 @@ +Bump types-pyyaml from 6.0.12 to 6.0.12.1. 
diff --git a/poetry.lock b/poetry.lock index 307c631cf9..4b66eba7b8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1465,8 +1465,8 @@ python-versions = "*" types-cryptography = "*" [[package]] -name = "types-PyYAML" -version = "6.0.12" +name = "types-pyyaml" +version = "6.0.12.1" description = "Typing stubs for PyYAML" category = "dev" optional = false @@ -2817,9 +2817,9 @@ types-pyOpenSSL = [ {file = "types-pyOpenSSL-22.0.10.tar.gz", hash = "sha256:f943b834f5b97e5e808764c2f6e37be1a2e226c46792296f61558196acfcc3a1"}, {file = "types_pyOpenSSL-22.0.10-py3-none-any.whl", hash = "sha256:63baea211768bea580a769ac5c0d637ae8cd3150314aadc5726ca22e4c4f241a"}, ] -types-PyYAML = [ - {file = "types-PyYAML-6.0.12.tar.gz", hash = "sha256:f6f350418125872f3f0409d96a62a5a5ceb45231af5cc07ee0034ec48a3c82fa"}, - {file = "types_PyYAML-6.0.12-py3-none-any.whl", hash = "sha256:29228db9f82df4f1b7febee06bbfb601677882e98a3da98132e31c6874163e15"}, +types-pyyaml = [ + {file = "types-PyYAML-6.0.12.1.tar.gz", hash = "sha256:70ccaafcf3fb404d57bffc1529fdd86a13e8b4f2cf9fc3ee81a6408ce0ad59d2"}, + {file = "types_PyYAML-6.0.12.1-py3-none-any.whl", hash = "sha256:aaf5e51444c13bd34104695a89ad9c48412599a4f615d65a60e649109714f608"}, ] types-requests = [ {file = "types-requests-2.28.11.2.tar.gz", hash = "sha256:fdcd7bd148139fb8eef72cf4a41ac7273872cad9e6ada14b11ff5dfdeee60ed3"}, -- cgit 1.5.1 From 233fc6e279f1a2af0d76f10ce9581aed1139467d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:29:26 +0000 Subject: Bump types-jsonschema from 4.4.6 to 4.17.0.0 (#14386) * Bump types-jsonschema from 4.4.6 to 4.17.0.0 Bumps [types-jsonschema](https://github.com/python/typeshed) from 4.4.6 to 4.17.0.0. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-jsonschema dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14386.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14386.misc diff --git a/changelog.d/14386.misc b/changelog.d/14386.misc new file mode 100644 index 0000000000..03c2af8cdb --- /dev/null +++ b/changelog.d/14386.misc @@ -0,0 +1 @@ +Bump types-jsonschema from 4.4.6 to 4.17.0.0. 
diff --git a/poetry.lock b/poetry.lock index 4b66eba7b8..29210dc4cd 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1423,7 +1423,7 @@ python-versions = "*" [[package]] name = "types-jsonschema" -version = "4.4.6" +version = "4.17.0.0" description = "Typing stubs for jsonschema" category = "dev" optional = false @@ -2798,8 +2798,8 @@ types-ipaddress = [ {file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"}, ] types-jsonschema = [ - {file = "types-jsonschema-4.4.6.tar.gz", hash = "sha256:7f2a804618756768c7c0616f8c794b61fcfe3077c7ee1ad47dcf01c5e5f692bb"}, - {file = "types_jsonschema-4.4.6-py3-none-any.whl", hash = "sha256:1db9031ca49a8444d01bd2ce8cf2f89318382b04610953b108321e6f8fb03390"}, + {file = "types-jsonschema-4.17.0.0.tar.gz", hash = "sha256:5b0875503218497cfc5c5ba92b458b1b8ec34a136e4a0d8c4f5889d59b1f5168"}, + {file = "types_jsonschema-4.17.0.0-py3-none-any.whl", hash = "sha256:520816acf40d1d7ce0981aa805862b27395491b4854188844945c674ad9173a1"}, ] types-opentracing = [ {file = "types-opentracing-2.4.10.tar.gz", hash = "sha256:6101414f3b6d3b9c10f1c510a261e8439b6c8d67c723d5c2872084697b4580a7"}, -- cgit 1.5.1 From e980982b59dea38ec10a5c58993d09e02f845d28 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 7 Nov 2022 13:49:31 +0000 Subject: Do not reject `/sync` requests with unrecognised filter fields (#14369) For forward compatibility, Synapse needs to ignore fields it does not recognise instead of raising an error. Fixes #14365. Signed-off-by: Sean Quah --- changelog.d/14369.bugfix | 1 + synapse/api/filtering.py | 8 ++++---- tests/api/test_filtering.py | 21 +++++++++++++++++++-- 3 files changed, 24 insertions(+), 6 deletions(-) create mode 100644 changelog.d/14369.bugfix diff --git a/changelog.d/14369.bugfix b/changelog.d/14369.bugfix new file mode 100644 index 0000000000..e6709f4eec --- /dev/null +++ b/changelog.d/14369.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. 
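The fix that follows is essentially a one-word change per schema: flipping `additionalProperties` from `False` to `True` makes the validator ignore fields it does not recognise instead of failing the whole filter. Below is a minimal sketch of that behaviour, assuming only the `jsonschema` package (which Synapse's filter validation is built on) and a simplified stand-in schema rather than the real `USER_FILTER_SCHEMA`:

```python
# Minimal sketch, assuming only the `jsonschema` package. The schema below
# is a simplified stand-in for USER_FILTER_SCHEMA, not the real definition.
import jsonschema

strict_schema = {
    "type": "object",
    "properties": {"event_format": {"type": "string"}},
    "additionalProperties": False,  # old behaviour: unknown fields are errors
}
lenient_schema = dict(strict_schema, additionalProperties=True)  # new behaviour

# A filter using a hypothetical future field (the same one the new test uses).
filter_body = {"org.matrix.msc9999.future_option": True}

try:
    jsonschema.validate(filter_body, strict_schema)
except jsonschema.ValidationError as e:
    print("rejected:", e.message)  # the whole filter fails validation

jsonschema.validate(filter_body, lenient_schema)  # unknown field is ignored
print("accepted")
```

Loosening the schema trades strict input checking for forward compatibility: a client built against a newer spec (or an MSC implementation) can send extra filter fields without breaking `/sync` against older servers.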
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 26be377d03..a9888381b4 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -43,7 +43,7 @@ if TYPE_CHECKING: from synapse.server import HomeServer FILTER_SCHEMA = { - "additionalProperties": False, + "additionalProperties": True, # Allow new fields for forward compatibility "type": "object", "properties": { "limit": {"type": "number"}, @@ -63,7 +63,7 @@ FILTER_SCHEMA = { } ROOM_FILTER_SCHEMA = { - "additionalProperties": False, + "additionalProperties": True, # Allow new fields for forward compatibility "type": "object", "properties": { "not_rooms": {"$ref": "#/definitions/room_id_array"}, @@ -77,7 +77,7 @@ ROOM_FILTER_SCHEMA = { } ROOM_EVENT_FILTER_SCHEMA = { - "additionalProperties": False, + "additionalProperties": True, # Allow new fields for forward compatibility "type": "object", "properties": { "limit": {"type": "number"}, @@ -143,7 +143,7 @@ USER_FILTER_SCHEMA = { }, }, }, - "additionalProperties": False, + "additionalProperties": True, # Allow new fields for forward compatibility } diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index a82c4eed86..d5524d296e 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -46,19 +46,36 @@ class FilteringTestCase(unittest.HomeserverTestCase): self.datastore = hs.get_datastores().main def test_errors_on_invalid_filters(self): + # See USER_FILTER_SCHEMA for the filter schema. invalid_filters = [ - {"boom": {}}, + # `account_data` must be a dictionary {"account_data": "Hello World"}, + # `event_fields` entries must not contain backslashes {"event_fields": [r"\\foo"]}, - {"room": {"timeline": {"limit": 0}, "state": {"not_bars": ["*"]}}}, + # `event_format` must be "client" or "federation" {"event_format": "other"}, + # `not_rooms` must contain valid room IDs {"room": {"not_rooms": ["#foo:pik-test"]}}, + # `senders` must contain valid user IDs {"presence": {"senders": ["@bar;pik.test.com"]}}, ] for filter in invalid_filters: with self.assertRaises(SynapseError): self.filtering.check_valid_filter(filter) + def test_ignores_unknown_filter_fields(self): + # For forward compatibility, we must ignore unknown filter fields. + # See USER_FILTER_SCHEMA for the filter schema. + filters = [ + {"org.matrix.msc9999.future_option": True}, + {"presence": {"org.matrix.msc9999.future_option": True}}, + {"room": {"org.matrix.msc9999.future_option": True}}, + {"room": {"timeline": {"org.matrix.msc9999.future_option": True}}}, + ] + for filter in filters: + self.filtering.check_valid_filter(filter) + # Must not raise. + def test_valid_filters(self): valid_filters = [ { -- cgit 1.5.1 From 42f9d414c266188645ef2a36eeff05309b779a46 Mon Sep 17 00:00:00 2001 From: aceArt-GmbH <33117017+aceArt-GmbH@users.noreply.github.com> Date: Mon, 7 Nov 2022 14:51:53 +0100 Subject: Add example on how to load balance /sync requests (#14297) Signed-off-by: lukas Signed-off-by: lukas --- changelog.d/14297.doc | 1 + docs/workers.md | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14297.doc diff --git a/changelog.d/14297.doc b/changelog.d/14297.doc new file mode 100644 index 0000000000..e418c2a18a --- /dev/null +++ b/changelog.d/14297.doc @@ -0,0 +1 @@ +Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). 
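The docs change below recommends pinning each user's `/sync` traffic to a single worker by hashing the `Authorization` header. A rough Python sketch of that routing property (worker addresses and token invented; nginx achieves the equivalent with its `hash ... consistent` directive):

    # Rough sketch (not Synapse or nginx code) of sticky routing by access
    # token: the same Authorization header always maps to the same worker.
    import hashlib

    SYNC_WORKERS = ["127.0.0.1:8083", "127.0.0.1:8084", "127.0.0.1:8085"]

    def pick_sync_worker(authorization_header: str) -> str:
        digest = hashlib.sha256(authorization_header.encode("utf-8")).digest()
        return SYNC_WORKERS[int.from_bytes(digest[:4], "big") % len(SYNC_WORKERS)]

    # All /sync requests carrying this token land on one instance:
    token = "Bearer syt_invented_token"
    assert pick_sync_worker(token) == pick_sync_worker(token)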
diff --git a/docs/workers.md b/docs/workers.md
index 5e1b9ba220..7ee8801161 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -305,9 +305,11 @@ may wish to run multiple groups of workers handling different endpoints so that
 load balancing can be done in different ways.

 For `/sync` and `/initialSync` requests it will be more efficient if all
-requests from a particular user are routed to a single instance. Extracting a
-user ID from the access token or `Authorization` header is currently left as an
-exercise for the reader. Admins may additionally wish to separate out `/sync`
+requests from a particular user are routed to a single instance. This can be
+done in nginx, e.g. via `hash $http_x_forwarded_for;` (the client IP) or via
+`hash $http_authorization consistent;`, which hashes the user's access token.
+
+Admins may additionally wish to separate out `/sync`
 requests that have a `since` query parameter from those that don't (and
 `/initialSync`), as requests that don't are known as "initial sync" that happens
 when a user logs in on a new device and can be *very* resource intensive, so
-- cgit 1.5.1


From 2193513346054769080dd8a07586bed652acae60 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 7 Nov 2022 14:28:00 +0000
Subject: Fix background update table-scanning `events` (#14374)

When this background update did its last batch, it would try to update all
the events that had been inserted since the bgupdate started, which could
cause a table-scan. Make sure we limit the update correctly.
---
 changelog.d/14374.bugfix                            |  1 +
 synapse/storage/databases/main/events_bg_updates.py | 16 ++++++++--------
 2 files changed, 9 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/14374.bugfix

diff --git a/changelog.d/14374.bugfix b/changelog.d/14374.bugfix
new file mode 100644
index 0000000000..8366cfbf8a
--- /dev/null
+++ b/changelog.d/14374.bugfix
@@ -0,0 +1 @@
+Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance.
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 6e8aeed7b4..9e31798ab1 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -1435,16 +1435,16 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
             ),
         )

-        endpoint = None
         row = txn.fetchone()
         if row:
             endpoint = row[0]
+        else:
+            # if the query didn't return a row, we must be almost done. We just
+            # need to go up to the recorded max_stream_ordering.
+            endpoint = max_stream_ordering_inclusive

-        where_clause = "stream_ordering > ?"
-        args = [min_stream_ordering_exclusive]
-        if endpoint:
-            where_clause += " AND stream_ordering <= ?"
-            args.append(endpoint)
+        where_clause = "stream_ordering > ? AND stream_ordering <= ?"
+        args = [min_stream_ordering_exclusive, endpoint]

         # now do the updates.
txn.execute( @@ -1458,13 +1458,13 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): ) logger.info( - "populated new `events` columns up to %s/%i: updated %i rows", + "populated new `events` columns up to %i/%i: updated %i rows", endpoint, max_stream_ordering_inclusive, txn.rowcount, ) - if endpoint is None: + if endpoint >= max_stream_ordering_inclusive: # we're done return True -- cgit 1.5.1 From 7894251bcea7714b47e3849e509ea717bb18e9f5 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 7 Nov 2022 13:38:50 -0800 Subject: Correctly create power level event during initial room creation (#14361) --- changelog.d/14361.bugfix | 1 + synapse/handlers/room.py | 25 +++++++++++++++++++++++-- tests/rest/client/test_rooms.py | 4 ++-- 3 files changed, 26 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14361.bugfix diff --git a/changelog.d/14361.bugfix b/changelog.d/14361.bugfix new file mode 100644 index 0000000000..33ba1d92af --- /dev/null +++ b/changelog.d/14361.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. \ No newline at end of file diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index f10cfca073..66a50bca6e 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1080,6 +1080,19 @@ class RoomCreationHandler: for_batch: bool, **kwargs: Any, ) -> Tuple[EventBase, synapse.events.snapshot.EventContext]: + """ + Creates an event and associated event context. + Args: + etype: the type of event to be created + content: content of the event + for_batch: whether the event is being created for batch persisting. If + bool for_batch is true, this will create an event using the prev_event_ids, + and will create an event context for the event using the parameters state_map + and current_state_group, thus these parameters must be provided in this + case if for_batch is True. The subsequently created event and context + are suitable for being batched up and bulk persisted to the database + with other similarly created events. + """ nonlocal depth nonlocal prev_event @@ -1139,13 +1152,21 @@ class RoomCreationHandler: depth += 1 state_map[(EventTypes.Member, creator.user.to_string())] = member_event_id + # we need the state group of the membership event as it is the current state group + event_to_state = ( + await self._storage_controllers.state.get_state_group_for_events( + [member_event_id] + ) + ) + current_state_group = event_to_state[member_event_id] + events_to_send = [] # We treat the power levels override specially as this needs to be one # of the first events that get sent into a room. 
pl_content = initial_state.pop((EventTypes.PowerLevels, ""), None) if pl_content is not None: power_event, power_context = await create_event( - EventTypes.PowerLevels, pl_content, False + EventTypes.PowerLevels, pl_content, True ) current_state_group = power_context._state_group events_to_send.append((power_event, power_context)) @@ -1194,7 +1215,7 @@ class RoomCreationHandler: pl_event, pl_context = await create_event( EventTypes.PowerLevels, power_level_content, - False, + True, ) current_state_group = pl_context._state_group events_to_send.append((pl_event, pl_context)) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 1084d4ad9d..e919e089cb 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -715,7 +715,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(34, channel.resource_usage.db_txn_count) + self.assertEqual(33, channel.resource_usage.db_txn_count) def test_post_room_initial_state(self) -> None: # POST with initial_state config key, expect new room id @@ -728,7 +728,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(37, channel.resource_usage.db_txn_count) + self.assertEqual(36, channel.resource_usage.db_txn_count) def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id -- cgit 1.5.1 From 404404733c361891cfb90b2fc07e6cd05458cd40 Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Tue, 8 Nov 2022 10:38:16 +0000 Subject: 1.71.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index b6417ceb29..968612bc79 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.71.0 (2022-11-08) +=========================== + +No significant changes since 1.71.0rc2. + + Synapse 1.71.0rc2 (2022-11-04) ============================== diff --git a/debian/changelog b/debian/changelog index 419fc7ebc2..74ae2870bb 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.71.0) stable; urgency=medium + + * New Synapse release 1.71.0. + + -- Synapse Packaging team Tue, 08 Nov 2022 10:38:10 +0000 + matrix-synapse-py3 (1.71.0~rc2) stable; urgency=medium * New Synapse release 1.71.0rc2. diff --git a/pyproject.toml b/pyproject.toml index 249b1cbedf..e07a208e67 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.71.0rc2" +version = "1.71.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From 1d1ab0e41fad3c2e5a8c8df483ca687d3350cf8a Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Tue, 8 Nov 2022 10:40:34 +0000 Subject: Update CHANGES.md --- CHANGES.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 968612bc79..b47346dd47 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,17 +1,16 @@ Synapse 1.71.0 (2022-11-08) =========================== -No significant changes since 1.71.0rc2. 
- - -Synapse 1.71.0rc2 (2022-11-04) -============================== - Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default. They will be removed altogether in Synapse 1.73.0. If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names. See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details. +No significant changes since 1.71.0rc2. + + +Synapse 1.71.0rc2 (2022-11-04) +============================== Improved Documentation ---------------------- -- cgit 1.5.1 From f0dec49f01a7071296bce74557fbd6996ccf5759 Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Tue, 8 Nov 2022 10:59:36 +0000 Subject: Update CHANGES.md to mention PostgreSQL 10 end of life --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index b47346dd47..078a36eac9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,6 +6,8 @@ They will be removed altogether in Synapse 1.73.0. If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names. See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details. +**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+. + No significant changes since 1.71.0rc2. -- cgit 1.5.1 From 69814eb2824daf846f869cb9579eb1008e61f8ad Mon Sep 17 00:00:00 2001 From: realtyem Date: Tue, 8 Nov 2022 06:34:09 -0600 Subject: Allow override for requesting specific worker types for Complement on command line. (#14324) * Expose getting SYNAPSE_WORKER_TYPES from external, allowing override of workers requested. * Add WORKER_TYPES variable option to complement.sh script that passes requested workers into start_for_complement.sh entrypoint. * Update docs to reflect this new ability. * Changelog * Don't rely on soft wrapping to format long strings Good idea dklimpel. Thanks for catching that. Co-authored-by: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> * Small nits just noticed in docs. * Fixup new line in docs. Co-authored-by: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> --- changelog.d/14324.misc | 1 + docker/complement/conf/start_for_complement.sh | 9 ++++++++- docs/development/contributing_guide.md | 6 ++++++ scripts-dev/complement.sh | 3 +++ 4 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14324.misc diff --git a/changelog.d/14324.misc b/changelog.d/14324.misc new file mode 100644 index 0000000000..54d8198a8f --- /dev/null +++ b/changelog.d/14324.misc @@ -0,0 +1 @@ +Add override ability to `complement.sh` command line script to request certain types of workers. 
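The shell change that follows applies the built-in worker list only when `SYNAPSE_WORKER_TYPES` is not already set. The same guard, sketched in Python for clarity (default list abbreviated):

    # Python sketch of the override guard below: an explicitly exported
    # SYNAPSE_WORKER_TYPES wins, otherwise fall back to the default list.
    import os

    DEFAULT_WORKER_TYPES = "event_persister, background_worker, synchrotron"  # abbreviated

    def resolve_worker_types() -> str:
        return os.environ.get("SYNAPSE_WORKER_TYPES") or DEFAULT_WORKER_TYPES

A typical invocation then looks like `WORKERS=1 WORKER_TYPES="federation_inbound, federation_sender, synchrotron" ./scripts-dev/complement.sh`, matching the example added to the contributing guide below.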
diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh index bb85d9fed7..49d79745b0 100755 --- a/docker/complement/conf/start_for_complement.sh +++ b/docker/complement/conf/start_for_complement.sh @@ -45,7 +45,12 @@ esac if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then # Specify the workers to test with - export SYNAPSE_WORKER_TYPES="\ + # Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still + # utilizing WORKERS=1 for backwards compatibility. + # -n True if the length of string is non-zero. + # -z True if the length of string is zero. + if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then + export SYNAPSE_WORKER_TYPES="\ event_persister, \ event_persister, \ background_worker, \ @@ -61,6 +66,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then appservice, \ pusher" + fi + log "Workers requested: $SYNAPSE_WORKER_TYPES" # Improve startup times by using a launcher based on fork() export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1 else diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 1e52f9808c..342bc1d340 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -324,6 +324,12 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data - Passing `POSTGRES=1` as an environment variable to use the Postgres database instead. - Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres. + - If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker + types you wish to test. A simple comma-delimited string containing the worker types + defined from the `WORKERS_CONFIG` template in + [here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54). + A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`. + See the [worker documentation](../workers.md) for additional information on workers. To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g: ```sh diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 76c859694f..803c6ce92d 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -139,6 +139,9 @@ if [[ -n "$WORKERS" ]]; then # Use workers. export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true + # Pass through the workers defined. If none, it will be an empty string + export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES" + # Workers can only use Postgres as a database. export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres -- cgit 1.5.1 From d85cba1aa0a2f6dbb988b81d331b5ba9487fe1ac Mon Sep 17 00:00:00 2001 From: realtyem Date: Tue, 8 Nov 2022 07:14:00 -0600 Subject: Add all Stream Writer worker types to configure_workers_and_start.py (#14197) Co-authored-by: reivilibre --- changelog.d/14197.docker | 1 + docker/configure_workers_and_start.py | 76 +++++++++++++++++++++++++++++++---- 2 files changed, 70 insertions(+), 7 deletions(-) create mode 100644 changelog.d/14197.docker diff --git a/changelog.d/14197.docker b/changelog.d/14197.docker new file mode 100644 index 0000000000..529ccd99c5 --- /dev/null +++ b/changelog.d/14197.docker @@ -0,0 +1 @@ +Add all Stream Writer worker types to configure_workers_and_start.py. 
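For orientation on the generator changes below: registering a stream-writer worker appends its name to `stream_writers` and adds an entry to `instance_map` in the shared config. A hypothetical result for a single `typing` writer (worker name and port invented for illustration):

    # Hypothetical shared-config fragment after registering one "typing"
    # stream writer named "typing1" on port 8034 (values invented):
    shared_config = {
        "stream_writers": {
            # each stream maps to the list of worker names writing to it
            "typing": ["typing1"],
        },
        "instance_map": {
            # stream writers need an HTTP replication endpoint reachable
            # from the main process
            "typing1": {"host": "localhost", "port": 8034},
        },
    }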
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 1ea456b2f8..da259129d1 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -50,7 +50,12 @@ from jinja2 import Environment, FileSystemLoader MAIN_PROCESS_HTTP_LISTENER_PORT = 8080 - +# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources +# Watching /_matrix/client needs a "client" listener +# Watching /_matrix/federation needs a "federation" listener +# Watching /_matrix/media and related needs a "media" listener +# Stream Writers require "client" and "replication" listeners because they +# have to attach by instance_map to the master process and have client endpoints. WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "pusher": { "app": "synapse.app.pusher", @@ -209,6 +214,49 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { % (MAIN_PROCESS_HTTP_LISTENER_PORT,) ), }, + "account_data": { + "app": "synapse.app.generic_worker", + "listener_resources": ["client", "replication"], + "endpoint_patterns": [ + "^/_matrix/client/(r0|v3|unstable)/.*/tags", + "^/_matrix/client/(r0|v3|unstable)/.*/account_data", + ], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "presence": { + "app": "synapse.app.generic_worker", + "listener_resources": ["client", "replication"], + "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "receipts": { + "app": "synapse.app.generic_worker", + "listener_resources": ["client", "replication"], + "endpoint_patterns": [ + "^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt", + "^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers", + ], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "to_device": { + "app": "synapse.app.generic_worker", + "listener_resources": ["client", "replication"], + "endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "typing": { + "app": "synapse.app.generic_worker", + "listener_resources": ["client", "replication"], + "endpoint_patterns": [ + "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing" + ], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, } # Templates for sections that may be inserted multiple times in config files @@ -271,7 +319,7 @@ def convert(src: str, dst: str, **template_vars: object) -> None: outfile.write(rendered) -def add_sharding_to_shared_config( +def add_worker_roles_to_shared_config( shared_config: dict, worker_type: str, worker_name: str, @@ -309,6 +357,20 @@ def add_sharding_to_shared_config( "port": worker_port, } + elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]: + # Update the list of stream writers + # It's convienent that the name of the worker type is the same as the event stream + shared_config.setdefault("stream_writers", {}).setdefault( + worker_type, [] + ).append(worker_name) + + # Map of stream writer instance names to host/ports combos + # For now, all stream writers need http replication ports + instance_map[worker_name] = { + "host": "localhost", + "port": worker_port, + } + elif worker_type == "media_repository": # The first configured media worker will run the media background jobs shared_config.setdefault("media_instance_running_background_jobs", worker_name) @@ -441,11 +503,11 @@ def generate_worker_files( # Check if more than one instance of this worker type has been specified 
worker_type_total_count = worker_types.count(worker_type) - if worker_type_total_count > 1: - # Update the shared config with sharding-related options if necessary - add_sharding_to_shared_config( - shared_config, worker_type, worker_name, worker_port - ) + + # Update the shared config with sharding-related options if necessary + add_worker_roles_to_shared_config( + shared_config, worker_type, worker_name, worker_port + ) # Enable the worker in supervisord worker_descriptors.append(worker_config) -- cgit 1.5.1 From a5fcdea090c2396c30dd07c357ce4d9c90004c34 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 8 Nov 2022 17:17:13 +0000 Subject: Remove support for PostgreSQL 10 (#14392) Signed-off-by: Sean Quah --- .ci/scripts/calculate_jobs.py | 2 +- .github/workflows/tests.yml | 2 +- changelog.d/14392.removal | 1 + docs/upgrade.md | 10 ++++++++++ synapse/storage/engines/postgres.py | 4 ++-- 5 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14392.removal diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index c53d4d5ff1..b48174bea2 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -54,7 +54,7 @@ trial_postgres_tests = [ { "python-version": "3.7", "database": "postgres", - "postgres-version": "10", + "postgres-version": "11", "extras": "all", } ] diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index fea33abd12..2bc237a0ba 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -409,7 +409,7 @@ jobs: matrix: include: - python-version: "3.7" - postgres-version: "10" + postgres-version: "11" - python-version: "3.11" postgres-version: "14" diff --git a/changelog.d/14392.removal b/changelog.d/14392.removal new file mode 100644 index 0000000000..e96b3de2bd --- /dev/null +++ b/changelog.d/14392.removal @@ -0,0 +1 @@ +Remove support for PostgreSQL 10. diff --git a/docs/upgrade.md b/docs/upgrade.md index 41b06cc253..2aa353e496 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,16 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.72.0 + +## Dropping support for PostgreSQL 10 + +In line with our [deprecation policy](deprecation_policy.md), we've dropped +support for PostgreSQL 10, as it is no longer supported upstream. + +This release of Synapse requires PostgreSQL 11+. + + # Upgrading to v1.71.0 ## Removal of the `generate_short_term_login_token` module API method diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 9bf74bbf59..0c4fd88914 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -81,8 +81,8 @@ class PostgresEngine( allow_unsafe_locale = self.config.get("allow_unsafe_locale", False) # Are we on a supported PostgreSQL version? - if not allow_outdated_version and self._version < 100000: - raise RuntimeError("Synapse requires PostgreSQL 10 or above.") + if not allow_outdated_version and self._version < 110000: + raise RuntimeError("Synapse requires PostgreSQL 11 or above.") with db_conn.cursor() as txn: txn.execute("SHOW SERVER_ENCODING") -- cgit 1.5.1 From 22d46db0eae000830154bba8916d8d53458dc351 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 9 Nov 2022 10:32:52 +0000 Subject: Test against PostgreSQL 15 in CI (#14394) Resolves #14170. 
Signed-off-by: Sean Quah
---
 .ci/scripts/calculate_jobs.py |  2 +-
 .github/workflows/tests.yml   | 11 ++++++++++-
 changelog.d/14394.misc        |  1 +
 3 files changed, 12 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/14394.misc

diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py
index b48174bea2..0cdc20e19c 100755
--- a/.ci/scripts/calculate_jobs.py
+++ b/.ci/scripts/calculate_jobs.py
@@ -64,7 +64,7 @@ if not IS_PR:
         {
             "python-version": "3.11",
             "database": "postgres",
-            "postgres-version": "14",
+            "postgres-version": "15",
             "extras": "all",
         }
     )
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 2bc237a0ba..ec5ab79f9c 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -412,7 +412,7 @@ jobs:
             postgres-version: "11"

           - python-version: "3.11"
-            postgres-version: "14"
+            postgres-version: "15"

     services:
       postgres:
@@ -430,6 +430,15 @@ jobs:
     steps:
       - uses: actions/checkout@v3
+      - name: Add PostgreSQL apt repository
+        # We need a version of pg_dump that can handle the version of
+        # PostgreSQL being tested against. The Ubuntu package repository lags
+        # behind new releases, so we have to use the PostgreSQL apt repository.
+        # Steps taken from https://www.postgresql.org/download/linux/ubuntu/
+        run: |
+          sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
+          wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
+          sudo apt-get update
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@v1
         with:
diff --git a/changelog.d/14394.misc b/changelog.d/14394.misc
new file mode 100644
index 0000000000..8b816eb0a3
--- /dev/null
+++ b/changelog.d/14394.misc
@@ -0,0 +1 @@
+Test against PostgreSQL 15 in CI.
-- cgit 1.5.1


From 0cf48f2d5fb0b7c9d76413045c2affbc5d88c0ee Mon Sep 17 00:00:00 2001
From: Sean Quah <8349537+squahtx@users.noreply.github.com>
Date: Wed, 9 Nov 2022 10:33:13 +0000
Subject: Build Debian packages for Ubuntu 22.10 Kinetic Kudu (#14396)

Signed-off-by: Sean Quah
---
 changelog.d/14396.feature            | 1 +
 scripts-dev/build_debian_packages.py | 1 +
 2 files changed, 2 insertions(+)
 create mode 100644 changelog.d/14396.feature

diff --git a/changelog.d/14396.feature b/changelog.d/14396.feature
new file mode 100644
index 0000000000..e875916885
--- /dev/null
+++ b/changelog.d/14396.feature
@@ -0,0 +1 @@
+Build Debian packages for Ubuntu 22.10 (Kinetic Kudu).
diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index cd2e64b75f..7442300196 100755
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -27,6 +27,7 @@ DISTS = (
     "debian:sid",
     "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
     "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04)
+    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20)
 )

 DESC = """\
-- cgit 1.5.1


From e9cbddc8e779050e0053826686cc8ac768e37813 Mon Sep 17 00:00:00 2001
From: realtyem
Date: Wed, 9 Nov 2022 06:02:15 -0600
Subject: Modernize configure_workers_and_start.py bootstrapping script for Dockerfile-workers.
(#14294) --- changelog.d/14294.docker | 1 + docker/configure_workers_and_start.py | 29 ++++++++++++++--------------- 2 files changed, 15 insertions(+), 15 deletions(-) create mode 100644 changelog.d/14294.docker diff --git a/changelog.d/14294.docker b/changelog.d/14294.docker new file mode 100644 index 0000000000..1489470408 --- /dev/null +++ b/changelog.d/14294.docker @@ -0,0 +1 @@ +Remove references to legacy worker types in the multi-worker Dockerfile. diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index da259129d1..62b1bab297 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -20,7 +20,7 @@ # * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver. # * SYNAPSE_REPORT_STATS: Whether to report stats. # * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG -# below. Leave empty for no workers, or set to '*' for all possible workers. +# below. Leave empty for no workers. # * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files # will be treated as Application Service registration files. # * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format. @@ -58,10 +58,10 @@ MAIN_PROCESS_HTTP_LISTENER_PORT = 8080 # have to attach by instance_map to the master process and have client endpoints. WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "pusher": { - "app": "synapse.app.pusher", + "app": "synapse.app.generic_worker", "listener_resources": [], "endpoint_patterns": [], - "shared_extra_conf": {"start_pushers": False}, + "shared_extra_conf": {}, "worker_extra_conf": "", }, "user_dir": { @@ -84,7 +84,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "^/_synapse/admin/v1/media/.*$", "^/_synapse/admin/v1/quarantine_media/.*$", ], - "shared_extra_conf": {"enable_media_repo": False}, + # The first configured media worker will run the media background jobs + "shared_extra_conf": { + "enable_media_repo": False, + "media_instance_running_background_jobs": "media_repository1", + }, "worker_extra_conf": "enable_media_repo: true", }, "appservice": { @@ -95,10 +99,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "worker_extra_conf": "", }, "federation_sender": { - "app": "synapse.app.federation_sender", + "app": "synapse.app.generic_worker", "listener_resources": [], "endpoint_patterns": [], - "shared_extra_conf": {"send_federation": False}, + "shared_extra_conf": {}, "worker_extra_conf": "", }, "synchrotron": { @@ -205,7 +209,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "worker_extra_conf": "", }, "frontend_proxy": { - "app": "synapse.app.frontend_proxy", + "app": "synapse.app.generic_worker", "listener_resources": ["client", "replication"], "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"], "shared_extra_conf": {}, @@ -326,7 +330,7 @@ def add_worker_roles_to_shared_config( worker_port: int, ) -> None: """Given a dictionary representing a config file shared across all workers, - append sharded worker information to it for the current worker_type instance. + append appropriate worker information to it for the current worker_type instance. 
Args: shared_config: The config dict that all worker instances share (after being converted to YAML) @@ -359,7 +363,7 @@ def add_worker_roles_to_shared_config( elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]: # Update the list of stream writers - # It's convienent that the name of the worker type is the same as the event stream + # It's convenient that the name of the worker type is the same as the stream to write shared_config.setdefault("stream_writers", {}).setdefault( worker_type, [] ).append(worker_name) @@ -371,10 +375,6 @@ def add_worker_roles_to_shared_config( "port": worker_port, } - elif worker_type == "media_repository": - # The first configured media worker will run the media background jobs - shared_config.setdefault("media_instance_running_background_jobs", worker_name) - def generate_base_homeserver_config() -> None: """Starts Synapse and generates a basic homeserver config, which will later be @@ -483,8 +483,7 @@ def generate_worker_files( if worker_config: worker_config = worker_config.copy() else: - log(worker_type + " is an unknown worker type! It will be ignored") - continue + error(worker_type + " is an unknown worker type! Please fix!") new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1 worker_type_counter[worker_type] = new_worker_count -- cgit 1.5.1 From 21447c91021c51b2dce9dd4462043ff79358092e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Nov 2022 12:16:12 +0000 Subject: Bump dawidd6/action-download-artifact from 2.24.0 to 2.24.1 (#14398) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions Co-authored-by: reivilibre --- .github/workflows/docs-pr-netlify.yaml | 2 +- changelog.d/14398.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14398.misc diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml index 5c90cf714c..798a863a0c 100644 --- a/.github/workflows/docs-pr-netlify.yaml +++ b/.github/workflows/docs-pr-netlify.yaml @@ -14,7 +14,7 @@ jobs: # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess: - name: 📥 Download artifact - uses: dawidd6/action-download-artifact@46b4ae883bf0726f5949d025d31cb62c7a5ac70c # v2.24.0 + uses: dawidd6/action-download-artifact@b12b127cf24433d14b4f93cee62f5465076ba82a # v2.24.1 with: workflow: docs-pr.yaml run_id: ${{ github.event.workflow_run.id }} diff --git a/changelog.d/14398.misc b/changelog.d/14398.misc new file mode 100644 index 0000000000..c18fb6d2c6 --- /dev/null +++ b/changelog.d/14398.misc @@ -0,0 +1 @@ +Bump dawidd6/action-download-artifact from 2.24.0 to 2.24.1. -- cgit 1.5.1 From e9a4343cb2daa55503bb2a2d1431d83bf9773e68 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Nov 2022 09:55:34 -0500 Subject: Drop support for Postgres 10 in full text search code. 
(#14397) --- changelog.d/14397.removal | 1 + synapse/storage/databases/main/search.py | 50 +++++++++++------------ synapse/storage/engines/postgres.py | 16 -------- tests/storage/test_room_search.py | 69 ++++++++------------------------ 4 files changed, 41 insertions(+), 95 deletions(-) create mode 100644 changelog.d/14397.removal diff --git a/changelog.d/14397.removal b/changelog.d/14397.removal new file mode 100644 index 0000000000..e96b3de2bd --- /dev/null +++ b/changelog.d/14397.removal @@ -0,0 +1 @@ +Remove support for PostgreSQL 10. diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index e9588d1755..3fe433f66c 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -463,18 +463,17 @@ class SearchStore(SearchBackgroundUpdateStore): if isinstance(self.database_engine, PostgresEngine): search_query = search_term - tsquery_func = self.database_engine.tsquery_func - sql = f""" - SELECT ts_rank_cd(vector, {tsquery_func}('english', ?)) AS rank, + sql = """ + SELECT ts_rank_cd(vector, websearch_to_tsquery('english', ?)) AS rank, room_id, event_id FROM event_search - WHERE vector @@ {tsquery_func}('english', ?) + WHERE vector @@ websearch_to_tsquery('english', ?) """ args = [search_query, search_query] + args - count_sql = f""" + count_sql = """ SELECT room_id, count(*) as count FROM event_search - WHERE vector @@ {tsquery_func}('english', ?) + WHERE vector @@ websearch_to_tsquery('english', ?) """ count_args = [search_query] + count_args elif isinstance(self.database_engine, Sqlite3Engine): @@ -523,9 +522,7 @@ class SearchStore(SearchBackgroundUpdateStore): highlights = None if isinstance(self.database_engine, PostgresEngine): - highlights = await self._find_highlights_in_postgres( - search_query, events, tsquery_func - ) + highlights = await self._find_highlights_in_postgres(search_query, events) count_sql += " GROUP BY room_id" @@ -604,18 +601,17 @@ class SearchStore(SearchBackgroundUpdateStore): if isinstance(self.database_engine, PostgresEngine): search_query = search_term - tsquery_func = self.database_engine.tsquery_func - sql = f""" - SELECT ts_rank_cd(vector, {tsquery_func}('english', ?)) as rank, + sql = """ + SELECT ts_rank_cd(vector, websearch_to_tsquery('english', ?)) as rank, origin_server_ts, stream_ordering, room_id, event_id FROM event_search - WHERE vector @@ {tsquery_func}('english', ?) AND + WHERE vector @@ websearch_to_tsquery('english', ?) AND """ args = [search_query, search_query] + args - count_sql = f""" + count_sql = """ SELECT room_id, count(*) as count FROM event_search - WHERE vector @@ {tsquery_func}('english', ?) AND + WHERE vector @@ websearch_to_tsquery('english', ?) AND """ count_args = [search_query] + count_args elif isinstance(self.database_engine, Sqlite3Engine): @@ -686,9 +682,7 @@ class SearchStore(SearchBackgroundUpdateStore): highlights = None if isinstance(self.database_engine, PostgresEngine): - highlights = await self._find_highlights_in_postgres( - search_query, events, tsquery_func - ) + highlights = await self._find_highlights_in_postgres(search_query, events) count_sql += " GROUP BY room_id" @@ -714,7 +708,7 @@ class SearchStore(SearchBackgroundUpdateStore): } async def _find_highlights_in_postgres( - self, search_query: str, events: List[EventBase], tsquery_func: str + self, search_query: str, events: List[EventBase] ) -> Set[str]: """Given a list of events and a search term, return a list of words that match from the content of the event. 
@@ -725,7 +719,6 @@ class SearchStore(SearchBackgroundUpdateStore): Args: search_query events: A list of events - tsquery_func: The tsquery_* function to use when making queries Returns: A set of strings. @@ -758,13 +751,16 @@ class SearchStore(SearchBackgroundUpdateStore): while stop_sel in value: stop_sel += ">" - query = f"SELECT ts_headline(?, {tsquery_func}('english', ?), %s)" % ( - _to_postgres_options( - { - "StartSel": start_sel, - "StopSel": stop_sel, - "MaxFragments": "50", - } + query = ( + "SELECT ts_headline(?, websearch_to_tsquery('english', ?), %s)" + % ( + _to_postgres_options( + { + "StartSel": start_sel, + "StopSel": stop_sel, + "MaxFragments": "50", + } + ) ) ) txn.execute(query, (value, search_query)) diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 0c4fd88914..719a517336 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -170,22 +170,6 @@ class PostgresEngine( """Do we support the `RETURNING` clause in insert/update/delete?""" return True - @property - def tsquery_func(self) -> str: - """ - Selects a tsquery_* func to use. - - Ref: https://www.postgresql.org/docs/current/textsearch-controls.html - - Returns: - The function name. - """ - # Postgres 11 added support for websearch_to_tsquery. - assert self._version is not None - if self._version >= 110000: - return "websearch_to_tsquery" - return "plainto_tsquery" - def is_deadlock(self, error: Exception) -> bool: if isinstance(error, psycopg2.DatabaseError): # https://www.postgresql.org/docs/current/static/errcodes-appendix.html diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py index 868b5bee84..ef850daa73 100644 --- a/tests/storage/test_room_search.py +++ b/tests/storage/test_room_search.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Tuple, Union +from typing import List, Tuple from unittest.case import SkipTest -from unittest.mock import PropertyMock, patch from twisted.test.proto_helpers import MemoryReactor @@ -220,10 +219,8 @@ class MessageSearchTest(HomeserverTestCase): PHRASE = "the quick brown fox jumps over the lazy dog" - # Each entry is a search query, followed by either a boolean of whether it is - # in the phrase OR a tuple of booleans: whether it matches using websearch - # and using plain search. - COMMON_CASES: List[Tuple[str, Union[bool, Tuple[bool, bool]]]] = [ + # Each entry is a search query, followed by a boolean of whether it is in the phrase. + COMMON_CASES = [ ("nope", False), ("brown", True), ("quick brown", True), @@ -231,13 +228,13 @@ class MessageSearchTest(HomeserverTestCase): ("quick \t brown", True), ("jump", True), ("brown nope", False), - ('"brown quick"', (False, True)), + ('"brown quick"', False), ('"jumps over"', True), - ('"quick fox"', (False, True)), + ('"quick fox"', False), ("nope OR doublenope", False), - ("furphy OR fox", (True, False)), - ("fox -nope", (True, False)), - ("fox -brown", (False, True)), + ("furphy OR fox", True), + ("fox -nope", True), + ("fox -brown", False), ('"fox" quick', True), ('"quick brown', True), ('" quick "', True), @@ -246,11 +243,11 @@ class MessageSearchTest(HomeserverTestCase): # TODO Test non-ASCII cases. # Case that fail on SQLite. - POSTGRES_CASES: List[Tuple[str, Union[bool, Tuple[bool, bool]]]] = [ + POSTGRES_CASES = [ # SQLite treats NOT as a binary operator. 
- ("- fox", (False, True)), - ("- nope", (True, False)), - ('"-fox quick', (False, True)), + ("- fox", False), + ("- nope", True), + ('"-fox quick', False), # PostgreSQL skips stop words. ('"the quick brown"', True), ('"over lazy"', True), @@ -275,7 +272,7 @@ class MessageSearchTest(HomeserverTestCase): if isinstance(main_store.database_engine, PostgresEngine): assert main_store.database_engine._version is not None found = main_store.database_engine._version < 140000 - self.COMMON_CASES.append(('"fox quick', (found, True))) + self.COMMON_CASES.append(('"fox quick', found)) def test_tokenize_query(self) -> None: """Test the custom logic to tokenize a user's query.""" @@ -315,16 +312,10 @@ class MessageSearchTest(HomeserverTestCase): ) def _check_test_cases( - self, - store: DataStore, - cases: List[Tuple[str, Union[bool, Tuple[bool, bool]]]], - index=0, + self, store: DataStore, cases: List[Tuple[str, bool]] ) -> None: # Run all the test cases versus search_msgs for query, expect_to_contain in cases: - if isinstance(expect_to_contain, tuple): - expect_to_contain = expect_to_contain[index] - result = self.get_success( store.search_msgs([self.room_id], query, ["content.body"]) ) @@ -343,9 +334,6 @@ class MessageSearchTest(HomeserverTestCase): # Run them again versus search_rooms for query, expect_to_contain in cases: - if isinstance(expect_to_contain, tuple): - expect_to_contain = expect_to_contain[index] - result = self.get_success( store.search_rooms([self.room_id], query, ["content.body"], 10) ) @@ -366,38 +354,15 @@ class MessageSearchTest(HomeserverTestCase): """ Test searching for phrases using typical web search syntax, as per postgres' websearch_to_tsquery. This test is skipped unless the postgres instance supports websearch_to_tsquery. - """ - - store = self.hs.get_datastores().main - if not isinstance(store.database_engine, PostgresEngine): - raise SkipTest("Test only applies when postgres is used as the database") - - if store.database_engine.tsquery_func != "websearch_to_tsquery": - raise SkipTest( - "Test only applies when postgres supporting websearch_to_tsquery is used as the database" - ) - self._check_test_cases(store, self.COMMON_CASES + self.POSTGRES_CASES, index=0) - - def test_postgres_non_web_search_for_phrase(self): - """ - Test postgres searching for phrases without using web search, which is used when websearch_to_tsquery isn't - supported by the current postgres version. + See https://www.postgresql.org/docs/current/textsearch-controls.html """ store = self.hs.get_datastores().main if not isinstance(store.database_engine, PostgresEngine): raise SkipTest("Test only applies when postgres is used as the database") - # Patch supports_websearch_to_tsquery to always return False to ensure we're testing the plainto_tsquery path. 
- with patch( - "synapse.storage.engines.postgres.PostgresEngine.tsquery_func", - new_callable=PropertyMock, - ) as supports_websearch_to_tsquery: - supports_websearch_to_tsquery.return_value = "plainto_tsquery" - self._check_test_cases( - store, self.COMMON_CASES + self.POSTGRES_CASES, index=1 - ) + self._check_test_cases(store, self.COMMON_CASES + self.POSTGRES_CASES) def test_sqlite_search(self): """ @@ -407,4 +372,4 @@ class MessageSearchTest(HomeserverTestCase): if not isinstance(store.database_engine, Sqlite3Engine): raise SkipTest("Test only applies when sqlite is used as the database") - self._check_test_cases(store, self.COMMON_CASES, index=0) + self._check_test_cases(store, self.COMMON_CASES) -- cgit 1.5.1 From d10a85ec9eac6f31aa82a5f07d74e5914b18b320 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 10 Nov 2022 12:17:46 +0000 Subject: Quieter logging for stateres failure at missing prev events (#14346) --- changelog.d/14346.misc | 1 + synapse/handlers/federation_event.py | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14346.misc diff --git a/changelog.d/14346.misc b/changelog.d/14346.misc new file mode 100644 index 0000000000..9833b0733a --- /dev/null +++ b/changelog.d/14346.misc @@ -0,0 +1 @@ +Concisely log a failure to resolve state due to missing `prev_events`. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 9ca5df7c78..f7223b03c3 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1065,10 +1065,9 @@ class FederationEventHandler: state_res_store=StateResolutionStore(self._store), ) - except Exception: + except Exception as e: logger.warning( - "Error attempting to resolve state at missing prev_events", - exc_info=True, + "Error attempting to resolve state at missing prev_events: %s", e ) raise FederationError( "ERROR", -- cgit 1.5.1 From b2c2b030798d0e74d3bf1afb4726465b53620638 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 10 Nov 2022 19:02:27 +0000 Subject: Fix PostgreSQL sometimes using table scans for `event_search` (#14409) PostgreSQL may underestimate the number of distinct `room_id`s in `event_search`, which can cause it to use table scans for queries for multiple rooms. Fix this by setting `n_distinct` on the column. Resolves #14402. Signed-off-by: Sean Quah --- changelog.d/14409.bugfix | 1 + .../11event_search_room_id_n_distinct.sql.postgres | 33 ++++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 changelog.d/14409.bugfix create mode 100644 synapse/storage/schema/main/delta/73/11event_search_room_id_n_distinct.sql.postgres diff --git a/changelog.d/14409.bugfix b/changelog.d/14409.bugfix new file mode 100644 index 0000000000..f720700653 --- /dev/null +++ b/changelog.d/14409.bugfix @@ -0,0 +1 @@ +Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. 
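To unpack the arithmetic in the migration below: PostgreSQL reads a negative `n_distinct` as the number of distinct values divided by the row count, negated. A small sketch of the resulting estimate (the row count is invented for illustration):

    # Sketch of how the planner interprets a negative n_distinct setting.
    def estimated_distinct_values(n_distinct: float, row_count: int) -> float:
        if n_distinct < 0:
            return -n_distinct * row_count  # a ratio of the table size
        return n_distinct  # otherwise an absolute count

    # n_distinct = -0.01 means roughly 100 rows per distinct value, so a
    # 10M-row event_search table is assumed to hold ~100,000 distinct room
    # IDs, enough for the planner to prefer index scans over seq scans.
    assert estimated_distinct_values(-0.01, 10_000_000) == 100_000.0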
diff --git a/synapse/storage/schema/main/delta/73/11event_search_room_id_n_distinct.sql.postgres b/synapse/storage/schema/main/delta/73/11event_search_room_id_n_distinct.sql.postgres new file mode 100644 index 0000000000..93cdaefca1 --- /dev/null +++ b/synapse/storage/schema/main/delta/73/11event_search_room_id_n_distinct.sql.postgres @@ -0,0 +1,33 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- By default the postgres statistics collector massively underestimates the +-- number of distinct rooms in `event_search`, which can cause postgres to use +-- table scans for queries for multiple rooms. +-- +-- To work around this we can manually tell postgres the number of distinct rooms +-- by setting `n_distinct` (a negative value here is the number of distinct values +-- divided by the number of rows, so -0.01 means on average there are 100 rows per +-- distinct value). We don't need a particularly accurate number here, as a) we just +-- want it to always use index scans and b) our estimate is going to be better than the +-- one made by the statistics collector. + +ALTER TABLE event_search ALTER COLUMN room_id SET (n_distinct = -0.01); + +-- Ideally we'd do an `ANALYZE event_search (room_id)` here so that +-- the above gets picked up immediately, but that can take a bit of time so we +-- rely on the autovacuum eventually getting run and doing that in the +-- background for us. -- cgit 1.5.1 From 13ca8bb2fc05d338ccf62e6f8d1cbf5021d935ba Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 10 Nov 2022 15:33:34 -0500 Subject: Remove duplicated code to evict entries. (#14410) This code was factored out to a method, but also left in-place. Calling this twice in a row makes no sense: the first call will reduce the size appropriately, but the loop will immediately exit since the cache size was already reduced. --- changelog.d/14410.misc | 1 + synapse/util/caches/stream_change_cache.py | 11 ++--------- 2 files changed, 3 insertions(+), 9 deletions(-) create mode 100644 changelog.d/14410.misc diff --git a/changelog.d/14410.misc b/changelog.d/14410.misc new file mode 100644 index 0000000000..f085a8bfb2 --- /dev/null +++ b/changelog.d/14410.misc @@ -0,0 +1 @@ +Remove unreachable code. diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 330709b8b7..666f4b6895 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -72,7 +72,7 @@ class StreamChangeCache: items from the cache. Returns: - bool: Whether the cache changed size or not. + Whether the cache changed size or not. 
""" new_size = math.floor(self._original_max_size * factor) if new_size != self._max_size: @@ -188,14 +188,8 @@ class StreamChangeCache: self._entity_to_key[entity] = stream_pos self._evict() - # if the cache is too big, remove entries - while len(self._cache) > self._max_size: - k, r = self._cache.popitem(0) - self._earliest_known_stream_pos = max(k, self._earliest_known_stream_pos) - for entity in r: - del self._entity_to_key[entity] - def _evict(self) -> None: + # if the cache is too big, remove entries while len(self._cache) > self._max_size: k, r = self._cache.popitem(0) self._earliest_known_stream_pos = max(k, self._earliest_known_stream_pos) @@ -203,7 +197,6 @@ class StreamChangeCache: self._entity_to_key.pop(entity, None) def get_max_pos_of_last_change(self, entity: EntityType) -> int: - """Returns an upper bound of the stream id of the last change to an entity. """ -- cgit 1.5.1 From 3a4f80f8c6f39c5549c56c044e10b35064d8d22f Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Fri, 11 Nov 2022 10:51:49 +0000 Subject: Merge/remove `Slaved*` stores into `WorkerStores` (#14375) --- changelog.d/14375.misc | 1 + synapse/app/admin_cmd.py | 36 ++++++++--- synapse/app/generic_worker.py | 44 ++++++++++---- synapse/replication/slave/storage/devices.py | 79 ------------------------ synapse/replication/slave/storage/events.py | 79 ------------------------ synapse/replication/slave/storage/filtering.py | 35 ----------- synapse/replication/slave/storage/keys.py | 20 ------ synapse/replication/slave/storage/push_rule.py | 35 ----------- synapse/replication/slave/storage/pushers.py | 47 -------------- synapse/storage/databases/main/__init__.py | 35 ----------- synapse/storage/databases/main/devices.py | 81 ++++++++++++++++++++++--- synapse/storage/databases/main/events_worker.py | 16 +++++ synapse/storage/databases/main/filtering.py | 4 +- synapse/storage/databases/main/push_rule.py | 19 ++++-- synapse/storage/databases/main/pusher.py | 41 +++++++++++-- synapse/storage/databases/main/stream.py | 1 + tests/replication/slave/storage/test_events.py | 6 +- 17 files changed, 202 insertions(+), 377 deletions(-) create mode 100644 changelog.d/14375.misc delete mode 100644 synapse/replication/slave/storage/devices.py delete mode 100644 synapse/replication/slave/storage/events.py delete mode 100644 synapse/replication/slave/storage/filtering.py delete mode 100644 synapse/replication/slave/storage/keys.py delete mode 100644 synapse/replication/slave/storage/push_rule.py delete mode 100644 synapse/replication/slave/storage/pushers.py diff --git a/changelog.d/14375.misc b/changelog.d/14375.misc new file mode 100644 index 0000000000..d0369b9b8c --- /dev/null +++ b/changelog.d/14375.misc @@ -0,0 +1 @@ +Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). 
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 3c8c00ea5b..165d1c5db0 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -28,10 +28,6 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.events import EventBase from synapse.handlers.admin import ExfiltrationWriter -from synapse.replication.slave.storage.devices import SlavedDeviceStore -from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.filtering import SlavedFilteringStore -from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore from synapse.server import HomeServer from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main.account_data import AccountDataWorkerStore @@ -40,10 +36,24 @@ from synapse.storage.databases.main.appservice import ( ApplicationServiceWorkerStore, ) from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore +from synapse.storage.databases.main.devices import DeviceWorkerStore +from synapse.storage.databases.main.event_federation import EventFederationWorkerStore +from synapse.storage.databases.main.event_push_actions import ( + EventPushActionsWorkerStore, +) +from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.storage.databases.main.filtering import FilteringWorkerStore +from synapse.storage.databases.main.push_rule import PushRulesWorkerStore from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.storage.databases.main.registration import RegistrationWorkerStore +from synapse.storage.databases.main.relations import RelationsWorkerStore from synapse.storage.databases.main.room import RoomWorkerStore +from synapse.storage.databases.main.roommember import RoomMemberWorkerStore +from synapse.storage.databases.main.signatures import SignatureWorkerStore +from synapse.storage.databases.main.state import StateGroupWorkerStore +from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.databases.main.tags import TagsWorkerStore +from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore from synapse.types import StateMap from synapse.util import SYNAPSE_VERSION from synapse.util.logcontext import LoggingContext @@ -52,17 +62,25 @@ logger = logging.getLogger("synapse.app.admin_cmd") class AdminCmdSlavedStore( - SlavedFilteringStore, - SlavedPushRuleStore, - SlavedEventStore, - SlavedDeviceStore, + FilteringWorkerStore, + DeviceWorkerStore, TagsWorkerStore, DeviceInboxWorkerStore, AccountDataWorkerStore, + PushRulesWorkerStore, ApplicationServiceTransactionWorkerStore, ApplicationServiceWorkerStore, - RegistrationWorkerStore, + RoomMemberWorkerStore, + RelationsWorkerStore, + EventFederationWorkerStore, + EventPushActionsWorkerStore, + StateGroupWorkerStore, + SignatureWorkerStore, + UserErasureWorkerStore, ReceiptsWorkerStore, + StreamWorkerStore, + EventsWorkerStore, + RegistrationWorkerStore, RoomWorkerStore, ): def __init__( diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index cb5892f041..51446b49cd 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -48,12 +48,6 @@ from synapse.http.site import SynapseRequest, SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.http import 
REPLICATION_PREFIX, ReplicationRestResource -from synapse.replication.slave.storage.devices import SlavedDeviceStore -from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.filtering import SlavedFilteringStore -from synapse.replication.slave.storage.keys import SlavedKeyStore -from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore -from synapse.replication.slave.storage.pushers import SlavedPusherStore from synapse.rest.admin import register_servlets_for_media_repo from synapse.rest.client import ( account_data, @@ -101,8 +95,16 @@ from synapse.storage.databases.main.appservice import ( from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.client_ips import ClientIpWorkerStore from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore +from synapse.storage.databases.main.devices import DeviceWorkerStore from synapse.storage.databases.main.directory import DirectoryWorkerStore from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore +from synapse.storage.databases.main.event_federation import EventFederationWorkerStore +from synapse.storage.databases.main.event_push_actions import ( + EventPushActionsWorkerStore, +) +from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.storage.databases.main.filtering import FilteringWorkerStore +from synapse.storage.databases.main.keys import KeyStore from synapse.storage.databases.main.lock import LockStore from synapse.storage.databases.main.media_repository import MediaRepositoryStore from synapse.storage.databases.main.metrics import ServerMetricsStore @@ -111,17 +113,25 @@ from synapse.storage.databases.main.monthly_active_users import ( ) from synapse.storage.databases.main.presence import PresenceStore from synapse.storage.databases.main.profile import ProfileWorkerStore +from synapse.storage.databases.main.push_rule import PushRulesWorkerStore +from synapse.storage.databases.main.pusher import PusherWorkerStore from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.storage.databases.main.registration import RegistrationWorkerStore +from synapse.storage.databases.main.relations import RelationsWorkerStore from synapse.storage.databases.main.room import RoomWorkerStore from synapse.storage.databases.main.room_batch import RoomBatchStore +from synapse.storage.databases.main.roommember import RoomMemberWorkerStore from synapse.storage.databases.main.search import SearchStore from synapse.storage.databases.main.session import SessionStore +from synapse.storage.databases.main.signatures import SignatureWorkerStore +from synapse.storage.databases.main.state import StateGroupWorkerStore from synapse.storage.databases.main.stats import StatsStore +from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.databases.main.tags import TagsWorkerStore from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore +from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore from synapse.types import JsonDict from synapse.util import SYNAPSE_VERSION from synapse.util.httpresourcetree import create_resource_tree @@ -232,26 +242,36 @@ class GenericWorkerSlavedStore( EndToEndRoomKeyStore, PresenceStore, 
DeviceInboxWorkerStore, - SlavedDeviceStore, - SlavedPushRuleStore, + DeviceWorkerStore, TagsWorkerStore, AccountDataWorkerStore, - SlavedPusherStore, CensorEventsStore, ClientIpWorkerStore, - SlavedEventStore, - SlavedKeyStore, + # KeyStore isn't really safe to use from a worker, but for now we do so and hope that + # the races it creates aren't too bad. + KeyStore, RoomWorkerStore, RoomBatchStore, DirectoryWorkerStore, + PushRulesWorkerStore, ApplicationServiceTransactionWorkerStore, ApplicationServiceWorkerStore, ProfileWorkerStore, - SlavedFilteringStore, + FilteringWorkerStore, MonthlyActiveUsersWorkerStore, MediaRepositoryStore, ServerMetricsStore, + PusherWorkerStore, + RoomMemberWorkerStore, + RelationsWorkerStore, + EventFederationWorkerStore, + EventPushActionsWorkerStore, + StateGroupWorkerStore, + SignatureWorkerStore, + UserErasureWorkerStore, ReceiptsWorkerStore, + StreamWorkerStore, + EventsWorkerStore, RegistrationWorkerStore, SearchStore, TransactionWorkerStore, diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py deleted file mode 100644 index 6fcade510a..0000000000 --- a/synapse/replication/slave/storage/devices.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import TYPE_CHECKING, Any, Iterable - -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker -from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection -from synapse.storage.databases.main.devices import DeviceWorkerStore - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class SlavedDeviceStore(DeviceWorkerStore): - def __init__( - self, - database: DatabasePool, - db_conn: LoggingDatabaseConnection, - hs: "HomeServer", - ): - self.hs = hs - - self._device_list_id_gen = SlavedIdTracker( - db_conn, - "device_lists_stream", - "stream_id", - extra_tables=[ - ("user_signature_stream", "stream_id"), - ("device_lists_outbound_pokes", "stream_id"), - ("device_lists_changes_in_room", "stream_id"), - ], - ) - - super().__init__(database, db_conn, hs) - - def get_device_stream_token(self) -> int: - return self._device_list_id_gen.get_current_token() - - def process_replication_rows( - self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] - ) -> None: - if stream_name == DeviceListsStream.NAME: - self._device_list_id_gen.advance(instance_name, token) - self._invalidate_caches_for_devices(token, rows) - elif stream_name == UserSignatureStream.NAME: - self._device_list_id_gen.advance(instance_name, token) - for row in rows: - self._user_signature_stream_cache.entity_has_changed(row.user_id, token) - return super().process_replication_rows(stream_name, instance_name, token, rows) - - def _invalidate_caches_for_devices( - self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow] - ) -> None: - for row in rows: - # The entities are either user IDs (starting with '@') whose devices - # have changed, or remote servers that we need to tell about - # changes. - if row.entity.startswith("@"): - self._device_list_stream_cache.entity_has_changed(row.entity, token) - self.get_cached_devices_for_user.invalidate((row.entity,)) - self._get_cached_user_device.invalidate((row.entity,)) - self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,)) - - else: - self._device_list_federation_stream_cache.entity_has_changed( - row.entity, token - ) diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py deleted file mode 100644 index fe47778cb1..0000000000 --- a/synapse/replication/slave/storage/events.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import logging -from typing import TYPE_CHECKING - -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection -from synapse.storage.databases.main.event_federation import EventFederationWorkerStore -from synapse.storage.databases.main.event_push_actions import ( - EventPushActionsWorkerStore, -) -from synapse.storage.databases.main.events_worker import EventsWorkerStore -from synapse.storage.databases.main.relations import RelationsWorkerStore -from synapse.storage.databases.main.roommember import RoomMemberWorkerStore -from synapse.storage.databases.main.signatures import SignatureWorkerStore -from synapse.storage.databases.main.state import StateGroupWorkerStore -from synapse.storage.databases.main.stream import StreamWorkerStore -from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore -from synapse.util.caches.stream_change_cache import StreamChangeCache - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -# So, um, we want to borrow a load of functions intended for reading from -# a DataStore, but we don't want to take functions that either write to the -# DataStore or are cached and don't have cache invalidation logic. -# -# Rather than write duplicate versions of those functions, or lift them to -# a common base class, we going to grab the underlying __func__ object from -# the method descriptor on the DataStore and chuck them into our class. - - -class SlavedEventStore( - EventFederationWorkerStore, - RoomMemberWorkerStore, - EventPushActionsWorkerStore, - StreamWorkerStore, - StateGroupWorkerStore, - SignatureWorkerStore, - EventsWorkerStore, - UserErasureWorkerStore, - RelationsWorkerStore, -): - def __init__( - self, - database: DatabasePool, - db_conn: LoggingDatabaseConnection, - hs: "HomeServer", - ): - super().__init__(database, db_conn, hs) - - events_max = self._stream_id_gen.get_current_token() - curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict( - db_conn, - "current_state_delta_stream", - entity_column="room_id", - stream_column="stream_id", - max_value=events_max, # As we share the stream id with events token - limit=1000, - ) - self._curr_state_delta_stream_cache = StreamChangeCache( - "_curr_state_delta_stream_cache", - min_curr_state_delta_id, - prefilled_cache=curr_state_delta_prefill, - ) diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py deleted file mode 100644 index c52679cd60..0000000000 --- a/synapse/replication/slave/storage/filtering.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import TYPE_CHECKING - -from synapse.storage._base import SQLBaseStore -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection -from synapse.storage.databases.main.filtering import FilteringStore - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class SlavedFilteringStore(SQLBaseStore): - def __init__( - self, - database: DatabasePool, - db_conn: LoggingDatabaseConnection, - hs: "HomeServer", - ): - super().__init__(database, db_conn, hs) - - # Filters are immutable so this cache doesn't need to be expired - get_user_filter = FilteringStore.__dict__["get_user_filter"] diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py deleted file mode 100644 index a00b38c512..0000000000 --- a/synapse/replication/slave/storage/keys.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.storage.databases.main.keys import KeyStore - -# KeyStore isn't really safe to use from a worker, but for now we do so and hope that -# the races it creates aren't too bad. - -SlavedKeyStore = KeyStore diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py deleted file mode 100644 index 5e65eaf1e0..0000000000 --- a/synapse/replication/slave/storage/push_rule.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import Any, Iterable - -from synapse.replication.tcp.streams import PushRulesStream -from synapse.storage.databases.main.push_rule import PushRulesWorkerStore - -from .events import SlavedEventStore - - -class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore): - def get_max_push_rules_stream_id(self) -> int: - return self._push_rules_stream_id_gen.get_current_token() - - def process_replication_rows( - self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] - ) -> None: - if stream_name == PushRulesStream.NAME: - self._push_rules_stream_id_gen.advance(instance_name, token) - for row in rows: - self.get_push_rules_for_user.invalidate((row.user_id,)) - self.push_rules_stream_cache.entity_has_changed(row.user_id, token) - return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py deleted file mode 100644 index 44ed20e424..0000000000 --- a/synapse/replication/slave/storage/pushers.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import TYPE_CHECKING, Any, Iterable - -from synapse.replication.tcp.streams import PushersStream -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection -from synapse.storage.databases.main.pusher import PusherWorkerStore - -from ._slaved_id_tracker import SlavedIdTracker - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class SlavedPusherStore(PusherWorkerStore): - def __init__( - self, - database: DatabasePool, - db_conn: LoggingDatabaseConnection, - hs: "HomeServer", - ): - super().__init__(database, db_conn, hs) - self._pushers_id_gen = SlavedIdTracker( # type: ignore - db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")] - ) - - def get_pushers_stream_token(self) -> int: - return self._pushers_id_gen.get_current_token() - - def process_replication_rows( - self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] - ) -> None: - if stream_name == PushersStream.NAME: - self._pushers_id_gen.advance(instance_name, token) - return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index cfaedf5e0c..0e47592be3 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -26,9 +26,7 @@ from synapse.storage.database import ( from synapse.storage.databases.main.stats import UserSortOrder from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.types import Cursor -from synapse.storage.util.id_generators import StreamIdGenerator from synapse.types import JsonDict, get_domain_from_id -from synapse.util.caches.stream_change_cache import StreamChangeCache from .account_data import AccountDataStore from .appservice import 
ApplicationServiceStore, ApplicationServiceTransactionStore @@ -138,41 +136,8 @@ class DataStore( self._clock = hs.get_clock() self.database_engine = database.engine - self._device_list_id_gen = StreamIdGenerator( - db_conn, - "device_lists_stream", - "stream_id", - extra_tables=[ - ("user_signature_stream", "stream_id"), - ("device_lists_outbound_pokes", "stream_id"), - ("device_lists_changes_in_room", "stream_id"), - ], - ) - super().__init__(database, db_conn, hs) - events_max = self._stream_id_gen.get_current_token() - curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict( - db_conn, - "current_state_delta_stream", - entity_column="room_id", - stream_column="stream_id", - max_value=events_max, # As we share the stream id with events token - limit=1000, - ) - self._curr_state_delta_stream_cache = StreamChangeCache( - "_curr_state_delta_stream_cache", - min_curr_state_delta_id, - prefilled_cache=curr_state_delta_prefill, - ) - - self._stream_order_on_start = self.get_room_max_stream_ordering() - self._min_stream_order_on_start = self.get_room_min_stream_ordering() - - def get_device_stream_token(self) -> int: - # TODO: shouldn't this be moved to `DeviceWorkerStore`? - return self._device_list_id_gen.get_current_token() - async def get_users(self) -> List[JsonDict]: """Function to retrieve a list of users in users table. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 979dd4e17e..aa58c2adc3 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import abc import logging from typing import ( TYPE_CHECKING, @@ -39,6 +38,8 @@ from synapse.logging.opentracing import ( whitelisted_homeserver, ) from synapse.metrics.background_process_metrics import wrap_as_background_process +from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker +from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, @@ -49,6 +50,11 @@ from synapse.storage.database import ( from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyWorkerStore from synapse.storage.databases.main.roommember import RoomMemberWorkerStore from synapse.storage.types import Cursor +from synapse.storage.util.id_generators import ( + AbstractStreamIdGenerator, + AbstractStreamIdTracker, + StreamIdGenerator, +) from synapse.types import JsonDict, get_verify_key_from_cross_signing_key from synapse.util import json_decoder, json_encoder from synapse.util.caches.descriptors import cached, cachedList @@ -80,9 +86,32 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ): super().__init__(database, db_conn, hs) + if hs.config.worker.worker_app is None: + self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + db_conn, + "device_lists_stream", + "stream_id", + extra_tables=[ + ("user_signature_stream", "stream_id"), + ("device_lists_outbound_pokes", "stream_id"), + ("device_lists_changes_in_room", "stream_id"), + ], + ) + else: + self._device_list_id_gen = SlavedIdTracker( + db_conn, + "device_lists_stream", + "stream_id", + extra_tables=[ + ("user_signature_stream", "stream_id"), + ("device_lists_outbound_pokes", "stream_id"), + ("device_lists_changes_in_room", "stream_id"), + ], + ) + # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker). - device_list_max = self._device_list_id_gen.get_current_token() # type: ignore[attr-defined] + device_list_max = self._device_list_id_gen.get_current_token() device_list_prefill, min_device_list_id = self.db_pool.get_cache_dict( db_conn, "device_lists_stream", @@ -136,6 +165,39 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): self._prune_old_outbound_device_pokes, 60 * 60 * 1000 ) + def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] + ) -> None: + if stream_name == DeviceListsStream.NAME: + self._device_list_id_gen.advance(instance_name, token) + self._invalidate_caches_for_devices(token, rows) + elif stream_name == UserSignatureStream.NAME: + self._device_list_id_gen.advance(instance_name, token) + for row in rows: + self._user_signature_stream_cache.entity_has_changed(row.user_id, token) + return super().process_replication_rows(stream_name, instance_name, token, rows) + + def _invalidate_caches_for_devices( + self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow] + ) -> None: + for row in rows: + # The entities are either user IDs (starting with '@') whose devices + # have changed, or remote servers that we need to tell about + # changes. 
+ if row.entity.startswith("@"): + self._device_list_stream_cache.entity_has_changed(row.entity, token) + self.get_cached_devices_for_user.invalidate((row.entity,)) + self._get_cached_user_device.invalidate((row.entity,)) + self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,)) + + else: + self._device_list_federation_stream_cache.entity_has_changed( + row.entity, token + ) + + def get_device_stream_token(self) -> int: + return self._device_list_id_gen.get_current_token() + async def count_devices_by_users(self, user_ids: Optional[List[str]] = None) -> int: """Retrieve number of all devices of given users. Only returns number of devices that are not marked as hidden. @@ -677,11 +739,6 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): }, ) - @abc.abstractmethod - def get_device_stream_token(self) -> int: - """Get the current stream id from the _device_list_id_gen""" - ... - @trace @cancellable async def get_user_devices_from_cache( @@ -1481,6 +1538,10 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): + # Because we have write access, this will be a StreamIdGenerator + # (see DeviceWorkerStore.__init__) + _device_list_id_gen: AbstractStreamIdGenerator + def __init__( self, database: DatabasePool, @@ -1805,7 +1866,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): context, ) - async with self._device_list_id_gen.get_next_mult( # type: ignore[attr-defined] + async with self._device_list_id_gen.get_next_mult( len(device_ids) ) as stream_ids: await self.db_pool.runInteraction( @@ -2044,7 +2105,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): [], ) - async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids: # type: ignore[attr-defined] + async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids: return await self.db_pool.runInteraction( "add_device_list_outbound_pokes", add_device_list_outbound_pokes_txn, @@ -2058,7 +2119,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): updates during partial joins. 
""" - async with self._device_list_id_gen.get_next() as stream_id: # type: ignore[attr-defined] + async with self._device_list_id_gen.get_next() as stream_id: await self.db_pool.simple_upsert( table="device_lists_remote_pending", keyvalues={ diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 69fea452ad..a79091952a 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -81,6 +81,7 @@ from synapse.util import unwrapFirstError from synapse.util.async_helpers import ObservableDeferred, delay_cancellation from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.lrucache import AsyncLruCache +from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.cancellation import cancellable from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -233,6 +234,21 @@ class EventsWorkerStore(SQLBaseStore): db_conn, "events", "stream_ordering", step=-1 ) + events_max = self._stream_id_gen.get_current_token() + curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict( + db_conn, + "current_state_delta_stream", + entity_column="room_id", + stream_column="stream_id", + max_value=events_max, # As we share the stream id with events token + limit=1000, + ) + self._curr_state_delta_stream_cache: StreamChangeCache = StreamChangeCache( + "_curr_state_delta_stream_cache", + min_curr_state_delta_id, + prefilled_cache=curr_state_delta_prefill, + ) + if hs.config.worker.run_background_tasks: # We periodically clean out old transaction ID mappings self._clock.looping_call( diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index cb9ee08fa8..12f3b601f1 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -24,7 +24,7 @@ from synapse.types import JsonDict from synapse.util.caches.descriptors import cached -class FilteringStore(SQLBaseStore): +class FilteringWorkerStore(SQLBaseStore): @cached(num_args=2) async def get_user_filter( self, user_localpart: str, filter_id: Union[int, str] @@ -46,6 +46,8 @@ class FilteringStore(SQLBaseStore): return db_to_json(def_json) + +class FilteringStore(FilteringWorkerStore): async def add_user_filter(self, user_localpart: str, user_filter: JsonDict) -> int: def_json = encode_canonical_json(user_filter) diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index b6c15f29f8..8ae10f6127 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -12,13 +12,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import abc import logging from typing import ( TYPE_CHECKING, Any, Collection, Dict, + Iterable, List, Mapping, Optional, @@ -31,6 +31,7 @@ from typing import ( from synapse.api.errors import StoreError from synapse.config.homeserver import ExperimentalConfig from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker +from synapse.replication.tcp.streams import PushRulesStream from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -90,8 +91,6 @@ def _load_rules( return filtered_rules -# The ABCMeta metaclass ensures that it cannot be instantiated without -# the abstract methods being implemented. class PushRulesWorkerStore( ApplicationServiceWorkerStore, PusherWorkerStore, @@ -99,7 +98,6 @@ class PushRulesWorkerStore( ReceiptsWorkerStore, EventsWorkerStore, SQLBaseStore, - metaclass=abc.ABCMeta, ): """This is an abstract base class where subclasses must implement `get_max_push_rules_stream_id` which can be called in the initializer. @@ -136,14 +134,23 @@ class PushRulesWorkerStore( prefilled_cache=push_rules_prefill, ) - @abc.abstractmethod def get_max_push_rules_stream_id(self) -> int: """Get the position of the push rules stream. Returns: int """ - raise NotImplementedError() + return self._push_rules_stream_id_gen.get_current_token() + + def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] + ) -> None: + if stream_name == PushRulesStream.NAME: + self._push_rules_stream_id_gen.advance(instance_name, token) + for row in rows: + self.get_push_rules_for_user.invalidate((row.user_id,)) + self.push_rules_stream_cache.entity_has_changed(row.user_id, token) + return super().process_replication_rows(stream_name, instance_name, token, rows) @cached(max_entries=5000) async def get_push_rules_for_user(self, user_id: str) -> FilteredPushRules: diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index 01206950a9..4a01562d45 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -27,13 +27,19 @@ from typing import ( ) from synapse.push import PusherConfig, ThrottleParams +from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker +from synapse.replication.tcp.streams import PushersStream from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, LoggingTransaction, ) -from synapse.storage.util.id_generators import StreamIdGenerator +from synapse.storage.util.id_generators import ( + AbstractStreamIdGenerator, + AbstractStreamIdTracker, + StreamIdGenerator, +) from synapse.types import JsonDict from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -52,9 +58,21 @@ class PusherWorkerStore(SQLBaseStore): hs: "HomeServer", ): super().__init__(database, db_conn, hs) - self._pushers_id_gen = StreamIdGenerator( - db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")] - ) + + if hs.config.worker.worker_app is None: + self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + db_conn, + "pushers", + "id", + extra_tables=[("deleted_pushers", "stream_id")], + ) + else: + self._pushers_id_gen = SlavedIdTracker( + db_conn, + "pushers", + "id", + extra_tables=[("deleted_pushers", "stream_id")], + ) self.db_pool.updates.register_background_update_handler( "remove_deactivated_pushers", @@ -96,6 +114,16 @@ class 
PusherWorkerStore(SQLBaseStore): yield PusherConfig(**r) + def get_pushers_stream_token(self) -> int: + return self._pushers_id_gen.get_current_token() + + def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] + ) -> None: + if stream_name == PushersStream.NAME: + self._pushers_id_gen.advance(instance_name, token) + return super().process_replication_rows(stream_name, instance_name, token, rows) + async def get_pushers_by_app_id_and_pushkey( self, app_id: str, pushkey: str ) -> Iterator[PusherConfig]: @@ -545,8 +573,9 @@ class PusherBackgroundUpdatesStore(SQLBaseStore): class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore): - def get_pushers_stream_token(self) -> int: - return self._pushers_id_gen.get_current_token() + # Because we have write access, this will be a StreamIdGenerator + # (see PusherWorkerStore.__init__) + _pushers_id_gen: AbstractStreamIdGenerator async def add_pusher( self, diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 09ce855aa8..cc27ec3804 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -415,6 +415,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) self._stream_order_on_start = self.get_room_max_stream_ordering() + self._min_stream_order_on_start = self.get_room_min_stream_ordering() def get_room_max_stream_ordering(self) -> int: """Get the stream_ordering of regular events that we have committed up to diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index d42e36cdf1..96f3880923 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -21,11 +21,11 @@ from synapse.api.constants import ReceiptTypes from synapse.api.room_versions import RoomVersions from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_dict from synapse.handlers.room import RoomEventSource -from synapse.replication.slave.storage.events import SlavedEventStore from synapse.storage.databases.main.event_push_actions import ( NotifCounts, RoomNotifCounts, ) +from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.roommember import GetRoomsForUserWithStreamOrdering, RoomsForUser from synapse.types import PersistedEventPosition @@ -58,9 +58,9 @@ def patch__eq__(cls): return unpatch -class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): +class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): - STORE_TYPE = SlavedEventStore + STORE_TYPE = EventsWorkerStore def setUp(self): # Patch up the equality operator for events so that we can check -- cgit 1.5.1 From a3623af74e0af0d2f6cbd37b47dc54a1acd314d5 Mon Sep 17 00:00:00 2001 From: Ashish Kumar Date: Fri, 11 Nov 2022 19:38:17 +0400 Subject: Add an Admin API endpoint for looking up users based on 3PID (#14405) --- changelog.d/14405.feature | 1 + docs/admin_api/user_admin_api.md | 39 ++++++++++++++ synapse/rest/admin/__init__.py | 2 + synapse/rest/admin/users.py | 25 +++++++++ tests/rest/admin/test_user.py | 107 ++++++++++++++++++++++++++++++++++----- 5 files changed, 161 insertions(+), 13 deletions(-) create mode 100644 changelog.d/14405.feature diff --git a/changelog.d/14405.feature b/changelog.d/14405.feature new file mode 100644 index 0000000000..d3ba89b597 --- /dev/null +++ b/changelog.d/14405.feature @@ -0,0 +1 @@ +Add an [Admin 
API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index c95d6c9b05..880bef4194 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -1197,3 +1197,42 @@ Returns a `404` HTTP status code if no user was found, with a response body like ``` _Added in Synapse 1.68.0._ + + +### Find a user based on their Third Party ID (ThreePID or 3PID) + +The API is: + +``` +GET /_synapse/admin/v1/threepid/$medium/users/$address +``` + +When a user matched the given address for the given medium, an HTTP code `200` with a response body like the following is returned: + +```json +{ + "user_id": "@hello:example.org" +} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `medium` - Kind of third-party ID, either `email` or `msisdn`. +- `address` - Value of the third-party ID. + +The `address` may have characters that are not URL-safe, so it is advised to URL-encode those parameters. + +**Errors** + +Returns a `404` HTTP status code if no user was found, with a response body like this: + +```json +{ + "errcode":"M_NOT_FOUND", + "error":"User not found" +} +``` + +_Added in Synapse 1.72.0._ diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 885669f9c7..c62ea22116 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -81,6 +81,7 @@ from synapse.rest.admin.users import ( ShadowBanRestServlet, UserAdminServlet, UserByExternalId, + UserByThreePid, UserMembershipRestServlet, UserRegisterServlet, UserRestServletV2, @@ -277,6 +278,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: RoomMessagesRestServlet(hs).register(http_server) RoomTimestampToEventRestServlet(hs).register(http_server) UserByExternalId(hs).register(http_server) + UserByThreePid(hs).register(http_server) # Some servlets only get registered for the main process. if hs.config.worker.worker_app is None: diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 15ac2059aa..1951b8a9f2 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -1224,3 +1224,28 @@ class UserByExternalId(RestServlet): raise NotFoundError("User not found") return HTTPStatus.OK, {"user_id": user_id} + + +class UserByThreePid(RestServlet): + """Find a user based on 3PID of a particular medium""" + + PATTERNS = admin_patterns("/threepid/(?P[^/]*)/users/(?P
[^/]*)") + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self._store = hs.get_datastores().main + + async def on_GET( + self, + request: SynapseRequest, + medium: str, + address: str, + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self._auth, request) + + user_id = await self._store.get_user_id_by_threepid(medium, address) + + if user_id is None: + raise NotFoundError("User not found") + + return HTTPStatus.OK, {"user_id": user_id} diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 63410ffdf1..e8c9457794 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -41,14 +41,12 @@ from tests.unittest import override_config class UserRegisterTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, profile.register_servlets, ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.url = "/_synapse/admin/v1/register" self.registration_handler = Mock() @@ -446,7 +444,6 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): class UsersListTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -1108,7 +1105,6 @@ class UserDevicesTestCase(unittest.HomeserverTestCase): class DeactivateAccountTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -1382,7 +1378,6 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): class UserRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -2803,7 +2798,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): class UserMembershipRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -2960,7 +2954,6 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): class PushersRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -3089,7 +3082,6 @@ class PushersRestTestCase(unittest.HomeserverTestCase): class UserMediaRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -3881,7 +3873,6 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): ], ) class WhoisRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -3961,7 +3952,6 @@ class WhoisRestTestCase(unittest.HomeserverTestCase): class ShadowBanRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -4042,7 +4032,6 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase): class RateLimitTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -4268,7 +4257,6 @@ class RateLimitTestCase(unittest.HomeserverTestCase): class AccountDataTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -4358,7 +4346,6 @@ class AccountDataTestCase(unittest.HomeserverTestCase): class UsersByExternalIdTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -4442,3 +4429,97 @@ class UsersByExternalIdTestCase(unittest.HomeserverTestCase): {"user_id": self.other_user}, channel.json_body, ) + + +class 
UsersByThreePidTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.get_success( + self.store.user_add_threepid( + self.other_user, "email", "user@email.com", 1, 1 + ) + ) + self.get_success( + self.store.user_add_threepid(self.other_user, "msidn", "+1-12345678", 1, 1) + ) + + def test_no_auth(self) -> None: + """Try to look up a user without authentication.""" + url = "/_synapse/admin/v1/threepid/email/users/user%40email.com" + + channel = self.make_request( + "GET", + url, + ) + + self.assertEqual(401, channel.code, msg=channel.json_body) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_medium_does_not_exist(self) -> None: + """Tests that both a lookup for a medium that does not exist and a user that + doesn't exist with that third party ID returns a 404""" + # test for unknown medium + url = "/_synapse/admin/v1/threepid/publickey/users/unknown-key" + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + # test for unknown user with a known medium + url = "/_synapse/admin/v1/threepid/email/users/unknown" + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_success(self) -> None: + """Tests a successful medium + address lookup""" + # test for email medium with encoded value of user@email.com + url = "/_synapse/admin/v1/threepid/email/users/user%40email.com" + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual( + {"user_id": self.other_user}, + channel.json_body, + ) + + # test for msidn medium with encoded value of +1-12345678 + url = "/_synapse/admin/v1/threepid/msidn/users/%2B1-12345678" + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual( + {"user_id": self.other_user}, + channel.json_body, + ) -- cgit 1.5.1 From 334a8324d3db26da32a1f015ecf24a1bd25551db Mon Sep 17 00:00:00 2001 From: Brad Jones Date: Fri, 11 Nov 2022 10:28:05 -0700 Subject: Update sample Nginx configuration to HTTP 1.1 (#14414) Signed-off-by: Brad Jones --- changelog.d/14414.doc | 1 + docs/reverse_proxy.md | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/14414.doc diff --git a/changelog.d/14414.doc b/changelog.d/14414.doc new file mode 100644 index 0000000000..23dd963d47 --- /dev/null +++ b/changelog.d/14414.doc @@ -0,0 +1 @@ +Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. 
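The changelog entry above refers to the `reverse_proxy.md` change that follows: Synapse may send chunked responses, and chunked transfer encoding only exists in HTTP/1.1, so a proxy that speaks HTTP/1.0 to the backend cannot forward such bodies. As a quick illustration — a minimal sketch, not part of this patch, assuming a homeserver is reachable at the conventional `localhost:8008` address — Python's standard library client speaks HTTP/1.1 by default and can show which framing a response uses:

```python
# Minimal sketch (assumes a Synapse homeserver listening on localhost:8008).
# http.client issues HTTP/1.1 requests, so chunked responses are handled;
# an HTTP/1.0 proxy hop in front of Synapse would not be able to forward them.
import http.client

conn = http.client.HTTPConnection("localhost", 8008)
conn.request("GET", "/_matrix/client/versions")
resp = conn.getresponse()
# A chunked reply carries "Transfer-Encoding: chunked" instead of a
# Content-Length header; which one appears depends on the response.
print(resp.getheader("Transfer-Encoding"), resp.getheader("Content-Length"))
conn.close()
```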
diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md index 4e7a1d4435..48dbc1c58e 100644 --- a/docs/reverse_proxy.md +++ b/docs/reverse_proxy.md @@ -79,6 +79,9 @@ server { # Nginx by default only allows file uploads up to 1M in size # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml client_max_body_size 50M; + + # Synapse responses may be chunked, which is an HTTP/1.1 feature. + proxy_http_version 1.1; } } ``` -- cgit 1.5.1 From 2e7c86c1295f9dc4d9cd6b5fec93262f8ed55ac6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 10:39:09 +0000 Subject: Bump dawidd6/action-download-artifact from 2.24.1 to 2.24.2 (#14424) * Bump dawidd6/action-download-artifact from 2.24.1 to 2.24.2 Bumps [dawidd6/action-download-artifact](https://github.com/dawidd6/action-download-artifact) from 2.24.1 to 2.24.2. - [Release notes](https://github.com/dawidd6/action-download-artifact/releases) - [Commits](https://github.com/dawidd6/action-download-artifact/compare/b12b127cf24433d14b4f93cee62f5465076ba82a...e6e25ac3a2b93187502a8be1ef9e9603afc34925) --- updated-dependencies: - dependency-name: dawidd6/action-download-artifact dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/docs-pr-netlify.yaml | 2 +- changelog.d/14424.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14424.misc diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml index 798a863a0c..231982f681 100644 --- a/.github/workflows/docs-pr-netlify.yaml +++ b/.github/workflows/docs-pr-netlify.yaml @@ -14,7 +14,7 @@ jobs: # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess: - name: 📥 Download artifact - uses: dawidd6/action-download-artifact@b12b127cf24433d14b4f93cee62f5465076ba82a # v2.24.1 + uses: dawidd6/action-download-artifact@e6e25ac3a2b93187502a8be1ef9e9603afc34925 # v2.24.2 with: workflow: docs-pr.yaml run_id: ${{ github.event.workflow_run.id }} diff --git a/changelog.d/14424.misc b/changelog.d/14424.misc new file mode 100644 index 0000000000..8b2c88aa33 --- /dev/null +++ b/changelog.d/14424.misc @@ -0,0 +1 @@ +Bump dawidd6/action-download-artifact from 2.24.1 to 2.24.2. -- cgit 1.5.1 From 639780fc15bbdc48bd0ae3ba3b013da2a6798fa0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 10:39:38 +0000 Subject: Bump actions/upload-artifact from 2 to 3 (#14425) * Bump actions/upload-artifact from 2 to 3 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 2 to 3. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/docs-pr.yaml | 2 +- changelog.d/14425.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14425.misc diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index da40fa2408..cde6cf511e 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -26,7 +26,7 @@ jobs: cp book/welcome_and_overview.html book/index.html - name: Upload Artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: book path: book diff --git a/changelog.d/14425.misc b/changelog.d/14425.misc new file mode 100644 index 0000000000..b5ce5898c2 --- /dev/null +++ b/changelog.d/14425.misc @@ -0,0 +1 @@ +Bump actions/upload-artifact from 2 to 3. -- cgit 1.5.1 From fec1e2cb52938f67360564e43496bf2a4f0bfd9a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 10:39:55 +0000 Subject: Bump blake2 from 0.10.4 to 0.10.5 (#14426) * Bump blake2 from 0.10.4 to 0.10.5 Bumps [blake2](https://github.com/RustCrypto/hashes) from 0.10.4 to 0.10.5. - [Release notes](https://github.com/RustCrypto/hashes/releases) - [Commits](https://github.com/RustCrypto/hashes/compare/blake2-v0.10.4...blake2-v0.10.5) --- updated-dependencies: - dependency-name: blake2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- Cargo.lock | 4 ++-- changelog.d/14426.misc | 1 + rust/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14426.misc diff --git a/Cargo.lock b/Cargo.lock index 4b12b8ef34..8a8099bc6d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -37,9 +37,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "blake2" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" +checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e" dependencies = [ "digest", ] diff --git a/changelog.d/14426.misc b/changelog.d/14426.misc new file mode 100644 index 0000000000..f78ec0ab7b --- /dev/null +++ b/changelog.d/14426.misc @@ -0,0 +1 @@ +Bump blake2 from 0.10.4 to 0.10.5. diff --git a/rust/Cargo.toml b/rust/Cargo.toml index d6e72d4c00..48f6144b29 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -31,5 +31,5 @@ serde = { version = "1.0.147", features = ["derive"] } serde_json = "1.0.87" [build-dependencies] -blake2 = "0.10.4" +blake2 = "0.10.5" hex = "0.4.3" -- cgit 1.5.1 From cc45808ea3f30e03d8ff40521d00b2a6d1d7a4cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 10:41:31 +0000 Subject: Bump types-jsonschema from 4.17.0.0 to 4.17.0.1 (#14430) * Bump types-jsonschema from 4.17.0.0 to 4.17.0.1 Bumps [types-jsonschema](https://github.com/python/typeshed) from 4.17.0.0 to 4.17.0.1. 
- [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-jsonschema dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14430.misc | 1 + poetry.lock | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14430.misc diff --git a/changelog.d/14430.misc b/changelog.d/14430.misc new file mode 100644 index 0000000000..109a128618 --- /dev/null +++ b/changelog.d/14430.misc @@ -0,0 +1 @@ +Bump types-jsonschema from 4.17.0.0 to 4.17.0.1. diff --git a/poetry.lock b/poetry.lock index 29210dc4cd..fb6125def4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1423,7 +1423,7 @@ python-versions = "*" [[package]] name = "types-jsonschema" -version = "4.17.0.0" +version = "4.17.0.1" description = "Typing stubs for jsonschema" category = "dev" optional = false @@ -2342,6 +2342,8 @@ prometheus-client = [ psycopg2 = [ {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"}, {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"}, + {file = "psycopg2-2.9.5-cp311-cp311-win32.whl", hash = "sha256:093e3894d2d3c592ab0945d9eba9d139c139664dcf83a1c440b8a7aa9bb21955"}, + {file = "psycopg2-2.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:920bf418000dd17669d2904472efeab2b20546efd0548139618f8fa305d1d7ad"}, {file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = "sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"}, {file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"}, {file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"}, @@ -2798,8 +2800,8 @@ types-ipaddress = [ {file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"}, ] types-jsonschema = [ - {file = "types-jsonschema-4.17.0.0.tar.gz", hash = "sha256:5b0875503218497cfc5c5ba92b458b1b8ec34a136e4a0d8c4f5889d59b1f5168"}, - {file = "types_jsonschema-4.17.0.0-py3-none-any.whl", hash = "sha256:520816acf40d1d7ce0981aa805862b27395491b4854188844945c674ad9173a1"}, + {file = "types-jsonschema-4.17.0.1.tar.gz", hash = "sha256:62625d492e4930411a431909ac32301aeab6180500e70ee222f81d43204cfb3c"}, + {file = "types_jsonschema-4.17.0.1-py3-none-any.whl", hash = "sha256:77badbe3881cbf79ac9561be2be2b1f37ab104b13afd2231840e6dd6e94e63c2"}, ] types-opentracing = [ {file = "types-opentracing-2.4.10.tar.gz", hash = "sha256:6101414f3b6d3b9c10f1c510a261e8439b6c8d67c723d5c2872084697b4580a7"}, -- cgit 1.5.1 From b7f5a3aaa6f14816cf7175d1741252f6e2128705 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 10:41:55 +0000 Subject: Bump flake8 from 4.0.1 to 5.0.4 (#14431) * Bump flake8 from 4.0.1 to 5.0.4 Bumps [flake8](https://github.com/pycqa/flake8) from 4.0.1 to 5.0.4. 
- [Release notes](https://github.com/pycqa/flake8/releases) - [Commits](https://github.com/pycqa/flake8/compare/4.0.1...5.0.4) --- updated-dependencies: - dependency-name: flake8 dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14431.misc | 1 + poetry.lock | 40 ++++++++++++++++++++-------------------- 2 files changed, 21 insertions(+), 20 deletions(-) create mode 100644 changelog.d/14431.misc diff --git a/changelog.d/14431.misc b/changelog.d/14431.misc new file mode 100644 index 0000000000..15b6d4f2ee --- /dev/null +++ b/changelog.d/14431.misc @@ -0,0 +1 @@ +Bump flake8 from 4.0.1 to 5.0.4. diff --git a/poetry.lock b/poetry.lock index fb6125def4..ff8c432388 100644 --- a/poetry.lock +++ b/poetry.lock @@ -246,17 +246,17 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910 [[package]] name = "flake8" -version = "4.0.1" +version = "5.0.4" description = "the modular source code checker: pep8 pyflakes and co" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.6.1" [package.dependencies] -importlib-metadata = {version = "<4.3", markers = "python_version < \"3.8\""} -mccabe = ">=0.6.0,<0.7.0" -pycodestyle = ">=2.8.0,<2.9.0" -pyflakes = ">=2.4.0,<2.5.0" +importlib-metadata = {version = ">=1.1.0,<4.3", markers = "python_version < \"3.8\""} +mccabe = ">=0.7.0,<0.8.0" +pycodestyle = ">=2.9.0,<2.10.0" +pyflakes = ">=2.5.0,<2.6.0" [[package]] name = "flake8-bugbear" @@ -555,11 +555,11 @@ dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "ma [[package]] name = "mccabe" -version = "0.6.1" +version = "0.7.0" description = "McCabe checker, plugin for flake8" category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.6" [[package]] name = "msgpack" @@ -775,11 +775,11 @@ pyasn1 = ">=0.4.6,<0.5.0" [[package]] name = "pycodestyle" -version = "2.8.0" +version = "2.9.1" description = "Python style guide checker" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.6" [[package]] name = "pycparser" @@ -806,11 +806,11 @@ email = ["email-validator (>=1.0.3)"] [[package]] name = "pyflakes" -version = "2.4.0" +version = "2.5.0" description = "passive checker of Python programs" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.6" [[package]] name = "pygithub" @@ -1831,8 +1831,8 @@ elementpath = [ {file = "elementpath-2.5.0.tar.gz", hash = "sha256:3a27aaf3399929fccda013899cb76d3ff111734abf4281e5f9d3721ba0b9ffa3"}, ] flake8 = [ - {file = "flake8-4.0.1-py2.py3-none-any.whl", hash = "sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d"}, - {file = "flake8-4.0.1.tar.gz", hash = "sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"}, + {file = "flake8-5.0.4-py2.py3-none-any.whl", hash = "sha256:7a1cf6b73744f5806ab95e526f6f0d8c01c66d7bbe349562d22dfca20610b248"}, + {file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"}, ] flake8-bugbear = [ {file = "flake8-bugbear-22.10.27.tar.gz", hash = "sha256:a6708608965c9e0de5fff13904fed82e0ba21ac929fe4896459226a797e11cd5"}, @@ -2146,8 +2146,8 @@ matrix-synapse-ldap3 = [ {file = 
"matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"}, ] mccabe = [ - {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, - {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, ] msgpack = [ {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"}, @@ -2369,8 +2369,8 @@ pyasn1-modules = [ {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"}, ] pycodestyle = [ - {file = "pycodestyle-2.8.0-py2.py3-none-any.whl", hash = "sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20"}, - {file = "pycodestyle-2.8.0.tar.gz", hash = "sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"}, + {file = "pycodestyle-2.9.1-py2.py3-none-any.whl", hash = "sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"}, + {file = "pycodestyle-2.9.1.tar.gz", hash = "sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785"}, ] pycparser = [ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, @@ -2415,8 +2415,8 @@ pydantic = [ {file = "pydantic-1.10.2.tar.gz", hash = "sha256:91b8e218852ef6007c2b98cd861601c6a09f1aa32bbbb74fab5b1c33d4a1e410"}, ] pyflakes = [ - {file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"}, - {file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"}, + {file = "pyflakes-2.5.0-py2.py3-none-any.whl", hash = "sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2"}, + {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"}, ] pygithub = [ {file = "PyGithub-1.56-py3-none-any.whl", hash = "sha256:d15f13d82165306da8a68aefc0f848a6f6432d5febbff13b60a94758ce3ef8b5"}, -- cgit 1.5.1 From 8e38d74313b77aa6c0712ebb164a60638939acae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 12:07:44 +0000 Subject: Bump attrs from 21.4.0 to 22.1.0 (#14427) * Bump attrs from 21.4.0 to 22.1.0 Bumps [attrs](https://github.com/python-attrs/attrs) from 21.4.0 to 22.1.0. - [Release notes](https://github.com/python-attrs/attrs/releases) - [Changelog](https://github.com/python-attrs/attrs/blob/main/CHANGELOG.rst) - [Commits](https://github.com/python-attrs/attrs/compare/21.4.0...22.1.0) --- updated-dependencies: - dependency-name: attrs dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14427.misc | 1 + poetry.lock | 14 +++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 changelog.d/14427.misc diff --git a/changelog.d/14427.misc b/changelog.d/14427.misc new file mode 100644 index 0000000000..f870b19e75 --- /dev/null +++ b/changelog.d/14427.misc @@ -0,0 +1 @@ +Bump attrs from 21.4.0 to 22.1.0. diff --git a/poetry.lock b/poetry.lock index ff8c432388..fe58638160 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,16 +1,16 @@ [[package]] name = "attrs" -version = "21.4.0" +version = "22.1.0" description = "Classes Without Boilerplate" category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.5" [package.extras] -dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "sphinx", "sphinx-notfound-page", "zope.interface"] +dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"] docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"] -tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "zope.interface"] -tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six"] +tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"] +tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"] [[package]] name = "Authlib" @@ -1642,8 +1642,8 @@ content-hash = "27811bd21d56ceeb0f68ded5a00375efcd1a004928f0736f5b02927ce8594cb0 [metadata.files] attrs = [ - {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"}, - {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"}, + {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"}, + {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"}, ] Authlib = [ {file = "Authlib-1.1.0-py2.py3-none-any.whl", hash = "sha256:be4b6a1dea51122336c210a6945b27a105b9ac572baffd15b07bcff4376c1523"}, -- cgit 1.5.1 From 683bf4af4b725a4f1c4956c7e0ee7fcea96a96f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 12:10:40 +0000 Subject: Bump types-pyyaml from 6.0.12.1 to 6.0.12.2 (#14428) * Bump types-pyyaml from 6.0.12.1 to 6.0.12.2 Bumps [types-pyyaml](https://github.com/python/typeshed) from 6.0.12.1 to 6.0.12.2. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pyyaml dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14428.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14428.misc diff --git a/changelog.d/14428.misc b/changelog.d/14428.misc new file mode 100644 index 0000000000..0582ebcf5b --- /dev/null +++ b/changelog.d/14428.misc @@ -0,0 +1 @@ +Bump types-pyyaml from 6.0.12.1 to 6.0.12.2. diff --git a/poetry.lock b/poetry.lock index fe58638160..809e1fae9f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1466,7 +1466,7 @@ types-cryptography = "*" [[package]] name = "types-pyyaml" -version = "6.0.12.1" +version = "6.0.12.2" description = "Typing stubs for PyYAML" category = "dev" optional = false @@ -2820,8 +2820,8 @@ types-pyOpenSSL = [ {file = "types_pyOpenSSL-22.0.10-py3-none-any.whl", hash = "sha256:63baea211768bea580a769ac5c0d637ae8cd3150314aadc5726ca22e4c4f241a"}, ] types-pyyaml = [ - {file = "types-PyYAML-6.0.12.1.tar.gz", hash = "sha256:70ccaafcf3fb404d57bffc1529fdd86a13e8b4f2cf9fc3ee81a6408ce0ad59d2"}, - {file = "types_PyYAML-6.0.12.1-py3-none-any.whl", hash = "sha256:aaf5e51444c13bd34104695a89ad9c48412599a4f615d65a60e649109714f608"}, + {file = "types-PyYAML-6.0.12.2.tar.gz", hash = "sha256:6840819871c92deebe6a2067fb800c11b8a063632eb4e3e755914e7ab3604e83"}, + {file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"}, ] types-requests = [ {file = "types-requests-2.28.11.2.tar.gz", hash = "sha256:fdcd7bd148139fb8eef72cf4a41ac7273872cad9e6ada14b11ff5dfdeee60ed3"}, -- cgit 1.5.1 From 95f7a65a5637e567d9022c945fb30c47ef59273d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 12:15:35 +0000 Subject: Bump gitpython from 3.1.27 to 3.1.29 (#14429) * Bump gitpython from 3.1.27 to 3.1.29 Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.27 to 3.1.29. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.27...3.1.29) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14429.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14429.misc diff --git a/changelog.d/14429.misc b/changelog.d/14429.misc new file mode 100644 index 0000000000..34e767d45a --- /dev/null +++ b/changelog.d/14429.misc @@ -0,0 +1 @@ +Bump gitpython from 3.1.27 to 3.1.29. 
diff --git a/poetry.lock b/poetry.lock index 809e1fae9f..10c1d98b38 100644 --- a/poetry.lock +++ b/poetry.lock @@ -306,7 +306,7 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.27" +version = "3.1.29" description = "GitPython is a python library used to interact with Git repositories" category = "dev" optional = false @@ -1866,8 +1866,8 @@ gitdb = [ {file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"}, ] gitpython = [ - {file = "GitPython-3.1.27-py3-none-any.whl", hash = "sha256:5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d"}, - {file = "GitPython-3.1.27.tar.gz", hash = "sha256:1c885ce809e8ba2d88a29befeb385fcea06338d3640712b59ca623c220bb5704"}, + {file = "GitPython-3.1.29-py3-none-any.whl", hash = "sha256:41eea0deec2deea139b459ac03656f0dd28fc4a3387240ec1d3c259a2c47850f"}, + {file = "GitPython-3.1.29.tar.gz", hash = "sha256:cc36bfc4a3f913e66805a28e84703e419d9c264c1077e537b54f0e1af85dbefd"}, ] hiredis = [ {file = "hiredis-2.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048"}, -- cgit 1.5.1 From fb66fae84b165e7bd132bc7cbc5732485ceee827 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 14 Nov 2022 08:13:11 -0500 Subject: Clean-up events persistence code (#14411) By removing unused variables and making some arguments required which are always provided. --- changelog.d/14411.misc | 1 + synapse/storage/controllers/persist_events.py | 2 -- synapse/storage/databases/main/events.py | 9 +++------ 3 files changed, 4 insertions(+), 8 deletions(-) create mode 100644 changelog.d/14411.misc diff --git a/changelog.d/14411.misc b/changelog.d/14411.misc new file mode 100644 index 0000000000..f5cca5c833 --- /dev/null +++ b/changelog.d/14411.misc @@ -0,0 +1 @@ +Clean-up event persistence code. diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 06e71a8053..48976dc570 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -716,8 +716,6 @@ class EventsPersistenceStorageController: ) if not is_still_joined: logger.info("Server no longer in room %s", room_id) - latest_event_ids = set() - current_state = {} delta.no_longer_in_room = True state_delta_for_room[room_id] = delta diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 00880bb37d..c4acff5be6 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -355,9 +355,9 @@ class PersistEventsStore: txn: LoggingTransaction, *, events_and_contexts: List[Tuple[EventBase, EventContext]], - inhibit_local_membership_updates: bool = False, - state_delta_for_room: Optional[Dict[str, DeltaState]] = None, - new_forward_extremities: Optional[Dict[str, Set[str]]] = None, + inhibit_local_membership_updates: bool, + state_delta_for_room: Dict[str, DeltaState], + new_forward_extremities: Dict[str, Set[str]], ) -> None: """Insert some number of room events into the necessary database tables. @@ -384,9 +384,6 @@ class PersistEventsStore: PartialStateConflictError: if attempting to persist a partial state event in a room that has been un-partial stated. 
""" - state_delta_for_room = state_delta_for_room or {} - new_forward_extremities = new_forward_extremities or {} - all_events_and_contexts = events_and_contexts min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering -- cgit 1.5.1 From 2cc592584ae9f225216b7663e9144ac6f565b757 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 14 Nov 2022 13:46:29 +0000 Subject: Remove unused type-ignores (#14433) * Remove unused type-ignores Oversights in #14427 and #14429. * Changelog --- changelog.d/14433.misc | 1 + scripts-dev/release.py | 4 +--- synapse/streams/events.py | 9 ++++++--- 3 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelog.d/14433.misc diff --git a/changelog.d/14433.misc b/changelog.d/14433.misc new file mode 100644 index 0000000000..08a350b13b --- /dev/null +++ b/changelog.d/14433.misc @@ -0,0 +1 @@ +Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index c82c58c54b..bf47b6c713 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -219,9 +219,7 @@ def _prepare() -> None: update_branch(repo) # Create the new release branch - # Type ignore will no longer be needed after GitPython 3.1.28. - # See https://github.com/gitpython-developers/GitPython/pull/1419 - repo.create_head(release_branch_name, commit=base_branch) # type: ignore[arg-type] + repo.create_head(release_branch_name, commit=base_branch) # Special-case SyTest: we don't actually prepare any files so we may # as well push it now (and only when we create a release branch; diff --git a/synapse/streams/events.py b/synapse/streams/events.py index bcd840bd88..f331e1af16 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -45,9 +45,12 @@ class _EventSourcesInner: class EventSources: def __init__(self, hs: "HomeServer"): self.sources = _EventSourcesInner( - # mypy thinks attribute.type is `Optional`, but we know it's never `None` here since - # all the attributes of `_EventSourcesInner` are annotated. - *(attribute.type(hs) for attribute in attr.fields(_EventSourcesInner)) # type: ignore[misc] + # mypy previously warned that attribute.type is `Optional`, but we know it's + # never `None` here since all the attributes of `_EventSourcesInner` are + # annotated. + # As of the stubs in attrs 22.1.0, `attr.fields()` now returns Any, + # so the call to `attribute.type` is not checked. + *(attribute.type(hs) for attribute in attr.fields(_EventSourcesInner)) ) self.store = hs.get_datastores().main -- cgit 1.5.1 From 6816300588b004e2819f6f285eef70a4f0da35d8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 14 Nov 2022 14:45:17 +0000 Subject: Make Dependabot only bump Rust deps in the lock file (#14434) This is to help downstream packagers. --- .github/dependabot.yml | 1 + changelog.d/14434.misc | 1 + rust/Cargo.toml | 12 ++++++------ 3 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelog.d/14434.misc diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9ee62bf539..7ce353ed64 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -18,5 +18,6 @@ updates: - package-ecosystem: "cargo" directory: "/" + versioning-strategy: "lockfile-only" schedule: interval: "weekly" diff --git a/changelog.d/14434.misc b/changelog.d/14434.misc new file mode 100644 index 0000000000..75d24cd73d --- /dev/null +++ b/changelog.d/14434.misc @@ -0,0 +1 @@ +Make Dependabot only bump Rust deps in the lock file. 
diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 48f6144b29..cffaa5b51b 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -20,16 +20,16 @@ crate-type = ["lib", "cdylib"] name = "synapse.synapse_rust" [dependencies] -anyhow = "1.0.66" +anyhow = "1.0.63" lazy_static = "1.4.0" log = "0.4.17" -pyo3 = { version = "0.17.3", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] } +pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] } pyo3-log = "0.7.0" pythonize = "0.17.0" -regex = "1.7.0" -serde = { version = "1.0.147", features = ["derive"] } -serde_json = "1.0.87" +regex = "1.6.0" +serde = { version = "1.0.144", features = ["derive"] } +serde_json = "1.0.85" [build-dependencies] -blake2 = "0.10.5" +blake2 = "0.10.4" hex = "0.4.3" -- cgit 1.5.1 From ae54a9406338a49591334c90792df9df16070129 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 16:34:00 +0000 Subject: Bump types-setuptools from 65.5.0.2 to 65.5.0.3 (#14436) * Bump types-setuptools from 65.5.0.2 to 65.5.0.3 Bumps [types-setuptools](https://github.com/python/typeshed) from 65.5.0.2 to 65.5.0.3. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-setuptools dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14436.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14436.misc diff --git a/changelog.d/14436.misc b/changelog.d/14436.misc new file mode 100644 index 0000000000..2ec3c3d947 --- /dev/null +++ b/changelog.d/14436.misc @@ -0,0 +1 @@ +Bump types-setuptools from 65.5.0.2 to 65.5.0.3. 
diff --git a/poetry.lock b/poetry.lock index 10c1d98b38..2364c72756 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1485,7 +1485,7 @@ types-urllib3 = "<1.27" [[package]] name = "types-setuptools" -version = "65.5.0.2" +version = "65.5.0.3" description = "Typing stubs for setuptools" category = "dev" optional = false @@ -2828,8 +2828,8 @@ types-requests = [ {file = "types_requests-2.28.11.2-py3-none-any.whl", hash = "sha256:14941f8023a80b16441b3b46caffcbfce5265fd14555844d6029697824b5a2ef"}, ] types-setuptools = [ - {file = "types-setuptools-65.5.0.2.tar.gz", hash = "sha256:9847de6d7087fb1dd4a88c2a21543d1b86a6179c36744f081974303fe2f30f50"}, - {file = "types_setuptools-65.5.0.2-py3-none-any.whl", hash = "sha256:2d33e4ef0d35cd2da48a143eb02184f58398381ddb1f772eff20ccc4126fec13"}, + {file = "types-setuptools-65.5.0.3.tar.gz", hash = "sha256:17769171f5f2a2dc69b25c0d3106552a5cda767bbf6b36cb6212b26dae5aa9fc"}, + {file = "types_setuptools-65.5.0.3-py3-none-any.whl", hash = "sha256:9254c32b0cc91c486548e7d7561243b5bd185402a383e93c6691e1b9bc8d86e2"}, ] types-urllib3 = [ {file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"}, -- cgit 1.5.1 From 2cecb782c4bae8cefd4eedbe280f164ae588ae68 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 16:35:08 +0000 Subject: Bump canonicaljson from 1.6.3 to 1.6.4 (#14440) * Bump canonicaljson from 1.6.3 to 1.6.4 Bumps [canonicaljson](https://github.com/matrix-org/python-canonicaljson) from 1.6.3 to 1.6.4. - [Release notes](https://github.com/matrix-org/python-canonicaljson/releases) - [Changelog](https://github.com/matrix-org/python-canonicaljson/blob/main/CHANGES.md) - [Commits](https://github.com/matrix-org/python-canonicaljson/compare/v1.6.3...v1.6.4) --- updated-dependencies: - dependency-name: canonicaljson dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14440.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14440.misc diff --git a/changelog.d/14440.misc b/changelog.d/14440.misc new file mode 100644 index 0000000000..7a7751ec70 --- /dev/null +++ b/changelog.d/14440.misc @@ -0,0 +1 @@ +Bump canonicaljson from 1.6.3 to 1.6.4. 
diff --git a/poetry.lock b/poetry.lock index 2364c72756..b4b9c4bb72 100644 --- a/poetry.lock +++ b/poetry.lock @@ -91,7 +91,7 @@ dev = ["Sphinx (==4.3.2)", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0 [[package]] name = "canonicaljson" -version = "1.6.3" +version = "1.6.4" description = "Canonical JSON" category = "main" optional = false @@ -1704,8 +1704,8 @@ bleach = [ {file = "bleach-5.0.1.tar.gz", hash = "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c"}, ] canonicaljson = [ - {file = "canonicaljson-1.6.3-py3-none-any.whl", hash = "sha256:6ba3cf1702fa3d209b3e915a4e9a3e4ef194f1e8fca189c1f0b7a2a7686a27e6"}, - {file = "canonicaljson-1.6.3.tar.gz", hash = "sha256:ca59760bc274a899a0da75809d6909ae43e5123381fd6ef040a44d1952c0b448"}, + {file = "canonicaljson-1.6.4-py3-none-any.whl", hash = "sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48"}, + {file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"}, ] certifi = [ {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"}, -- cgit 1.5.1 From 4a333d638b3072818d400014c920a671987c24ba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 17:16:53 +0000 Subject: Bump types-pyopenssl from 22.0.10 to 22.1.0.2 (#14437) * Bump types-pyopenssl from 22.0.10 to 22.1.0.2 Bumps [types-pyopenssl](https://github.com/python/typeshed) from 22.0.10 to 22.1.0.2. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pyopenssl dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14437.misc | 1 + poetry.lock | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/14437.misc diff --git a/changelog.d/14437.misc b/changelog.d/14437.misc new file mode 100644 index 0000000000..79fa202a8d --- /dev/null +++ b/changelog.d/14437.misc @@ -0,0 +1 @@ +Bump types-pyopenssl from 22.0.10 to 22.1.0.2. 
diff --git a/poetry.lock b/poetry.lock index b4b9c4bb72..e1072d9599 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1454,8 +1454,8 @@ optional = false python-versions = "*" [[package]] -name = "types-pyOpenSSL" -version = "22.0.10" +name = "types-pyopenssl" +version = "22.1.0.2" description = "Typing stubs for pyOpenSSL" category = "dev" optional = false @@ -2815,9 +2815,9 @@ types-psycopg2 = [ {file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"}, {file = "types_psycopg2-2.9.21.1-py3-none-any.whl", hash = "sha256:858838f1972f39da2a6e28274201fed8619a40a235dd86e7f66f4548ec474395"}, ] -types-pyOpenSSL = [ - {file = "types-pyOpenSSL-22.0.10.tar.gz", hash = "sha256:f943b834f5b97e5e808764c2f6e37be1a2e226c46792296f61558196acfcc3a1"}, - {file = "types_pyOpenSSL-22.0.10-py3-none-any.whl", hash = "sha256:63baea211768bea580a769ac5c0d637ae8cd3150314aadc5726ca22e4c4f241a"}, +types-pyopenssl = [ + {file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"}, + {file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"}, ] types-pyyaml = [ {file = "types-PyYAML-6.0.12.2.tar.gz", hash = "sha256:6840819871c92deebe6a2067fb800c11b8a063632eb4e3e755914e7ab3604e83"}, -- cgit 1.5.1 From 4d1de6a944a6210282beca2d8ad3c035aec5f4e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 17:17:19 +0000 Subject: Bump flake8-comprehensions from 3.8.0 to 3.10.1 (#14438) * Bump flake8-comprehensions from 3.8.0 to 3.10.1 Bumps [flake8-comprehensions](https://github.com/adamchainz/flake8-comprehensions) from 3.8.0 to 3.10.1. - [Release notes](https://github.com/adamchainz/flake8-comprehensions/releases) - [Changelog](https://github.com/adamchainz/flake8-comprehensions/blob/main/HISTORY.rst) - [Commits](https://github.com/adamchainz/flake8-comprehensions/compare/3.8.0...3.10.1) --- updated-dependencies: - dependency-name: flake8-comprehensions dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14438.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14438.misc diff --git a/changelog.d/14438.misc b/changelog.d/14438.misc new file mode 100644 index 0000000000..24a7496b74 --- /dev/null +++ b/changelog.d/14438.misc @@ -0,0 +1 @@ +Bump flake8-comprehensions from 3.8.0 to 3.10.1. diff --git a/poetry.lock b/poetry.lock index e1072d9599..965f7cac0a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -275,7 +275,7 @@ dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit", "tox"] [[package]] name = "flake8-comprehensions" -version = "3.8.0" +version = "3.10.1" description = "A flake8 plugin to help you write better list/set/dict comprehensions." 
category = "dev" optional = false @@ -1839,8 +1839,8 @@ flake8-bugbear = [ {file = "flake8_bugbear-22.10.27-py3-none-any.whl", hash = "sha256:6ad0ab754507319060695e2f2be80e6d8977cfcea082293089a9226276bd825d"}, ] flake8-comprehensions = [ - {file = "flake8-comprehensions-3.8.0.tar.gz", hash = "sha256:8e108707637b1d13734f38e03435984f6b7854fa6b5a4e34f93e69534be8e521"}, - {file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"}, + {file = "flake8-comprehensions-3.10.1.tar.gz", hash = "sha256:412052ac4a947f36b891143430fef4859705af11b2572fbb689f90d372cf26ab"}, + {file = "flake8_comprehensions-3.10.1-py3-none-any.whl", hash = "sha256:d763de3c74bc18a79c039a7ec732e0a1985b0c79309ceb51e56401ad0a2cd44e"}, ] frozendict = [ {file = "frozendict-2.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a3b32d47282ae0098b9239a6d53ec539da720258bd762d62191b46f2f87c5fc"}, -- cgit 1.5.1 From e226513c0f17f1b9ff0e80cbc889dfde31ba5da8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 17:17:29 +0000 Subject: Bump jsonschema from 4.16.0 to 4.17.0 (#14439) * Bump jsonschema from 4.16.0 to 4.17.0 Bumps [jsonschema](https://github.com/python-jsonschema/jsonschema) from 4.16.0 to 4.17.0. - [Release notes](https://github.com/python-jsonschema/jsonschema/releases) - [Changelog](https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst) - [Commits](https://github.com/python-jsonschema/jsonschema/compare/v4.16.0...v4.17.0) --- updated-dependencies: - dependency-name: jsonschema dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14439.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14439.misc diff --git a/changelog.d/14439.misc b/changelog.d/14439.misc new file mode 100644 index 0000000000..f7c9a65cf9 --- /dev/null +++ b/changelog.d/14439.misc @@ -0,0 +1 @@ +Bump jsonschema from 4.16.0 to 4.17.0. 
diff --git a/poetry.lock b/poetry.lock index 965f7cac0a..8d468adf12 100644 --- a/poetry.lock +++ b/poetry.lock @@ -452,7 +452,7 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "jsonschema" -version = "4.16.0" +version = "4.17.0" description = "An implementation of JSON Schema validation for Python" category = "main" optional = false @@ -2012,8 +2012,8 @@ jinja2 = [ {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, ] jsonschema = [ - {file = "jsonschema-4.16.0-py3-none-any.whl", hash = "sha256:9e74b8f9738d6a946d70705dc692b74b5429cd0960d58e79ffecfc43b2221eb9"}, - {file = "jsonschema-4.16.0.tar.gz", hash = "sha256:165059f076eff6971bae5b742fc029a7b4ef3f9bcf04c14e4776a7605de14b23"}, + {file = "jsonschema-4.17.0-py3-none-any.whl", hash = "sha256:f660066c3966db7d6daeaea8a75e0b68237a48e51cf49882087757bb59916248"}, + {file = "jsonschema-4.17.0.tar.gz", hash = "sha256:5bfcf2bca16a087ade17e02b282d34af7ccd749ef76241e7f9bd7c0cb8a9424d"}, ] keyring = [ {file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"}, -- cgit 1.5.1 From 36097e88c4da51fce6556a58c49bd675f4cf20ab Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Mon, 14 Nov 2022 17:31:36 +0000 Subject: Remove slaved id tracker (#14376) This matches the multi instance writer ID generator class, which can handle advancing the current token both over replication and by calling the database. --- changelog.d/14376.misc | 1 + synapse/replication/slave/__init__.py | 13 ------ synapse/replication/slave/storage/__init__.py | 13 ------ .../slave/storage/_slaved_id_tracker.py | 50 ---------------------- synapse/storage/databases/main/account_data.py | 30 +++++-------- synapse/storage/databases/main/devices.py | 36 ++++++---------- synapse/storage/databases/main/events_worker.py | 35 ++++++--------- synapse/storage/databases/main/push_rule.py | 17 ++++---- synapse/storage/databases/main/pusher.py | 24 ++++------- synapse/storage/databases/main/receipts.py | 18 ++++---- synapse/storage/util/id_generators.py | 13 ++++-- 11 files changed, 74 insertions(+), 176 deletions(-) create mode 100644 changelog.d/14376.misc delete mode 100644 synapse/replication/slave/__init__.py delete mode 100644 synapse/replication/slave/storage/__init__.py delete mode 100644 synapse/replication/slave/storage/_slaved_id_tracker.py diff --git a/changelog.d/14376.misc b/changelog.d/14376.misc new file mode 100644 index 0000000000..2ca326fea6 --- /dev/null +++ b/changelog.d/14376.misc @@ -0,0 +1 @@ +Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). diff --git a/synapse/replication/slave/__init__.py b/synapse/replication/slave/__init__.py deleted file mode 100644 index f43a360a80..0000000000 --- a/synapse/replication/slave/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
diff --git a/synapse/replication/slave/storage/__init__.py b/synapse/replication/slave/storage/__init__.py deleted file mode 100644 index f43a360a80..0000000000 --- a/synapse/replication/slave/storage/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py deleted file mode 100644 index 8f3f953ed4..0000000000 --- a/synapse/replication/slave/storage/_slaved_id_tracker.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import List, Optional, Tuple - -from synapse.storage.database import LoggingDatabaseConnection -from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id - - -class SlavedIdTracker(AbstractStreamIdTracker): - """Tracks the "current" stream ID of a stream with a single writer. - - See `AbstractStreamIdTracker` for more details. - - Note that this class does not work correctly when there are multiple - writers. 
- """ - - def __init__( - self, - db_conn: LoggingDatabaseConnection, - table: str, - column: str, - extra_tables: Optional[List[Tuple[str, str]]] = None, - step: int = 1, - ): - self.step = step - self._current = _load_current_id(db_conn, table, column, step) - if extra_tables: - for table, column in extra_tables: - self.advance(None, _load_current_id(db_conn, table, column)) - - def advance(self, instance_name: Optional[str], new_id: int) -> None: - self._current = (max if self.step > 0 else min)(self._current, new_id) - - def get_current_token(self) -> int: - return self._current - - def get_current_token_for_writer(self, instance_name: str) -> int: - return self.get_current_token() diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index c38b8a9e5a..282687ebce 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -27,7 +27,6 @@ from typing import ( ) from synapse.api.constants import AccountDataTypes -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import AccountDataStream, TagAccountDataStream from synapse.storage._base import db_to_json from synapse.storage.database import ( @@ -68,12 +67,11 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) # to write account data. A value of `True` implies that `_account_data_id_gen` # is an `AbstractStreamIdGenerator` and not just a tracker. self._account_data_id_gen: AbstractStreamIdTracker + self._can_write_to_account_data = ( + self._instance_name in hs.config.worker.writers.account_data + ) if isinstance(database.engine, PostgresEngine): - self._can_write_to_account_data = ( - self._instance_name in hs.config.worker.writers.account_data - ) - self._account_data_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, @@ -95,21 +93,13 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets # updated over replication. (Multiple writers are not supported for # SQLite). 
- if self._instance_name in hs.config.worker.writers.account_data: - self._can_write_to_account_data = True - self._account_data_id_gen = StreamIdGenerator( - db_conn, - "room_account_data", - "stream_id", - extra_tables=[("room_tags_revisions", "stream_id")], - ) - else: - self._account_data_id_gen = SlavedIdTracker( - db_conn, - "room_account_data", - "stream_id", - extra_tables=[("room_tags_revisions", "stream_id")], - ) + self._account_data_id_gen = StreamIdGenerator( + db_conn, + "room_account_data", + "stream_id", + extra_tables=[("room_tags_revisions", "stream_id")], + is_writer=self._instance_name in hs.config.worker.writers.account_data, + ) account_max = self.get_max_account_data_stream_id() self._account_data_stream_cache = StreamChangeCache( diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index aa58c2adc3..3e5c16b15b 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -38,7 +38,6 @@ from synapse.logging.opentracing import ( whitelisted_homeserver, ) from synapse.metrics.background_process_metrics import wrap_as_background_process -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( @@ -86,28 +85,19 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ): super().__init__(database, db_conn, hs) - if hs.config.worker.worker_app is None: - self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( - db_conn, - "device_lists_stream", - "stream_id", - extra_tables=[ - ("user_signature_stream", "stream_id"), - ("device_lists_outbound_pokes", "stream_id"), - ("device_lists_changes_in_room", "stream_id"), - ], - ) - else: - self._device_list_id_gen = SlavedIdTracker( - db_conn, - "device_lists_stream", - "stream_id", - extra_tables=[ - ("user_signature_stream", "stream_id"), - ("device_lists_outbound_pokes", "stream_id"), - ("device_lists_changes_in_room", "stream_id"), - ], - ) + # In the worker store this is an ID tracker which we overwrite in the non-worker + # class below that is used on the main process. + self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + db_conn, + "device_lists_stream", + "stream_id", + extra_tables=[ + ("user_signature_stream", "stream_id"), + ("device_lists_outbound_pokes", "stream_id"), + ("device_lists_changes_in_room", "stream_id"), + ], + is_writer=hs.config.worker.worker_app is None, + ) # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker). 
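The same consolidation repeats for each of the stores below. As a rough, self-contained sketch of the single-writer semantics being introduced (the names here are illustrative only, not Synapse's actual `StreamIdGenerator`, which is async and loads its token from the database):

```python
import threading
from contextlib import contextmanager
from typing import Iterator


class SingleWriterIdGen:
    """Illustrative stand-in for a single-writer stream ID generator."""

    def __init__(self, current: int, is_writer: bool) -> None:
        self._lock = threading.Lock()
        self._current = current
        self._is_writer = is_writer

    def advance(self, new_id: int) -> None:
        # Readers are advanced from the replication stream; a writer
        # allocates IDs itself and must never be advanced externally.
        if self._is_writer:
            raise Exception("Replication is not supported by writer instances")
        self._current = max(self._current, new_id)

    @contextmanager
    def get_next(self) -> Iterator[int]:
        # Only the writer hands out fresh stream IDs.
        assert self._is_writer
        with self._lock:
            self._current += 1
            yield self._current

    def get_current_token(self) -> int:
        return self._current


writer = SingleWriterIdGen(current=10, is_writer=True)
with writer.get_next() as stream_id:
    print(stream_id)  # 11

reader = SingleWriterIdGen(current=10, is_writer=False)
reader.advance(11)  # applied when the writer's change replicates
print(reader.get_current_token())  # 11
```

Worker processes construct the reader flavour and keep it current purely from the replication stream, while the lone writer allocates new IDs directly; the `is_writer` flag is what lets one class replace the two previously separate ones.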
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index a79091952a..7a003ab88f 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -59,7 +59,6 @@ from synapse.metrics.background_process_metrics import ( run_as_background_process, wrap_as_background_process, ) -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import BackfillStream from synapse.replication.tcp.streams.events import EventsStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause @@ -213,26 +212,20 @@ class EventsWorkerStore(SQLBaseStore): # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets # updated over replication. (Multiple writers are not supported for # SQLite). - if hs.get_instance_name() in hs.config.worker.writers.events: - self._stream_id_gen = StreamIdGenerator( - db_conn, - "events", - "stream_ordering", - ) - self._backfill_id_gen = StreamIdGenerator( - db_conn, - "events", - "stream_ordering", - step=-1, - extra_tables=[("ex_outlier_stream", "event_stream_ordering")], - ) - else: - self._stream_id_gen = SlavedIdTracker( - db_conn, "events", "stream_ordering" - ) - self._backfill_id_gen = SlavedIdTracker( - db_conn, "events", "stream_ordering", step=-1 - ) + self._stream_id_gen = StreamIdGenerator( + db_conn, + "events", + "stream_ordering", + is_writer=hs.get_instance_name() in hs.config.worker.writers.events, + ) + self._backfill_id_gen = StreamIdGenerator( + db_conn, + "events", + "stream_ordering", + step=-1, + extra_tables=[("ex_outlier_stream", "event_stream_ordering")], + is_writer=hs.get_instance_name() in hs.config.worker.writers.events, + ) events_max = self._stream_id_gen.get_current_token() curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict( diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 8ae10f6127..12ad44dbb3 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -30,7 +30,6 @@ from typing import ( from synapse.api.errors import StoreError from synapse.config.homeserver import ExperimentalConfig -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import PushRulesStream from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( @@ -111,14 +110,14 @@ class PushRulesWorkerStore( ): super().__init__(database, db_conn, hs) - if hs.config.worker.worker_app is None: - self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator( - db_conn, "push_rules_stream", "stream_id" - ) - else: - self._push_rules_stream_id_gen = SlavedIdTracker( - db_conn, "push_rules_stream", "stream_id" - ) + # In the worker store this is an ID tracker which we overwrite in the non-worker + # class below that is used on the main process. 
+ self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + db_conn, + "push_rules_stream", + "stream_id", + is_writer=hs.config.worker.worker_app is None, + ) push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict( db_conn, diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index 4a01562d45..fee37b9ce4 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -27,7 +27,6 @@ from typing import ( ) from synapse.push import PusherConfig, ThrottleParams -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import PushersStream from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( @@ -59,20 +58,15 @@ class PusherWorkerStore(SQLBaseStore): ): super().__init__(database, db_conn, hs) - if hs.config.worker.worker_app is None: - self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( - db_conn, - "pushers", - "id", - extra_tables=[("deleted_pushers", "stream_id")], - ) - else: - self._pushers_id_gen = SlavedIdTracker( - db_conn, - "pushers", - "id", - extra_tables=[("deleted_pushers", "stream_id")], - ) + # In the worker store this is an ID tracker which we overwrite in the non-worker + # class below that is used on the main process. + self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + db_conn, + "pushers", + "id", + extra_tables=[("deleted_pushers", "stream_id")], + is_writer=hs.config.worker.worker_app is None, + ) self.db_pool.updates.register_background_update_handler( "remove_deactivated_pushers", diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index dc6989527e..64519587f8 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -27,7 +27,6 @@ from typing import ( ) from synapse.api.constants import EduTypes -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import ReceiptsStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( @@ -61,6 +60,9 @@ class ReceiptsWorkerStore(SQLBaseStore): hs: "HomeServer", ): self._instance_name = hs.get_instance_name() + + # In the worker store this is an ID tracker which we overwrite in the non-worker + # class below that is used on the main process. self._receipts_id_gen: AbstractStreamIdTracker if isinstance(database.engine, PostgresEngine): @@ -87,14 +89,12 @@ class ReceiptsWorkerStore(SQLBaseStore): # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets # updated over replication. (Multiple writers are not supported for # SQLite). 
- if hs.get_instance_name() in hs.config.worker.writers.receipts: - self._receipts_id_gen = StreamIdGenerator( - db_conn, "receipts_linearized", "stream_id" - ) - else: - self._receipts_id_gen = SlavedIdTracker( - db_conn, "receipts_linearized", "stream_id" - ) + self._receipts_id_gen = StreamIdGenerator( + db_conn, + "receipts_linearized", + "stream_id", + is_writer=hs.get_instance_name() in hs.config.worker.writers.receipts, + ) super().__init__(database, db_conn, hs) diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 2dfe4c0b66..1af0af1266 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -186,11 +186,13 @@ class StreamIdGenerator(AbstractStreamIdGenerator): column: str, extra_tables: Iterable[Tuple[str, str]] = (), step: int = 1, + is_writer: bool = True, ) -> None: assert step != 0 self._lock = threading.Lock() self._step: int = step self._current: int = _load_current_id(db_conn, table, column, step) + self._is_writer = is_writer for table, column in extra_tables: self._current = (max if step > 0 else min)( self._current, _load_current_id(db_conn, table, column, step) @@ -204,9 +206,11 @@ class StreamIdGenerator(AbstractStreamIdGenerator): self._unfinished_ids: OrderedDict[int, int] = OrderedDict() def advance(self, instance_name: str, new_id: int) -> None: - # `StreamIdGenerator` should only be used when there is a single writer, - # so replication should never happen. - raise Exception("Replication is not supported by StreamIdGenerator") + # Advance should never be called on a writer instance, only over replication + if self._is_writer: + raise Exception("Replication is not supported by writer StreamIdGenerator") + + self._current = (max if self._step > 0 else min)(self._current, new_id) def get_next(self) -> AsyncContextManager[int]: with self._lock: @@ -249,6 +253,9 @@ class StreamIdGenerator(AbstractStreamIdGenerator): return _AsyncCtxManagerWrapper(manager()) def get_current_token(self) -> int: + if self._is_writer: + return self._current + with self._lock: if self._unfinished_ids: return next(iter(self._unfinished_ids)) - self._step -- cgit 1.5.1 From 64dd8a9c6e43beed32fa1c94fda1d3d80957fef6 Mon Sep 17 00:00:00 2001 From: sando38 <90323876+sando38@users.noreply.github.com> Date: Mon, 14 Nov 2022 18:55:10 +0100 Subject: Include additional TURN server example into documentation (#14293) * Include eturnal TURN server configuration example and move specific configuration examples into subfolders. * Update docs/turn-howto.md Co-authored-by: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> * Update docs/setup/turn/coturn.md Co-authored-by: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> * Update docs/setup/turn/eturnal.md Co-authored-by: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> * Fix TURN relaying public IP address hint * lint eturnal installation commands * Adjust synapse setup to link to existing documentation, to avoid redundant information. 
* remove redundant text * include alpine linux package link * Create 14293.doc * Update 14293.doc add missing dot * Update docs/setup/turn/eturnal.md Co-authored-by: reivilibre * Update docs/setup/turn/eturnal.md Co-authored-by: reivilibre * Update docs/setup/turn/coturn.md Co-authored-by: Moritz Dietz * Update docs/setup/turn/coturn.md Co-authored-by: Moritz Dietz * Update docs/setup/turn/coturn.md Co-authored-by: Moritz Dietz * Update docs/setup/turn/eturnal.md Co-authored-by: reivilibre * Update docs/setup/turn/coturn.md Co-authored-by: Moritz Dietz * Update docs/setup/turn/coturn.md Co-authored-by: Moritz Dietz * Update eturnal.md to link to official documentation ... and to simplify some aspects * Adjust coturn to link to default prefix * Mention eturnalctl location * Update docs/turn-howto.md Co-authored-by: Saarko Co-authored-by: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Co-authored-by: reivilibre Co-authored-by: Moritz Dietz --- changelog.d/14293.doc | 1 + docs/SUMMARY.md | 2 + docs/setup/turn/coturn.md | 188 +++++++++++++++++++++++++++++++++++ docs/setup/turn/eturnal.md | 170 ++++++++++++++++++++++++++++++++ docs/turn-howto.md | 240 ++++++--------------------------------------- 5 files changed, 390 insertions(+), 211 deletions(-) create mode 100644 changelog.d/14293.doc create mode 100644 docs/setup/turn/coturn.md create mode 100644 docs/setup/turn/eturnal.md diff --git a/changelog.d/14293.doc b/changelog.d/14293.doc new file mode 100644 index 0000000000..d6410421e7 --- /dev/null +++ b/changelog.d/14293.doc @@ -0,0 +1 @@ +Add additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 16720bceb5..8d68719958 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -9,6 +9,8 @@ - [Configuring a Reverse Proxy](reverse_proxy.md) - [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md) - [Configuring a Turn Server](turn-howto.md) + - [coturn TURN server](setup/turn/coturn.md) + - [eturnal TURN server](setup/turn/eturnal.md) - [Delegation](delegate.md) # Upgrading diff --git a/docs/setup/turn/coturn.md b/docs/setup/turn/coturn.md new file mode 100644 index 0000000000..a1bb1e934c --- /dev/null +++ b/docs/setup/turn/coturn.md @@ -0,0 +1,188 @@ +# coturn TURN server + +The following sections describe how to install [coturn]() (which implements the TURN REST API). + +## `coturn` setup + +### Initial installation + +The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source. + +#### Debian and Ubuntu based distributions + +Just install the Debian package: + +```sh +sudo apt install coturn +``` + +This will install and start a systemd service called `coturn`. + +#### Source installation + +1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from GitHub. Unpack it and `cd` into the directory. + +1. Configure it: + + ```sh + ./configure + ``` + + You may need to install `libevent2`: if so, you should do so in + the way recommended by your operating system. You can ignore + warnings about lack of database support: a database is unnecessary + for this purpose. + +1. Build and install it: + + ```sh + make + sudo make install + ``` + +### Configuration + +1. Create or edit the config file in `/etc/turnserver.conf`. 
The relevant + lines, with example values, are: + + ``` + use-auth-secret + static-auth-secret=[your secret key here] + realm=turn.myserver.org + ``` + + See `turnserver.conf` for explanations of the options. One way to generate + the `static-auth-secret` is with `pwgen`: + + ```sh + pwgen -s 64 1 + ``` + + A `realm` must be specified, but its value is somewhat arbitrary. (It is + sent to clients as part of the authentication flow.) It is conventional to + set it to be your server name. + +1. You will most likely want to configure `coturn` to write logs somewhere. The + easiest way is normally to send them to the syslog: + + ```sh + syslog + ``` + + (in which case, the logs will be available via `journalctl -u coturn` on a + systemd system). Alternatively, `coturn` can be configured to write to a + logfile - check the example config file supplied with `coturn`. + +1. Consider your security settings. TURN lets users request a relay which will + connect to arbitrary IP addresses and ports. The following configuration is + suggested as a minimum starting point: + + ``` + # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay. + no-tcp-relay + + # don't let the relay ever try to connect to private IP address ranges within your network (if any) + # given the turn server is likely behind your firewall, remember to include any privileged public IPs too. + denied-peer-ip=10.0.0.0-10.255.255.255 + denied-peer-ip=192.168.0.0-192.168.255.255 + denied-peer-ip=172.16.0.0-172.31.255.255 + + # recommended additional local peers to block, to mitigate external access to internal services. + # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability + no-multicast-peers + denied-peer-ip=0.0.0.0-0.255.255.255 + denied-peer-ip=100.64.0.0-100.127.255.255 + denied-peer-ip=127.0.0.0-127.255.255.255 + denied-peer-ip=169.254.0.0-169.254.255.255 + denied-peer-ip=192.0.0.0-192.0.0.255 + denied-peer-ip=192.0.2.0-192.0.2.255 + denied-peer-ip=192.88.99.0-192.88.99.255 + denied-peer-ip=198.18.0.0-198.19.255.255 + denied-peer-ip=198.51.100.0-198.51.100.255 + denied-peer-ip=203.0.113.0-203.0.113.255 + denied-peer-ip=240.0.0.0-255.255.255.255 + + # special case the turn server itself so that client->TURN->TURN->client flows work + # this should be one of the turn server's listening IPs + allowed-peer-ip=10.0.0.1 + + # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS. + user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user. + total-quota=1200 + ``` + +1. Also consider supporting TLS/DTLS. To do this, add the following settings + to `turnserver.conf`: + + ``` + # TLS certificates, including intermediate certs. + # For Let's Encrypt certificates, use `fullchain.pem` here. + cert=/path/to/fullchain.pem + + # TLS private key file + pkey=/path/to/privkey.pem + + # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed + #no-tls + #no-dtls + ``` + + In this case, replace the `turn:` schemes in the `turn_uris` settings below + with `turns:`. + + We recommend that you only try to set up TLS/DTLS once you have set up a + basic installation and got it working. + + NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will + not work with any Matrix client that uses Chromium's WebRTC library. 
This + currently includes Element Android & iOS; for more details, see their + [respective](https://github.com/vector-im/element-android/issues/1533) + [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying + [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710). + Consider using a ZeroSSL certificate for your TURN server as a working alternative. + +1. Ensure your firewall allows traffic into the TURN server on the ports + you've configured it to listen on (by default: 3478 and 5349 for TURN + traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535 + for the UDP relay.) + +1. If your TURN server is behind NAT, the NAT gateway must have an external, + publicly-reachable IP address. You must configure `coturn` to advertise that + address to connecting clients: + + ``` + external-ip=EXTERNAL_NAT_IPv4_ADDRESS + ``` + + You may optionally limit the TURN server to listen only on the local + address that is mapped by NAT to the external address: + + ``` + listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS + ``` + + If your NAT gateway is reachable over both IPv4 and IPv6, you may + configure `coturn` to advertise each available address: + + ``` + external-ip=EXTERNAL_NAT_IPv4_ADDRESS + external-ip=EXTERNAL_NAT_IPv6_ADDRESS + ``` + + When advertising an external IPv6 address, ensure that the firewall and + network settings of the system running your TURN server are configured to + accept IPv6 traffic, and that the TURN server is listening on the local + IPv6 address that is mapped by NAT to the external IPv6 address. + +1. (Re)start the TURN server: + + * If you used the Debian package (or have set up a systemd unit yourself): + ```sh + sudo systemctl restart coturn + ``` + + * If you built from source: + + ```sh + /usr/local/bin/turnserver -o + ``` diff --git a/docs/setup/turn/eturnal.md b/docs/setup/turn/eturnal.md new file mode 100644 index 0000000000..2e5a45673e --- /dev/null +++ b/docs/setup/turn/eturnal.md @@ -0,0 +1,170 @@ +# eturnal TURN server + +The following sections describe how to install [eturnal]() +(which implements the TURN REST API). + +## `eturnal` setup + +### Initial installation + +The `eturnal` TURN server implementation is available from a variety of sources +such as native package managers, binary packages, installation from source or +[container image](https://eturnal.net/documentation/code/docker.html). They are +all described [here](https://github.com/processone/eturnal#installation). + +Quick-Test instructions in a [Linux Shell](https://github.com/processone/eturnal/blob/master/QUICK-TEST.md) +or with [Docker](https://github.com/processone/eturnal/blob/master/docker-k8s/QUICK-TEST.md) +are available as well. + +### Configuration + +After installation, `eturnal` usually ships a [default configuration file](https://github.com/processone/eturnal/blob/master/config/eturnal.yml) +here: `/etc/eturnal.yml` (and, if not found there, there is a backup file here: +`/opt/eturnal/etc/eturnal.yml`). It uses the (indentation-sensitive!) [YAML](https://en.wikipedia.org/wiki/YAML) +format. The file contains further explanations. + +Here are some hints on how to configure eturnal on your [host machine](https://github.com/processone/eturnal#configuration) +or when using e.g. [Docker](https://eturnal.net/documentation/code/docker.html). +You may also dive deeper into the [reference documentation](https://eturnal.net/documentation/). + +`eturnal` runs out of the box with the default configuration. 
To enable TURN and
+to integrate it with your homeserver, some settings in `eturnal`'s default configuration file
+must be edited:
+
+1. The homeserver's [`turn_shared_secret`](../../usage/configuration/config_documentation.md#turn_shared_secret)
+   and `eturnal`'s shared `secret` for authentication
+
+   Both need to have the same value. Uncomment and adjust this line in `eturnal`'s
+   configuration file:
+
+   ```yaml
+   secret: "long-and-cryptic"     # Shared secret, CHANGE THIS.
+   ```
+
+   One way to generate a `secret` is with `pwgen`:
+
+   ```sh
+   pwgen -s 64 1
+   ```
+
+1. Public IP address
+
+   If your TURN server is behind NAT, the NAT gateway must have an external,
+   publicly-reachable IP address. `eturnal` tries to autodetect the public IP address;
+   however, it may also be set explicitly by uncommenting and adjusting this line, so
+   that `eturnal` advertises that address to connecting clients:
+
+   ```yaml
+   relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
+   ```
+
+   If your NAT gateway is reachable over both IPv4 and IPv6, you may
+   configure `eturnal` to advertise each available address:
+
+   ```yaml
+   relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
+   relay_ipv6_addr: "2001:db8::4" # The server's public IPv6 address (optional).
+   ```
+
+   When advertising an external IPv6 address, ensure that the firewall and
+   network settings of the system running your TURN server are configured to
+   accept IPv6 traffic, and that the TURN server is listening on the local
+   IPv6 address that is mapped by NAT to the external IPv6 address.
+
+1. Logging
+
+   If `eturnal` was started by systemd, log files are written into the
+   `/var/log/eturnal` directory by default. In order to log to the [journal](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html)
+   instead, the `log_dir` option can be set to `stdout` in the configuration file.
+
+1. Security considerations
+
+   Consider your security settings. TURN lets users request a relay which will
+   connect to arbitrary IP addresses and ports. The following configuration is
+   suggested as a minimum starting point; [see also the official documentation](https://eturnal.net/documentation/#blacklist):
+
+   ```yaml
+   ## Reject TURN relaying from/to the following addresses/networks:
+   blacklist:                 # This is the default blacklist.
+       - "127.0.0.0/8"        # IPv4 loopback.
+       - "::1"                # IPv6 loopback.
+       - recommended          # Expands to a number of networks recommended to be
+                              # blocked, but includes private networks. Those
+                              # would have to be 'whitelist'ed if eturnal serves
+                              # local clients/peers within such networks.
+   ```
+
+   To whitelist IP addresses or specific (private) networks, you need to **add** a
+   `whitelist` section to the configuration file, e.g.:
+
+   ```yaml
+   whitelist:
+       - "192.168.0.0/16"
+       - "203.0.113.113"
+       - "2001:db8::/64"
+   ```
+
+   The more specific, the better.
+
+1. TURNS (TURN via TLS/DTLS)
+
+   Also consider supporting TLS/DTLS. To do this, adjust the following settings
+   in the `eturnal.yml` configuration file (the TLS-related parts must no longer be
+   commented out):
+
+   ```yaml
+   listen:
+       - ip: "::"
+         port: 3478
+         transport: udp
+       - ip: "::"
+         port: 3478
+         transport: tcp
+       - ip: "::"
+         port: 5349
+         transport: tls
+
+   ## TLS certificate/key files (must be readable by 'eturnal' user!):
+   tls_crt_file: /etc/eturnal/tls/crt.pem
+   tls_key_file: /etc/eturnal/tls/key.pem
+   ```
+
+   In this case, replace the `turn:` schemes in the homeserver's `turn_uris` settings
+   with `turns:`.
More is described [here](../../usage/configuration/config_documentation.md#turn_uris).
+
+   We recommend that you only try to set up TLS/DTLS once you have set up a
+   basic installation and got it working.
+
+   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
+   not work with any Matrix client that uses Chromium's WebRTC library. This
+   currently includes Element Android & iOS; for more details, see their
+   [respective](https://github.com/vector-im/element-android/issues/1533)
+   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
+   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
+   Consider using a ZeroSSL certificate for your TURN server as a working alternative.
+
+1. Firewall
+
+   Ensure your firewall allows traffic into the TURN server on the ports
+   you've configured it to listen on (by default, 3478 and 5349 for TURN
+   traffic, allowing both TCP and UDP, and ports 49152-65535 for the UDP
+   relay).
+
+1. Reloading/restarting `eturnal`
+
+   Changes in the configuration file require `eturnal` to reload or restart;
+   this can be achieved by:
+
+   ```sh
+   eturnalctl reload
+   ```
+
+   `eturnal` performs a configuration check before actually reloading/restarting
+   and provides hints if something is not configured correctly.
+
+### eturnalctl operations script
+
+`eturnal` offers a handy [operations script](https://eturnal.net/documentation/#Operation)
+which can be called e.g. to check whether the service is up, to restart the service,
+to query how many active sessions exist, to change logging behaviour, and so on.
+
+Hint: If `eturnalctl` is not part of your `$PATH`, consider either symlinking it (e.g. `ln -s /opt/eturnal/bin/eturnalctl /usr/local/bin/eturnalctl`) or calling it from the default `eturnal` directory directly, e.g. `/opt/eturnal/bin/eturnalctl info`
diff --git a/docs/turn-howto.md b/docs/turn-howto.md
index 37a311ad9c..b466cab40c 100644
--- a/docs/turn-howto.md
+++ b/docs/turn-howto.md
@@ -9,222 +9,28 @@ allows the homeserver to generate credentials that are valid for use on the
 TURN server through the use of a secret shared between the homeserver and the
 TURN server.
 
-The following sections describe how to install [coturn]() (which implements the TURN REST API) and integrate it with synapse.
+This documentation provides two TURN server configuration examples:
+
+* [coturn](setup/turn/coturn.md)
+* [eturnal](setup/turn/eturnal.md)
 
 ## Requirements
 
-For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP.
+For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP.
+
+Hosting TURN behind NAT requires port forwarding and for the NAT gateway to have a public IP.
+However, even with appropriate configuration, NAT is known to cause issues and often does not work.
 
-## `coturn` setup
-
-### Initial installation
-
-The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.
-
-#### Debian installation
-
-Just install the debian package:
-
-```sh
-apt install coturn
-```
-
-This will install and start a systemd service called `coturn`.
-
-#### Source installation
-
-1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.
-
-1. 
Configure it: - - ```sh - ./configure - ``` - - You may need to install `libevent2`: if so, you should do so in - the way recommended by your operating system. You can ignore - warnings about lack of database support: a database is unnecessary - for this purpose. - -1. Build and install it: - - ```sh - make - make install - ``` - -### Configuration - -1. Create or edit the config file in `/etc/turnserver.conf`. The relevant - lines, with example values, are: - - ``` - use-auth-secret - static-auth-secret=[your secret key here] - realm=turn.myserver.org - ``` - - See `turnserver.conf` for explanations of the options. One way to generate - the `static-auth-secret` is with `pwgen`: - - ```sh - pwgen -s 64 1 - ``` - - A `realm` must be specified, but its value is somewhat arbitrary. (It is - sent to clients as part of the authentication flow.) It is conventional to - set it to be your server name. - -1. You will most likely want to configure coturn to write logs somewhere. The - easiest way is normally to send them to the syslog: - - ```sh - syslog - ``` - - (in which case, the logs will be available via `journalctl -u coturn` on a - systemd system). Alternatively, coturn can be configured to write to a - logfile - check the example config file supplied with coturn. - -1. Consider your security settings. TURN lets users request a relay which will - connect to arbitrary IP addresses and ports. The following configuration is - suggested as a minimum starting point: - - ``` - # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay. - no-tcp-relay - - # don't let the relay ever try to connect to private IP address ranges within your network (if any) - # given the turn server is likely behind your firewall, remember to include any privileged public IPs too. - denied-peer-ip=10.0.0.0-10.255.255.255 - denied-peer-ip=192.168.0.0-192.168.255.255 - denied-peer-ip=172.16.0.0-172.31.255.255 - - # recommended additional local peers to block, to mitigate external access to internal services. - # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability - no-multicast-peers - denied-peer-ip=0.0.0.0-0.255.255.255 - denied-peer-ip=100.64.0.0-100.127.255.255 - denied-peer-ip=127.0.0.0-127.255.255.255 - denied-peer-ip=169.254.0.0-169.254.255.255 - denied-peer-ip=192.0.0.0-192.0.0.255 - denied-peer-ip=192.0.2.0-192.0.2.255 - denied-peer-ip=192.88.99.0-192.88.99.255 - denied-peer-ip=198.18.0.0-198.19.255.255 - denied-peer-ip=198.51.100.0-198.51.100.255 - denied-peer-ip=203.0.113.0-203.0.113.255 - denied-peer-ip=240.0.0.0-255.255.255.255 - - # special case the turn server itself so that client->TURN->TURN->client flows work - # this should be one of the turn server's listening IPs - allowed-peer-ip=10.0.0.1 - - # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS. - user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user. - total-quota=1200 - ``` - -1. Also consider supporting TLS/DTLS. To do this, add the following settings - to `turnserver.conf`: - - ``` - # TLS certificates, including intermediate certs. - # For Let's Encrypt certificates, use `fullchain.pem` here. 
- cert=/path/to/fullchain.pem - - # TLS private key file - pkey=/path/to/privkey.pem - - # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed - #no-tls - #no-dtls - ``` - - In this case, replace the `turn:` schemes in the `turn_uris` settings below - with `turns:`. - - We recommend that you only try to set up TLS/DTLS once you have set up a - basic installation and got it working. - - NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will - not work with any Matrix client that uses Chromium's WebRTC library. This - currently includes Element Android & iOS; for more details, see their - [respective](https://github.com/vector-im/element-android/issues/1533) - [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying - [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710). - Consider using a ZeroSSL certificate for your TURN server as a working alternative. - -1. Ensure your firewall allows traffic into the TURN server on the ports - you've configured it to listen on (By default: 3478 and 5349 for TURN - traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535 - for the UDP relay.) - -1. If your TURN server is behind NAT, the NAT gateway must have an external, - publicly-reachable IP address. You must configure coturn to advertise that - address to connecting clients: - - ``` - external-ip=EXTERNAL_NAT_IPv4_ADDRESS - ``` - - You may optionally limit the TURN server to listen only on the local - address that is mapped by NAT to the external address: - - ``` - listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS - ``` - - If your NAT gateway is reachable over both IPv4 and IPv6, you may - configure coturn to advertise each available address: - - ``` - external-ip=EXTERNAL_NAT_IPv4_ADDRESS - external-ip=EXTERNAL_NAT_IPv6_ADDRESS - ``` - - When advertising an external IPv6 address, ensure that the firewall and - network settings of the system running your TURN server are configured to - accept IPv6 traffic, and that the TURN server is listening on the local - IPv6 address that is mapped by NAT to the external IPv6 address. - -1. (Re)start the turn server: - - * If you used the Debian package (or have set up a systemd unit yourself): - ```sh - systemctl restart coturn - ``` - - * If you installed from source: - - ```sh - bin/turnserver -o - ``` +Afterwards, the homeserver needs some further configuration. ## Synapse setup Your homeserver configuration file needs the following extra keys: -1. "`turn_uris`": This needs to be a yaml list of public-facing URIs - for your TURN server to be given out to your clients. Add separate - entries for each transport your TURN server supports. -2. "`turn_shared_secret`": This is the secret shared between your - homeserver and your TURN server, so you should set it to the same - string you used in turnserver.conf. -3. "`turn_user_lifetime`": This is the amount of time credentials - generated by your homeserver are valid for (in milliseconds). - Shorter times offer less potential for abuse at the expense of - increased traffic between web clients and your homeserver to - refresh credentials. The TURN REST API specification recommends - one day (86400000). -4. "`turn_allow_guests`": Whether to allow guest users to use the - TURN server. This is enabled by default, as otherwise VoIP will - not work reliably for guests. 
However, it does introduce a
-  security risk as it lets guests connect to arbitrary endpoints
-  without having gone through a CAPTCHA or similar to register a
-  real account.
+1. [`turn_uris`](usage/configuration/config_documentation.md#turn_uris)
+2. [`turn_shared_secret`](usage/configuration/config_documentation.md#turn_shared_secret)
+3. [`turn_user_lifetime`](usage/configuration/config_documentation.md#turn_user_lifetime)
+4. [`turn_allow_guests`](usage/configuration/config_documentation.md#turn_allow_guests)
 
 As an example, here is the relevant section of the config file for `matrix.org`. The
 `turn_uris` are appropriate for TURN servers listening on the default ports, with no TLS.
@@ -263,7 +69,7 @@ Here are a few things to try:
 * Check that you have opened your firewall to allow UDP traffic to the UDP
   relay ports (49152-65535 by default).
 
-  * Try disabling `coturn`'s TLS/DTLS listeners and enable only its (unencrypted)
+  * Try disabling TLS/DTLS listeners and enabling only the (unencrypted)
   TCP/UDP listeners. (This will only leave signaling traffic unencrypted;
   voice & video WebRTC traffic is always encrypted.)
@@ -288,12 +94,19 @@ Here are a few things to try:
 
   * ensure that your TURN server uses the NAT gateway as its default route.
 
-  * Enable more verbose logging in coturn via the `verbose` setting:
+  * Enable more verbose logging: in `coturn`, via the `verbose` setting:
 
     ```
    verbose
    ```
 
+  or, with `eturnal`, via the shell command `eturnalctl loglevel debug` or in the configuration file (the service needs to [reload](https://eturnal.net/documentation/#Operation) for it to become effective):
+
+  ```yaml
+  ## Logging configuration:
+  log_level: debug
+  ```
+
   ... and then see if there are any clues in its logs.
 
   * If you are using a browser-based client under Chrome, check
@@ -317,7 +130,7 @@ Here are a few things to try:
   matrix client to your homeserver in your browser's network inspector. In
   the response you should see `username` and `password`. Or:
 
-  * Use the following shell commands:
+  * Use the following shell commands for `coturn`:
 
     ```sh
    secret=staticAuthSecretHere
 
    u=$((`date +%s` + 3600)):test
    p=$(echo -n $u | openssl dgst -hmac $secret -sha1 -binary | base64)
    echo -e "username: $u\npassword: $p"
    ```
 
-    Or:
+    or for `eturnal`:
+
+    ```sh
+    eturnalctl credentials
+    ```
+
 
-  * Temporarily configure coturn to accept a static username/password. To do
-    this, comment out `use-auth-secret` and `static-auth-secret` and add the
-    following:
+  * Or (**coturn only**): Temporarily configure `coturn` to accept a static
+    username/password. To do this, comment out `use-auth-secret` and
+    `static-auth-secret` and add the following:
 
     ```
    lt-cred-mech
-- cgit 1.5.1


From 634359b083eae319d7f065114851590431b7c7fb Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Tue, 15 Nov 2022 10:43:17 +0000
Subject: Update docstring to clarify that `get_partial_state_events_batch`
 does not just give you completely arbitrary partial-state events. (#14417)

---
 changelog.d/14417.misc                          |  1 +
 synapse/storage/databases/main/events_worker.py | 10 +++++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/14417.misc

diff --git a/changelog.d/14417.misc b/changelog.d/14417.misc
new file mode 100644
index 0000000000..7527fe97c2
--- /dev/null
+++ b/changelog.d/14417.misc
@@ -0,0 +1 @@
+Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events.
\ No newline at end of file diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 7a003ab88f..296e50d661 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -2228,7 +2228,15 @@ class EventsWorkerStore(SQLBaseStore): return result is not None async def get_partial_state_events_batch(self, room_id: str) -> List[str]: - """Get a list of events in the given room that have partial state""" + """ + Get a list of events in the given room that: + - have partial state; and + - are ready to be resynced (because they have no prev_events that are + partial-stated) + + See the docstring on `_get_partial_state_events_batch_txn` for more + information. + """ return await self.db_pool.runInteraction( "get_partial_state_events_batch", self._get_partial_state_events_batch_txn, -- cgit 1.5.1 From b5ab2c428a1c5edd634ff084019811e5f6b963d8 Mon Sep 17 00:00:00 2001 From: Tuomas Ojamies Date: Tue, 15 Nov 2022 13:55:00 +0100 Subject: Support using SSL on worker endpoints. (#14128) * Fix missing SSL support in worker endpoints. * Add changelog * SSL for Replication endpoint * Remove unit test change * Refactor listener creation to reduce duplicated code * Fix the logger message * Update synapse/app/_base.py Co-authored-by: Patrick Cloke * Update synapse/app/_base.py Co-authored-by: Patrick Cloke * Update synapse/app/_base.py Co-authored-by: Patrick Cloke * Add config documentation for new TLS option Co-authored-by: Tuomas Ojamies Co-authored-by: Patrick Cloke Co-authored-by: Olivier Wilkinson (reivilibre) --- changelog.d/14128.misc | 1 + docs/usage/configuration/config_documentation.md | 20 +++++++++ synapse/app/_base.py | 53 +++++++++++++++++++++++- synapse/app/generic_worker.py | 28 ++++--------- synapse/app/homeserver.py | 34 ++------------- synapse/config/workers.py | 7 ++++ synapse/replication/http/_base.py | 10 ++++- 7 files changed, 100 insertions(+), 53 deletions(-) create mode 100644 changelog.d/14128.misc diff --git a/changelog.d/14128.misc b/changelog.d/14128.misc new file mode 100644 index 0000000000..29168ef955 --- /dev/null +++ b/changelog.d/14128.misc @@ -0,0 +1 @@ +Add TLS support for generic worker endpoints. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 9a6bd08d01..f5937dd902 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3893,6 +3893,26 @@ Example configuration: worker_replication_http_port: 9093 ``` --- +### `worker_replication_http_tls` + +Whether TLS should be used for talking to the HTTP replication port on the main +Synapse process. +The main Synapse process defines this with the `tls` option on its [listener](#listeners) that +has the `replication` resource enabled. + +**Please note:** by default, it is not safe to expose replication ports to the +public Internet, even with TLS enabled. +See [`worker_replication_secret`](#worker_replication_secret). + +Defaults to `false`. + +*Added in Synapse 1.72.0.* + +Example configuration: +```yaml +worker_replication_http_tls: true +``` +--- ### `worker_listeners` A worker can handle HTTP requests. 
To do so, a `worker_listeners` option diff --git a/synapse/app/_base.py b/synapse/app/_base.py index a683ebf4cb..8f5b1a20f5 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -47,6 +47,7 @@ from twisted.internet.tcp import Port from twisted.logger import LoggingFile, LogLevel from twisted.protocols.tls import TLSMemoryBIOFactory from twisted.python.threadpool import ThreadPool +from twisted.web.resource import Resource import synapse.util.caches from synapse.api.constants import MAX_PDU_SIZE @@ -55,12 +56,13 @@ from synapse.app.phone_stats_home import start_phone_stats_home from synapse.config import ConfigError from synapse.config._base import format_config_error from synapse.config.homeserver import HomeServerConfig -from synapse.config.server import ManholeConfig +from synapse.config.server import ListenerConfig, ManholeConfig from synapse.crypto import context_factory from synapse.events.presence_router import load_legacy_presence_router from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.handlers.auth import load_legacy_password_auth_providers +from synapse.http.site import SynapseSite from synapse.logging.context import PreserveLoggingContext from synapse.logging.opentracing import init_tracer from synapse.metrics import install_gc_manager, register_threadpool @@ -357,6 +359,55 @@ def listen_tcp( return r # type: ignore[return-value] +def listen_http( + listener_config: ListenerConfig, + root_resource: Resource, + version_string: str, + max_request_body_size: int, + context_factory: IOpenSSLContextFactory, + reactor: IReactorSSL = reactor, +) -> List[Port]: + port = listener_config.port + bind_addresses = listener_config.bind_addresses + tls = listener_config.tls + + assert listener_config.http_options is not None + + site_tag = listener_config.http_options.tag + if site_tag is None: + site_tag = str(port) + + site = SynapseSite( + "synapse.access.%s.%s" % ("https" if tls else "http", site_tag), + site_tag, + listener_config, + root_resource, + version_string, + max_request_body_size=max_request_body_size, + reactor=reactor, + ) + if tls: + # refresh_certificate should have been called before this. 
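+        # (Both the main process and generic workers pass the HomeServer's
+        # `tls_server_context_factory` for this argument.)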
+ assert context_factory is not None + ports = listen_ssl( + bind_addresses, + port, + site, + context_factory, + reactor=reactor, + ) + logger.info("Synapse now listening on TCP port %d (TLS)", port) + else: + ports = listen_tcp( + bind_addresses, + port, + site, + reactor=reactor, + ) + logger.info("Synapse now listening on TCP port %d", port) + return ports + + def listen_ssl( bind_addresses: Collection[str], port: int, diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 51446b49cd..1d9aef45c2 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -44,7 +44,7 @@ from synapse.config.server import ListenerConfig from synapse.federation.transport.server import TransportLayerServer from synapse.http.server import JsonResource, OptionsResource from synapse.http.servlet import RestServlet, parse_json_object_from_request -from synapse.http.site import SynapseRequest, SynapseSite +from synapse.http.site import SynapseRequest from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource @@ -288,15 +288,9 @@ class GenericWorkerServer(HomeServer): DATASTORE_CLASS = GenericWorkerSlavedStore # type: ignore def _listen_http(self, listener_config: ListenerConfig) -> None: - port = listener_config.port - bind_addresses = listener_config.bind_addresses assert listener_config.http_options is not None - site_tag = listener_config.http_options.tag - if site_tag is None: - site_tag = str(port) - # We always include a health resource. resources: Dict[str, Resource] = {"/health": HealthResource()} @@ -395,23 +389,15 @@ class GenericWorkerServer(HomeServer): root_resource = create_resource_tree(resources, OptionsResource()) - _base.listen_tcp( - bind_addresses, - port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - max_request_body_size=max_request_body_size(self.config), - reactor=self.get_reactor(), - ), + _base.listen_http( + listener_config, + root_resource, + self.version_string, + max_request_body_size(self.config), + self.tls_server_context_factory, reactor=self.get_reactor(), ) - logger.info("Synapse worker now listening on port %d", port) - def start_listening(self) -> None: for listener in self.config.worker.worker_listeners: if listener.type == "http": diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index de3f08876f..4f4fee4782 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -37,8 +37,7 @@ from synapse.api.urls import ( from synapse.app import _base from synapse.app._base import ( handle_startup_exception, - listen_ssl, - listen_tcp, + listen_http, max_request_body_size, redirect_stdio_to_logs, register_start, @@ -53,7 +52,6 @@ from synapse.http.server import ( RootOptionsRedirectResource, StaticResource, ) -from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource @@ -83,8 +81,6 @@ class SynapseHomeServer(HomeServer): self, config: HomeServerConfig, listener_config: ListenerConfig ) -> Iterable[Port]: port = listener_config.port - bind_addresses = listener_config.bind_addresses - tls = listener_config.tls # Must exist since this is an HTTP listener. 
assert listener_config.http_options is not None
 
         site_tag = listener_config.http_options.tag
@@ -140,37 +136,15 @@ class SynapseHomeServer(HomeServer):
         else:
             root_resource = OptionsResource()
 
-        site = SynapseSite(
-            "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
-            site_tag,
+        ports = listen_http(
             listener_config,
             create_resource_tree(resources, root_resource),
             self.version_string,
-            max_request_body_size=max_request_body_size(self.config),
+            max_request_body_size(self.config),
+            self.tls_server_context_factory,
             reactor=self.get_reactor(),
         )
 
-        if tls:
-            # refresh_certificate should have been called before this.
-            assert self.tls_server_context_factory is not None
-            ports = listen_ssl(
-                bind_addresses,
-                port,
-                site,
-                self.tls_server_context_factory,
-                reactor=self.get_reactor(),
-            )
-            logger.info("Synapse now listening on TCP port %d (TLS)", port)
-
-        else:
-            ports = listen_tcp(
-                bind_addresses,
-                port,
-                site,
-                reactor=self.get_reactor(),
-            )
-            logger.info("Synapse now listening on TCP port %d", port)
-
         return ports
 
     def _configure_named_resource(
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 0fb725dd8f..88b3168cbc 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -67,6 +67,7 @@ class InstanceLocationConfig:
 
     host: str
     port: int
+    tls: bool = False
 
 
 @attr.s
@@ -149,6 +150,12 @@ class WorkerConfig(Config):
         # The port on the main synapse for HTTP replication endpoint
         self.worker_replication_http_port = config.get("worker_replication_http_port")
 
+        # Whether TLS is used when talking to the main synapse's HTTP replication endpoint.
+        # For backward compatibility this defaults to False.
+        self.worker_replication_http_tls = config.get(
+            "worker_replication_http_tls", False
+        )
+
         # The shared secret used for authentication when connecting to the main synapse.
         self.worker_replication_secret = config.get("worker_replication_secret", None)
 
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index acb0bd18f7..5e661f8c73 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -184,8 +184,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
         client = hs.get_simple_http_client()
         local_instance_name = hs.get_instance_name()
 
+        # The values of these options should match the replication listener settings.
         master_host = hs.config.worker.worker_replication_host
         master_port = hs.config.worker.worker_replication_http_port
+        master_tls = hs.config.worker.worker_replication_http_tls
 
         instance_map = hs.config.worker.instance_map
 
@@ -205,9 +207,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
             if instance_name == "master":
                 host = master_host
                 port = master_port
+                tls = master_tls
             elif instance_name in instance_map:
                 host = instance_map[instance_name].host
                 port = instance_map[instance_name].port
+                tls = instance_map[instance_name].tls
             else:
                 raise Exception(
                     "Instance %r not in 'instance_map' config" % (instance_name,)
@@ -238,7 +242,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
                     "Unknown METHOD on %s replication endpoint" % (cls.NAME,)
                 )
 
-            uri = "http://%s:%s/_synapse/replication/%s/%s" % (
+            # The scheme is hard-coded: http by default, or https if the replication
+            # port is configured with tls set to true.
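+            # For example, a (hypothetical) `instance_map` entry
+            # `worker1: {host: worker1.example.com, port: 9093, tls: true}`
+            # yields https://worker1.example.com:9093/_synapse/replication/<NAME>/...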
+            scheme = "https" if tls else "http"
+            uri = "%s://%s:%s/_synapse/replication/%s/%s" % (
+                scheme,
                 host,
                 port,
                 cls.NAME,
-- cgit 1.5.1


From 63cc56affa3872443fffcac655413a8d9ffabfe4 Mon Sep 17 00:00:00 2001
From: "DeepBlueV7.X"
Date: Tue, 15 Nov 2022 16:29:30 +0100
Subject: Send content rules with pattern_type to clients (#14356)

---
 changelog.d/14356.bugfix     |  1 +
 synapse/push/clientformat.py | 16 +++++++++++++---
 2 files changed, 14 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/14356.bugfix

diff --git a/changelog.d/14356.bugfix b/changelog.d/14356.bugfix
new file mode 100644
index 0000000000..288d58a540
--- /dev/null
+++ b/changelog.d/14356.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.66.0 where certain push rules would not be sent to clients. Contributed by Nico.
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index 7095ae83f9..622a1e35c5 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -44,6 +44,12 @@ def format_push_rules_for_user(
 
             rulearray.append(template_rule)
 
+            pattern_type = template_rule.pop("pattern_type", None)
+            if pattern_type == "user_id":
+                template_rule["pattern"] = user.to_string()
+            elif pattern_type == "user_localpart":
+                template_rule["pattern"] = user.localpart
+
             template_rule["enabled"] = enabled
 
             if "conditions" not in template_rule:
@@ -93,10 +99,14 @@ def _rule_to_template(rule: PushRule) -> Optional[Dict[str, Any]]:
         if len(rule.conditions) != 1:
             return None
         thecond = rule.conditions[0]
-        if "pattern" not in thecond:
-            return None
+
         templaterule = {"actions": rule.actions}
-        templaterule["pattern"] = thecond["pattern"]
+        if "pattern" in thecond:
+            templaterule["pattern"] = thecond["pattern"]
+        elif "pattern_type" in thecond:
+            templaterule["pattern_type"] = thecond["pattern_type"]
+        else:
+            return None
     else:
         # This should not be reached unless this function is not kept in sync
        # with PRIORITY_CLASS_INVERSE_MAP.
-- cgit 1.5.1


From 258b5285b6b486526dffef9431c2ab063913f42b Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 15 Nov 2022 16:36:43 +0000
Subject: Fix typechecking errors introduced in #14128 (#14455)

* Fix typechecking errors introduced in #14128

* Changelog

* Correct annotations so that context_factory works if you don't use TLS

---
 changelog.d/14455.misc | 1 +
 synapse/app/_base.py   | 4 ++--
 synapse/server.py      | 5 +++--
 3 files changed, 6 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/14455.misc

diff --git a/changelog.d/14455.misc b/changelog.d/14455.misc
new file mode 100644
index 0000000000..29168ef955
--- /dev/null
+++ b/changelog.d/14455.misc
@@ -0,0 +1 @@
+Add TLS support for generic worker endpoints.
diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 8f5b1a20f5..41d2732ef9 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -364,8 +364,8 @@ def listen_http( root_resource: Resource, version_string: str, max_request_body_size: int, - context_factory: IOpenSSLContextFactory, - reactor: IReactorSSL = reactor, + context_factory: Optional[IOpenSSLContextFactory], + reactor: ISynapseReactor = reactor, ) -> List[Port]: port = listener_config.port bind_addresses = listener_config.bind_addresses diff --git a/synapse/server.py b/synapse/server.py index c4e025af22..f0a60d0056 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -221,8 +221,6 @@ class HomeServer(metaclass=abc.ABCMeta): # instantiated during setup() for future return by get_datastores() DATASTORE_CLASS = abc.abstractproperty() - tls_server_context_factory: Optional[IOpenSSLContextFactory] - def __init__( self, hostname: str, @@ -258,6 +256,9 @@ class HomeServer(metaclass=abc.ABCMeta): self._module_web_resources: Dict[str, Resource] = {} self._module_web_resources_consumed = False + # This attribute is set by the free function `refresh_certificate`. + self.tls_server_context_factory: Optional[IOpenSSLContextFactory] = None + def register_module_web_resource(self, path: str, resource: Resource) -> None: """Allows a module to register a web resource to be served at the given path. -- cgit 1.5.1 From 1eed795fc56d95df3968e37f3a4db92f24513e15 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 15 Nov 2022 17:35:19 +0000 Subject: Include heroes in partial join responses' state (#14442) * Pull out hero selection logic * Include heroes in partial join response's state * Changelog * Fixup trial test * Remove TODO --- changelog.d/14442.feature | 1 + synapse/federation/federation_server.py | 23 +++++++++++++++++---- synapse/handlers/sync.py | 20 +++---------------- synapse/storage/databases/main/roommember.py | 30 ++++++++++++++++++++++++++++ tests/federation/test_federation_server.py | 11 ++++++---- 5 files changed, 60 insertions(+), 25 deletions(-) create mode 100644 changelog.d/14442.feature diff --git a/changelog.d/14442.feature b/changelog.d/14442.feature new file mode 100644 index 0000000000..917e7edfb3 --- /dev/null +++ b/changelog.d/14442.feature @@ -0,0 +1 @@ +Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. 
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 59e351595b..bb20af6e91 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -74,6 +74,8 @@ from synapse.replication.http.federation import (
 )
 from synapse.storage.databases.main.events import PartialStateConflictError
 from synapse.storage.databases.main.lock import Lock
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
+from synapse.storage.roommember import MemberSummary
 from synapse.types import JsonDict, StateMap, get_domain_from_id
 from synapse.util import json_decoder, unwrapFirstError
 from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
@@ -691,8 +693,9 @@ class FederationServer(FederationBase):
             state_event_ids: Collection[str]
             servers_in_room: Optional[Collection[str]]
             if caller_supports_partial_state:
+                summary = await self.store.get_room_summary(room_id)
                 state_event_ids = _get_event_ids_for_partial_state_join(
-                    event, prev_state_ids
+                    event, prev_state_ids, summary
                 )
                 servers_in_room = await self.state.get_hosts_in_room_at_events(
                     room_id, event_ids=event.prev_event_ids()
@@ -1495,6 +1498,7 @@ class FederationHandlerRegistry:
 def _get_event_ids_for_partial_state_join(
     join_event: EventBase,
     prev_state_ids: StateMap[str],
+    summary: Dict[str, MemberSummary],
 ) -> Collection[str]:
     """Calculate state to be returned in a partial_state send_join
 
@@ -1521,8 +1525,19 @@ def _get_event_ids_for_partial_state_join(
     if current_membership_event_id is not None:
         state_event_ids.add(current_membership_event_id)
 
-    # TODO: return a few more members:
-    #   - those with invites
-    #   - those that are kicked? / banned
+    name_id = prev_state_ids.get((EventTypes.Name, ""))
+    canonical_alias_id = prev_state_ids.get((EventTypes.CanonicalAlias, ""))
+    if not name_id and not canonical_alias_id:
+        # Also include the hero members of the room (for DM rooms without a title).
+        # To do this properly, we should select the correct subset of membership events
+        # from `prev_state_ids`. Instead, we are lazier and use the (cached)
+        # `get_room_summary` function, which is based on the current state of the room.
+        # This introduces races; we choose to ignore them because a) they should be rare
+        # and b) even if it's wrong, joining servers will get the full state eventually.
+ heroes = extract_heroes_from_room_summary(summary, join_event.state_key) + for hero in heroes: + membership_event_id = prev_state_ids.get((EventTypes.Member, hero)) + if membership_event_id: + state_event_ids.add(membership_event_id) return state_event_ids diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 1db5d68021..259456b55d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -41,6 +41,7 @@ from synapse.logging.context import current_context from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.databases.main.event_push_actions import RoomNotifCounts +from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter from synapse.types import ( @@ -805,18 +806,6 @@ class SyncHandler: if canonical_alias and canonical_alias.content.get("alias"): return summary - me = sync_config.user.to_string() - - joined_user_ids = [ - r[0] for r in details.get(Membership.JOIN, empty_ms).members if r[0] != me - ] - invited_user_ids = [ - r[0] for r in details.get(Membership.INVITE, empty_ms).members if r[0] != me - ] - gone_user_ids = [ - r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me - ] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me] - # FIXME: only build up a member_ids list for our heroes member_ids = {} for membership in ( @@ -828,11 +817,8 @@ class SyncHandler: for user_id, event_id in details.get(membership, empty_ms).members: member_ids[user_id] = event_id - # FIXME: order by stream ordering rather than as returned by SQL - if joined_user_ids or invited_user_ids: - summary["m.heroes"] = sorted(joined_user_ids + invited_user_ids)[0:5] - else: - summary["m.heroes"] = sorted(gone_user_ids)[0:5] + me = sync_config.user.to_string() + summary["m.heroes"] = extract_heroes_from_room_summary(details, me) if not sync_config.filter_collection.lazy_load_members(): return summary diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index e56a13f21e..f02c1d7ea7 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1517,6 +1517,36 @@ class RoomMemberStore( await self.db_pool.runInteraction("forget_membership", f) +def extract_heroes_from_room_summary( + details: Mapping[str, MemberSummary], me: str +) -> List[str]: + """Determine the users that represent a room, from the perspective of the `me` user. + + The rules which say which users we select are specified in the "Room Summary" + section of + https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv3sync + + Returns a list (possibly empty) of heroes' mxids. 
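+
+    For example (with illustrative user IDs): in an unnamed room where
+    @alice:test is joined and @bob:test is invited, calling this with
+    me="@alice:test" returns ["@bob:test"].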
+ """ + empty_ms = MemberSummary([], 0) + + joined_user_ids = [ + r[0] for r in details.get(Membership.JOIN, empty_ms).members if r[0] != me + ] + invited_user_ids = [ + r[0] for r in details.get(Membership.INVITE, empty_ms).members if r[0] != me + ] + gone_user_ids = [ + r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me + ] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me] + + # FIXME: order by stream ordering rather than as returned by SQL + if joined_user_ids or invited_user_ids: + return sorted(joined_user_ids + invited_user_ids)[0:5] + else: + return sorted(gone_user_ids)[0:5] + + @attr.s(slots=True, auto_attribs=True) class _JoinedHostsCache: """The cached data used by the `_get_joined_hosts_cache`.""" diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 3a6ef221ae..177e5b5afc 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -212,7 +212,7 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): self.assertEqual(r[("m.room.member", joining_user)].membership, "join") @override_config({"experimental_features": {"msc3706_enabled": True}}) - def test_send_join_partial_state(self): + def test_send_join_partial_state(self) -> None: """When MSC3706 support is enabled, /send_join should return partial state""" joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME join_result = self._make_join(joining_user) @@ -240,6 +240,9 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): ("m.room.power_levels", ""), ("m.room.join_rules", ""), ("m.room.history_visibility", ""), + # Users included here because they're heroes. + ("m.room.member", "@kermit:test"), + ("m.room.member", "@fozzie:test"), ], ) @@ -249,9 +252,9 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): ] self.assertCountEqual( returned_auth_chain_events, - [ - ("m.room.member", "@kermit:test"), - ], + # TODO: change the test so that we get at least one event in the auth chain + # here. 
+ [], ) # the room should show that the new user is a member -- cgit 1.5.1 From 5cb6ad3b87caaadaedc3cc57e5513feb459b519d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 16 Nov 2022 11:14:38 +0000 Subject: Fix HTML templates missing correct HTML tags (#14448) --- changelog.d/14448.bugfix | 1 + synapse/res/templates/invalid_token.html | 1 + synapse/res/templates/notif_mail.html | 2 ++ synapse/res/templates/password_reset.html | 1 + synapse/res/templates/password_reset_confirmation.html | 1 + synapse/res/templates/password_reset_failure.html | 1 + synapse/res/templates/password_reset_success.html | 1 + synapse/res/templates/recaptcha.html | 1 + synapse/res/templates/registration.html | 1 + synapse/res/templates/registration_failure.html | 1 + synapse/res/templates/registration_success.html | 1 + synapse/res/templates/registration_token.html | 1 + synapse/res/templates/sso_account_deactivated.html | 1 + synapse/res/templates/sso_auth_account_details.html | 1 + synapse/res/templates/sso_auth_bad_user.html | 1 + synapse/res/templates/sso_auth_confirm.html | 1 + synapse/res/templates/sso_auth_success.html | 1 + synapse/res/templates/sso_error.html | 1 + synapse/res/templates/sso_login_idp_picker.html | 1 + synapse/res/templates/sso_new_user_consent.html | 1 + synapse/res/templates/sso_redirect_confirm.html | 1 + synapse/res/templates/terms.html | 1 + 22 files changed, 23 insertions(+) create mode 100644 changelog.d/14448.bugfix diff --git a/changelog.d/14448.bugfix b/changelog.d/14448.bugfix new file mode 100644 index 0000000000..4bf1c183f6 --- /dev/null +++ b/changelog.d/14448.bugfix @@ -0,0 +1 @@ +Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. diff --git a/synapse/res/templates/invalid_token.html b/synapse/res/templates/invalid_token.html index d0b1dae669..b19e3023a1 100644 --- a/synapse/res/templates/invalid_token.html +++ b/synapse/res/templates/invalid_token.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Invalid renewal token.{% endblock %} {% block body %} diff --git a/synapse/res/templates/notif_mail.html b/synapse/res/templates/notif_mail.html index 939d40315f..2add9dd859 100644 --- a/synapse/res/templates/notif_mail.html +++ b/synapse/res/templates/notif_mail.html @@ -1,3 +1,5 @@ +{% extends "_base.html" %} + {% block title %}New activity in room{% endblock %} {% block header %} diff --git a/synapse/res/templates/password_reset.html b/synapse/res/templates/password_reset.html index de5a9ec68f..1f267946c8 100644 --- a/synapse/res/templates/password_reset.html +++ b/synapse/res/templates/password_reset.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Password reset{% endblock %} {% block body %} diff --git a/synapse/res/templates/password_reset_confirmation.html b/synapse/res/templates/password_reset_confirmation.html index 0eac64b6a8..fabb9a6ed5 100644 --- a/synapse/res/templates/password_reset_confirmation.html +++ b/synapse/res/templates/password_reset_confirmation.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Password reset confirmation{% endblock %} {% block body %} diff --git a/synapse/res/templates/password_reset_failure.html b/synapse/res/templates/password_reset_failure.html index 977babdb40..9990e860f9 100644 --- a/synapse/res/templates/password_reset_failure.html +++ b/synapse/res/templates/password_reset_failure.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Password reset failure{% endblock %} {% block body %} diff --git 
a/synapse/res/templates/password_reset_success.html b/synapse/res/templates/password_reset_success.html index 0e99fad7ff..edada513ab 100644 --- a/synapse/res/templates/password_reset_success.html +++ b/synapse/res/templates/password_reset_success.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Password reset success{% endblock %} {% block body %} diff --git a/synapse/res/templates/recaptcha.html b/synapse/res/templates/recaptcha.html index feaf3f6aed..8204928cdf 100644 --- a/synapse/res/templates/recaptcha.html +++ b/synapse/res/templates/recaptcha.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Authentication{% endblock %} {% block header %} diff --git a/synapse/res/templates/registration.html b/synapse/res/templates/registration.html index 189960a832..cdb815665e 100644 --- a/synapse/res/templates/registration.html +++ b/synapse/res/templates/registration.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Registration{% endblock %} {% block body %} diff --git a/synapse/res/templates/registration_failure.html b/synapse/res/templates/registration_failure.html index 3debe9301d..ae2a9cae2c 100644 --- a/synapse/res/templates/registration_failure.html +++ b/synapse/res/templates/registration_failure.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Registration failure{% endblock %} {% block body %} diff --git a/synapse/res/templates/registration_success.html b/synapse/res/templates/registration_success.html index e2dd020a9e..6d45111796 100644 --- a/synapse/res/templates/registration_success.html +++ b/synapse/res/templates/registration_success.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Your email has now been validated{% endblock %} {% block body %} diff --git a/synapse/res/templates/registration_token.html b/synapse/res/templates/registration_token.html index 2ee5866ba5..ee4e5295e7 100644 --- a/synapse/res/templates/registration_token.html +++ b/synapse/res/templates/registration_token.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Authentication{% endblock %} {% block header %} diff --git a/synapse/res/templates/sso_account_deactivated.html b/synapse/res/templates/sso_account_deactivated.html index c634229840..b85d96cc74 100644 --- a/synapse/res/templates/sso_account_deactivated.html +++ b/synapse/res/templates/sso_account_deactivated.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}SSO account deactivated{% endblock %} {% block header %} diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html index b516333373..11636d7f5d 100644 --- a/synapse/res/templates/sso_auth_account_details.html +++ b/synapse/res/templates/sso_auth_account_details.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Create your account{% endblock %} {% block header %} diff --git a/synapse/res/templates/sso_auth_bad_user.html b/synapse/res/templates/sso_auth_bad_user.html index 69fdcc9ef0..819d79a461 100644 --- a/synapse/res/templates/sso_auth_bad_user.html +++ b/synapse/res/templates/sso_auth_bad_user.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Authentication failed{% endblock %} {% block header %} diff --git a/synapse/res/templates/sso_auth_confirm.html b/synapse/res/templates/sso_auth_confirm.html index 2d106e0ae4..3927d6eda3 100644 --- a/synapse/res/templates/sso_auth_confirm.html +++ b/synapse/res/templates/sso_auth_confirm.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title 
%}Confirm it's you{% endblock %} {% block header %} diff --git a/synapse/res/templates/sso_auth_success.html b/synapse/res/templates/sso_auth_success.html index 56150eaefe..afeffb7191 100644 --- a/synapse/res/templates/sso_auth_success.html +++ b/synapse/res/templates/sso_auth_success.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Authentication successful{% endblock %} {% block header %} diff --git a/synapse/res/templates/sso_error.html b/synapse/res/templates/sso_error.html index e394a92623..6fa36c11c9 100644 --- a/synapse/res/templates/sso_error.html +++ b/synapse/res/templates/sso_error.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Authentication failed{% endblock %} {% block header %} diff --git a/synapse/res/templates/sso_login_idp_picker.html b/synapse/res/templates/sso_login_idp_picker.html index a2772ca9ef..58b0b3121c 100644 --- a/synapse/res/templates/sso_login_idp_picker.html +++ b/synapse/res/templates/sso_login_idp_picker.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Choose identity provider{% endblock %} {% block header %} diff --git a/synapse/res/templates/sso_new_user_consent.html b/synapse/res/templates/sso_new_user_consent.html index 126887d26c..fda29928d1 100644 --- a/synapse/res/templates/sso_new_user_consent.html +++ b/synapse/res/templates/sso_new_user_consent.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Agree to terms and conditions{% endblock %} {% block header %} diff --git a/synapse/res/templates/sso_redirect_confirm.html b/synapse/res/templates/sso_redirect_confirm.html index 887ee0d294..cc2e7b3a5b 100644 --- a/synapse/res/templates/sso_redirect_confirm.html +++ b/synapse/res/templates/sso_redirect_confirm.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Continue to your account{% endblock %} {% block header %} diff --git a/synapse/res/templates/terms.html b/synapse/res/templates/terms.html index 977c3d0bc7..ffabebdd8b 100644 --- a/synapse/res/templates/terms.html +++ b/synapse/res/templates/terms.html @@ -1,3 +1,4 @@ +{% extends "_base.html" %} {% block title %}Authentication{% endblock %} {% block header %} -- cgit 1.5.1 From f844b470f6f3a352e0e7cc81f9e5c58e35705e27 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 16 Nov 2022 12:03:05 +0000 Subject: Fix stub return type of `PushRuleEvaluator.run` (#14451) --- changelog.d/14451.misc | 1 + stubs/synapse/synapse_rust/push.pyi | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14451.misc diff --git a/changelog.d/14451.misc b/changelog.d/14451.misc new file mode 100644 index 0000000000..ab563195b4 --- /dev/null +++ b/changelog.d/14451.misc @@ -0,0 +1 @@ +Fix an incorrect stub return type for `PushRuleEvaluator.run`. \ No newline at end of file diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index f3b6d6c933..ceade65ef9 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -1,4 +1,4 @@ -from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union +from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union from synapse.types import JsonDict @@ -47,4 +47,4 @@ class PushRuleEvaluator: push_rules: FilteredPushRules, user_id: Optional[str], display_name: Optional[str], - ) -> Collection[dict]: ... + ) -> Collection[Union[Mapping, str]]: ... 
-- cgit 1.5.1 From 945a0928c793c0bd8573e179583d983187e5f392 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 16 Nov 2022 12:09:33 +0000 Subject: Don't filter state in /context response (#14461) We don't filter state usually, so doing so here is a waste of time. This is not much of an issue for clients that enable lazy loading of members, since there will be fewer state events. --- changelog.d/14461.misc | 1 + synapse/handlers/room.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14461.misc diff --git a/changelog.d/14461.misc b/changelog.d/14461.misc new file mode 100644 index 0000000000..cdfa577a4c --- /dev/null +++ b/changelog.d/14461.misc @@ -0,0 +1 @@ +Improve performance of `/context` in large rooms. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 66a50bca6e..6dcfd86fdf 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1451,7 +1451,7 @@ class RoomContextHandler: events_before=events_before, event=event, events_after=events_after, - state=await filter_evts(state_events), + state=state_events, aggregations=aggregations, start=await token.copy_and_replace( StreamKeyType.ROOM, results.start -- cgit 1.5.1 From d63814fd736fed5d3d45ff3af5e6d3bfae50c439 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 16 Nov 2022 13:50:07 +0000 Subject: Revert "Remove slaved id tracker (#14376)" (#14463) This reverts commit 36097e88c4da51fce6556a58c49bd675f4cf20ab. --- changelog.d/14376.misc | 1 - synapse/replication/slave/__init__.py | 13 ++++++ synapse/replication/slave/storage/__init__.py | 13 ++++++ .../slave/storage/_slaved_id_tracker.py | 50 ++++++++++++++++++++++ synapse/storage/databases/main/account_data.py | 30 ++++++++----- synapse/storage/databases/main/devices.py | 36 ++++++++++------ synapse/storage/databases/main/events_worker.py | 35 +++++++++------ synapse/storage/databases/main/push_rule.py | 17 ++++---- synapse/storage/databases/main/pusher.py | 24 +++++++---- synapse/storage/databases/main/receipts.py | 18 ++++---- synapse/storage/util/id_generators.py | 13 ++---- 11 files changed, 176 insertions(+), 74 deletions(-) delete mode 100644 changelog.d/14376.misc create mode 100644 synapse/replication/slave/__init__.py create mode 100644 synapse/replication/slave/storage/__init__.py create mode 100644 synapse/replication/slave/storage/_slaved_id_tracker.py diff --git a/changelog.d/14376.misc b/changelog.d/14376.misc deleted file mode 100644 index 2ca326fea6..0000000000 --- a/changelog.d/14376.misc +++ /dev/null @@ -1 +0,0 @@ -Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). diff --git a/synapse/replication/slave/__init__.py b/synapse/replication/slave/__init__.py new file mode 100644 index 0000000000..f43a360a80 --- /dev/null +++ b/synapse/replication/slave/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/synapse/replication/slave/storage/__init__.py b/synapse/replication/slave/storage/__init__.py new file mode 100644 index 0000000000..f43a360a80 --- /dev/null +++ b/synapse/replication/slave/storage/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py new file mode 100644 index 0000000000..8f3f953ed4 --- /dev/null +++ b/synapse/replication/slave/storage/_slaved_id_tracker.py @@ -0,0 +1,50 @@ +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import List, Optional, Tuple + +from synapse.storage.database import LoggingDatabaseConnection +from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id + + +class SlavedIdTracker(AbstractStreamIdTracker): + """Tracks the "current" stream ID of a stream with a single writer. + + See `AbstractStreamIdTracker` for more details. + + Note that this class does not work correctly when there are multiple + writers. + """ + + def __init__( + self, + db_conn: LoggingDatabaseConnection, + table: str, + column: str, + extra_tables: Optional[List[Tuple[str, str]]] = None, + step: int = 1, + ): + self.step = step + self._current = _load_current_id(db_conn, table, column, step) + if extra_tables: + for table, column in extra_tables: + self.advance(None, _load_current_id(db_conn, table, column)) + + def advance(self, instance_name: Optional[str], new_id: int) -> None: + self._current = (max if self.step > 0 else min)(self._current, new_id) + + def get_current_token(self) -> int: + return self._current + + def get_current_token_for_writer(self, instance_name: str) -> int: + return self.get_current_token() diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 282687ebce..c38b8a9e5a 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -27,6 +27,7 @@ from typing import ( ) from synapse.api.constants import AccountDataTypes +from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import AccountDataStream, TagAccountDataStream from synapse.storage._base import db_to_json from synapse.storage.database import ( @@ -67,11 +68,12 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) # to write account data. 
A value of `True` implies that `_account_data_id_gen` # is an `AbstractStreamIdGenerator` and not just a tracker. self._account_data_id_gen: AbstractStreamIdTracker - self._can_write_to_account_data = ( - self._instance_name in hs.config.worker.writers.account_data - ) if isinstance(database.engine, PostgresEngine): + self._can_write_to_account_data = ( + self._instance_name in hs.config.worker.writers.account_data + ) + self._account_data_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, @@ -93,13 +95,21 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets # updated over replication. (Multiple writers are not supported for # SQLite). - self._account_data_id_gen = StreamIdGenerator( - db_conn, - "room_account_data", - "stream_id", - extra_tables=[("room_tags_revisions", "stream_id")], - is_writer=self._instance_name in hs.config.worker.writers.account_data, - ) + if self._instance_name in hs.config.worker.writers.account_data: + self._can_write_to_account_data = True + self._account_data_id_gen = StreamIdGenerator( + db_conn, + "room_account_data", + "stream_id", + extra_tables=[("room_tags_revisions", "stream_id")], + ) + else: + self._account_data_id_gen = SlavedIdTracker( + db_conn, + "room_account_data", + "stream_id", + extra_tables=[("room_tags_revisions", "stream_id")], + ) account_max = self.get_max_account_data_stream_id() self._account_data_stream_cache = StreamChangeCache( diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 3e5c16b15b..aa58c2adc3 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -38,6 +38,7 @@ from synapse.logging.opentracing import ( whitelisted_homeserver, ) from synapse.metrics.background_process_metrics import wrap_as_background_process +from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( @@ -85,19 +86,28 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ): super().__init__(database, db_conn, hs) - # In the worker store this is an ID tracker which we overwrite in the non-worker - # class below that is used on the main process. 
- self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( - db_conn, - "device_lists_stream", - "stream_id", - extra_tables=[ - ("user_signature_stream", "stream_id"), - ("device_lists_outbound_pokes", "stream_id"), - ("device_lists_changes_in_room", "stream_id"), - ], - is_writer=hs.config.worker.worker_app is None, - ) + if hs.config.worker.worker_app is None: + self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + db_conn, + "device_lists_stream", + "stream_id", + extra_tables=[ + ("user_signature_stream", "stream_id"), + ("device_lists_outbound_pokes", "stream_id"), + ("device_lists_changes_in_room", "stream_id"), + ], + ) + else: + self._device_list_id_gen = SlavedIdTracker( + db_conn, + "device_lists_stream", + "stream_id", + extra_tables=[ + ("user_signature_stream", "stream_id"), + ("device_lists_outbound_pokes", "stream_id"), + ("device_lists_changes_in_room", "stream_id"), + ], + ) # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker). diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 296e50d661..467d20253d 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -59,6 +59,7 @@ from synapse.metrics.background_process_metrics import ( run_as_background_process, wrap_as_background_process, ) +from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import BackfillStream from synapse.replication.tcp.streams.events import EventsStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause @@ -212,20 +213,26 @@ class EventsWorkerStore(SQLBaseStore): # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets # updated over replication. (Multiple writers are not supported for # SQLite). 
- self._stream_id_gen = StreamIdGenerator( - db_conn, - "events", - "stream_ordering", - is_writer=hs.get_instance_name() in hs.config.worker.writers.events, - ) - self._backfill_id_gen = StreamIdGenerator( - db_conn, - "events", - "stream_ordering", - step=-1, - extra_tables=[("ex_outlier_stream", "event_stream_ordering")], - is_writer=hs.get_instance_name() in hs.config.worker.writers.events, - ) + if hs.get_instance_name() in hs.config.worker.writers.events: + self._stream_id_gen = StreamIdGenerator( + db_conn, + "events", + "stream_ordering", + ) + self._backfill_id_gen = StreamIdGenerator( + db_conn, + "events", + "stream_ordering", + step=-1, + extra_tables=[("ex_outlier_stream", "event_stream_ordering")], + ) + else: + self._stream_id_gen = SlavedIdTracker( + db_conn, "events", "stream_ordering" + ) + self._backfill_id_gen = SlavedIdTracker( + db_conn, "events", "stream_ordering", step=-1 + ) events_max = self._stream_id_gen.get_current_token() curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict( diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 12ad44dbb3..8ae10f6127 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -30,6 +30,7 @@ from typing import ( from synapse.api.errors import StoreError from synapse.config.homeserver import ExperimentalConfig +from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import PushRulesStream from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( @@ -110,14 +111,14 @@ class PushRulesWorkerStore( ): super().__init__(database, db_conn, hs) - # In the worker store this is an ID tracker which we overwrite in the non-worker - # class below that is used on the main process. - self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator( - db_conn, - "push_rules_stream", - "stream_id", - is_writer=hs.config.worker.worker_app is None, - ) + if hs.config.worker.worker_app is None: + self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + db_conn, "push_rules_stream", "stream_id" + ) + else: + self._push_rules_stream_id_gen = SlavedIdTracker( + db_conn, "push_rules_stream", "stream_id" + ) push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict( db_conn, diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index fee37b9ce4..4a01562d45 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -27,6 +27,7 @@ from typing import ( ) from synapse.push import PusherConfig, ThrottleParams +from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import PushersStream from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( @@ -58,15 +59,20 @@ class PusherWorkerStore(SQLBaseStore): ): super().__init__(database, db_conn, hs) - # In the worker store this is an ID tracker which we overwrite in the non-worker - # class below that is used on the main process. 
- self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( - db_conn, - "pushers", - "id", - extra_tables=[("deleted_pushers", "stream_id")], - is_writer=hs.config.worker.worker_app is None, - ) + if hs.config.worker.worker_app is None: + self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + db_conn, + "pushers", + "id", + extra_tables=[("deleted_pushers", "stream_id")], + ) + else: + self._pushers_id_gen = SlavedIdTracker( + db_conn, + "pushers", + "id", + extra_tables=[("deleted_pushers", "stream_id")], + ) self.db_pool.updates.register_background_update_handler( "remove_deactivated_pushers", diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 64519587f8..dc6989527e 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -27,6 +27,7 @@ from typing import ( ) from synapse.api.constants import EduTypes +from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import ReceiptsStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( @@ -60,9 +61,6 @@ class ReceiptsWorkerStore(SQLBaseStore): hs: "HomeServer", ): self._instance_name = hs.get_instance_name() - - # In the worker store this is an ID tracker which we overwrite in the non-worker - # class below that is used on the main process. self._receipts_id_gen: AbstractStreamIdTracker if isinstance(database.engine, PostgresEngine): @@ -89,12 +87,14 @@ class ReceiptsWorkerStore(SQLBaseStore): # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets # updated over replication. (Multiple writers are not supported for # SQLite). - self._receipts_id_gen = StreamIdGenerator( - db_conn, - "receipts_linearized", - "stream_id", - is_writer=hs.get_instance_name() in hs.config.worker.writers.receipts, - ) + if hs.get_instance_name() in hs.config.worker.writers.receipts: + self._receipts_id_gen = StreamIdGenerator( + db_conn, "receipts_linearized", "stream_id" + ) + else: + self._receipts_id_gen = SlavedIdTracker( + db_conn, "receipts_linearized", "stream_id" + ) super().__init__(database, db_conn, hs) diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 1af0af1266..2dfe4c0b66 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -186,13 +186,11 @@ class StreamIdGenerator(AbstractStreamIdGenerator): column: str, extra_tables: Iterable[Tuple[str, str]] = (), step: int = 1, - is_writer: bool = True, ) -> None: assert step != 0 self._lock = threading.Lock() self._step: int = step self._current: int = _load_current_id(db_conn, table, column, step) - self._is_writer = is_writer for table, column in extra_tables: self._current = (max if step > 0 else min)( self._current, _load_current_id(db_conn, table, column, step) @@ -206,11 +204,9 @@ class StreamIdGenerator(AbstractStreamIdGenerator): self._unfinished_ids: OrderedDict[int, int] = OrderedDict() def advance(self, instance_name: str, new_id: int) -> None: - # Advance should never be called on a writer instance, only over replication - if self._is_writer: - raise Exception("Replication is not supported by writer StreamIdGenerator") - - self._current = (max if self._step > 0 else min)(self._current, new_id) + # `StreamIdGenerator` should only be used when there is a single writer, + # so replication should never happen. 
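+        # Replication clients should instead use `SlavedIdTracker`, whose +        # `advance` applies stream positions received over replication.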
+ raise Exception("Replication is not supported by StreamIdGenerator") def get_next(self) -> AsyncContextManager[int]: with self._lock: @@ -253,9 +249,6 @@ class StreamIdGenerator(AbstractStreamIdGenerator): return _AsyncCtxManagerWrapper(manager()) def get_current_token(self) -> int: - if self._is_writer: - return self._current - with self._lock: if self._unfinished_ids: return next(iter(self._unfinished_ids)) - self._step -- cgit 1.5.1 From 882277008c7b43ab26e3445ab94a38aa25ad0965 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 16 Nov 2022 15:01:22 +0000 Subject: Fix background updates failing to add unique indexes on receipts (#14453) As part of the database migration to support threaded receipts, there is a possible window in between `73/08thread_receipts_non_null.sql.postgres` removing the original unique constraints on `receipts_linearized` and `receipts_graph` and the `reeipts_linearized_unique_index` and `receipts_graph_unique_index` background updates from `72/08thread_receipts.sql` completing where the unique constraints on `receipts_linearized` and `receipts_graph` are missing. Any emulated upserts on these tables must therefore be performed with a lock held, otherwise duplicate rows can end up in the tables when there are concurrent emulated upserts. Fix the missing lock. Note that emulated upserts no longer happen by default on sqlite, since the minimum supported version of sqlite supports native upserts by default now. Finally, clean up any duplicate receipts that may have crept in before trying to create the `receipts_graph_unique_index` and `receipts_linearized_unique_index` unique indexes. Signed-off-by: Sean Quah --- changelog.d/14453.bugfix | 1 + synapse/storage/databases/main/receipts.py | 171 ++++++++++++++++++--- tests/storage/databases/main/test_receipts.py | 209 ++++++++++++++++++++++++++ 3 files changed, 357 insertions(+), 24 deletions(-) create mode 100644 changelog.d/14453.bugfix create mode 100644 tests/storage/databases/main/test_receipts.py diff --git a/changelog.d/14453.bugfix b/changelog.d/14453.bugfix new file mode 100644 index 0000000000..4969e5450c --- /dev/null +++ b/changelog.d/14453.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. 
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index dc6989527e..fbf27497ec 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -113,24 +113,6 @@ class ReceiptsWorkerStore(SQLBaseStore): prefilled_cache=receipts_stream_prefill, ) - self.db_pool.updates.register_background_index_update( - "receipts_linearized_unique_index", - index_name="receipts_linearized_unique_index", - table="receipts_linearized", - columns=["room_id", "receipt_type", "user_id"], - where_clause="thread_id IS NULL", - unique=True, - ) - - self.db_pool.updates.register_background_index_update( - "receipts_graph_unique_index", - index_name="receipts_graph_unique_index", - table="receipts_graph", - columns=["room_id", "receipt_type", "user_id"], - where_clause="thread_id IS NULL", - unique=True, - ) - def get_max_receipt_stream_id(self) -> int: """Get the current max stream ID for receipts stream""" return self._receipts_id_gen.get_current_token() @@ -702,9 +684,6 @@ class ReceiptsWorkerStore(SQLBaseStore): "data": json_encoder.encode(data), }, where_clause=where_clause, - # receipts_linearized has a unique constraint on - # (user_id, room_id, receipt_type), so no need to lock - lock=False, ) return rx_ts @@ -862,14 +841,13 @@ class ReceiptsWorkerStore(SQLBaseStore): "data": json_encoder.encode(data), }, where_clause=where_clause, - # receipts_graph has a unique constraint on - # (user_id, room_id, receipt_type), so no need to lock - lock=False, ) class ReceiptsBackgroundUpdateStore(SQLBaseStore): POPULATE_RECEIPT_EVENT_STREAM_ORDERING = "populate_event_stream_ordering" + RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME = "receipts_linearized_unique_index" + RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME = "receipts_graph_unique_index" def __init__( self, @@ -883,6 +861,14 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore): self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING, self._populate_receipt_event_stream_ordering, ) + self.db_pool.updates.register_background_update_handler( + self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME, + self._background_receipts_linearized_unique_index, + ) + self.db_pool.updates.register_background_update_handler( + self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME, + self._background_receipts_graph_unique_index, + ) async def _populate_receipt_event_stream_ordering( self, progress: JsonDict, batch_size: int @@ -938,6 +924,143 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore): return batch_size + async def _create_receipts_index(self, index_name: str, table: str) -> None: + """Adds a unique index on `(room_id, receipt_type, user_id)` to the given + receipts table, for non-thread receipts.""" + + def _create_index(conn: LoggingDatabaseConnection) -> None: + conn.rollback() + + # we have to set autocommit, because postgres refuses to + # CREATE INDEX CONCURRENTLY without it. + if isinstance(self.database_engine, PostgresEngine): + conn.set_session(autocommit=True) + + try: + c = conn.cursor() + + # Now that the duplicates are gone, we can create the index. 
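+                # (On PostgreSQL, CONCURRENTLY builds the index without +                # blocking concurrent writes; SQLite has no equivalent +                # keyword, hence the empty string there.)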
+ concurrently = ( + "CONCURRENTLY" + if isinstance(self.database_engine, PostgresEngine) + else "" + ) + sql = f""" + CREATE UNIQUE INDEX {concurrently} {index_name} + ON {table}(room_id, receipt_type, user_id) + WHERE thread_id IS NULL + """ + c.execute(sql) + finally: + if isinstance(self.database_engine, PostgresEngine): + conn.set_session(autocommit=False) + + await self.db_pool.runWithConnection(_create_index) + + async def _background_receipts_linearized_unique_index( + self, progress: dict, batch_size: int + ) -> int: + """Removes duplicate receipts and adds a unique index on + `(room_id, receipt_type, user_id)` to `receipts_linearized`, for non-thread + receipts.""" + + def _remote_duplicate_receipts_txn(txn: LoggingTransaction) -> None: + # Identify any duplicate receipts arising from + # https://github.com/matrix-org/synapse/issues/14406. + # We expect the following query to use the per-thread receipt index and take + # less than a minute. + sql = """ + SELECT MAX(stream_id), room_id, receipt_type, user_id + FROM receipts_linearized + WHERE thread_id IS NULL + GROUP BY room_id, receipt_type, user_id + HAVING COUNT(*) > 1 + """ + txn.execute(sql) + duplicate_keys = cast(List[Tuple[int, str, str, str]], list(txn)) + + # Then remove duplicate receipts, keeping the one with the highest + # `stream_id`. There should only be a single receipt with any given + # `stream_id`. + for max_stream_id, room_id, receipt_type, user_id in duplicate_keys: + sql = """ + DELETE FROM receipts_linearized + WHERE + room_id = ? AND + receipt_type = ? AND + user_id = ? AND + thread_id IS NULL AND + stream_id < ? + """ + txn.execute(sql, (room_id, receipt_type, user_id, max_stream_id)) + + await self.db_pool.runInteraction( + self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME, + _remote_duplicate_receipts_txn, + ) + + await self._create_receipts_index( + "receipts_linearized_unique_index", + "receipts_linearized", + ) + + await self.db_pool.updates._end_background_update( + self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME + ) + + return 1 + + async def _background_receipts_graph_unique_index( + self, progress: dict, batch_size: int + ) -> int: + """Removes duplicate receipts and adds a unique index on + `(room_id, receipt_type, user_id)` to `receipts_graph`, for non-thread + receipts.""" + + def _remote_duplicate_receipts_txn(txn: LoggingTransaction) -> None: + # Identify any duplicate receipts arising from + # https://github.com/matrix-org/synapse/issues/14406. + # We expect the following query to use the per-thread receipt index and take + # less than a minute. + sql = """ + SELECT room_id, receipt_type, user_id FROM receipts_graph + WHERE thread_id IS NULL + GROUP BY room_id, receipt_type, user_id + HAVING COUNT(*) > 1 + """ + txn.execute(sql) + duplicate_keys = cast(List[Tuple[str, str, str]], list(txn)) + + # Then remove all duplicate receipts. + # We could be clever and try to keep the latest receipt out of every set of + # duplicates, but it's far simpler to remove them all. + for room_id, receipt_type, user_id in duplicate_keys: + sql = """ + DELETE FROM receipts_graph + WHERE + room_id = ? AND + receipt_type = ? AND + user_id = ? 
AND + thread_id IS NULL + """ + txn.execute(sql, (room_id, receipt_type, user_id)) + + await self.db_pool.runInteraction( + self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME, + _remote_duplicate_receipts_txn, + ) + + await self._create_receipts_index( + "receipts_graph_unique_index", + "receipts_graph", + ) + + await self.db_pool.updates._end_background_update( + self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME + ) + + return 1 + class ReceiptsStore(ReceiptsWorkerStore, ReceiptsBackgroundUpdateStore): pass diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py new file mode 100644 index 0000000000..c4f12d81d7 --- /dev/null +++ b/tests/storage/databases/main/test_receipts.py @@ -0,0 +1,209 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict, Optional, Sequence, Tuple + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.rest import admin +from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.storage.database import LoggingTransaction +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + + +class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase): + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): + self.store = hs.get_datastores().main + self.user_id = self.register_user("foo", "pass") + self.token = self.login("foo", "pass") + self.room_id = self.helper.create_room_as(self.user_id, tok=self.token) + self.other_room_id = self.helper.create_room_as(self.user_id, tok=self.token) + + def _test_background_receipts_unique_index( + self, + update_name: str, + index_name: str, + table: str, + receipts: Dict[Tuple[str, str, str], Sequence[Dict[str, Any]]], + expected_unique_receipts: Dict[Tuple[str, str, str], Optional[Dict[str, Any]]], + ): + """Test that the background update to uniqueify non-thread receipts in + the given receipts table works properly. + + Args: + update_name: The name of the background update to test. + index_name: The name of the index that the background update creates. + table: The table of receipts that the background update fixes. + receipts: The test data containing duplicate receipts. + A list of receipt rows to insert, grouped by + `(room_id, receipt_type, user_id)`. + expected_unique_receipts: A dictionary of `(room_id, receipt_type, user_id)` + keys and expected receipt key-values after duplicate receipts have been + removed. + """ + # First, undo the background update. + def drop_receipts_unique_index(txn: LoggingTransaction) -> None: + txn.execute(f"DROP INDEX IF EXISTS {index_name}") + + self.get_success( + self.store.db_pool.runInteraction( + "drop_receipts_unique_index", + drop_receipts_unique_index, + ) + ) + + # Populate the receipts table, including duplicates. 
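+        # Every row is inserted with a NULL `thread_id`, since the unique +        # indexes under test only cover non-thread receipts.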
+ for (room_id, receipt_type, user_id), rows in receipts.items(): + for row in rows: + self.get_success( + self.store.db_pool.simple_insert( + table, + { + "room_id": room_id, + "receipt_type": receipt_type, + "user_id": user_id, + "thread_id": None, + "data": "{}", + **row, + }, + ) + ) + + # Insert and run the background update. + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": update_name, + "progress_json": "{}", + }, + ) + ) + + self.store.db_pool.updates._all_done = False + + self.wait_for_background_updates() + + # Check that the remaining receipts match expectations. + for ( + room_id, + receipt_type, + user_id, + ), expected_row in expected_unique_receipts.items(): + # Include the receipt key in the returned columns, for more informative + # assertion messages. + columns = ["room_id", "receipt_type", "user_id"] + if expected_row is not None: + columns += expected_row.keys() + + rows = self.get_success( + self.store.db_pool.simple_select_list( + table=table, + keyvalues={ + "room_id": room_id, + "receipt_type": receipt_type, + "user_id": user_id, + # `simple_select_onecol` does not support NULL filters, + # so skip the filter on `thread_id`. + }, + retcols=columns, + desc="get_receipt", + ) + ) + + if expected_row is not None: + self.assertEqual( + len(rows), + 1, + f"Background update did not leave behind latest receipt in {table}", + ) + self.assertEqual( + rows[0], + { + "room_id": room_id, + "receipt_type": receipt_type, + "user_id": user_id, + **expected_row, + }, + ) + else: + self.assertEqual( + len(rows), + 0, + f"Background update did not remove all duplicate receipts from {table}", + ) + + def test_background_receipts_linearized_unique_index(self): + """Test that the background update to uniqueify non-thread receipts in + `receipts_linearized` works properly. + """ + self._test_background_receipts_unique_index( + "receipts_linearized_unique_index", + "receipts_linearized_unique_index", + "receipts_linearized", + receipts={ + (self.room_id, "m.read", self.user_id): [ + {"stream_id": 5, "event_id": "$some_event"}, + {"stream_id": 6, "event_id": "$some_event"}, + ], + (self.other_room_id, "m.read", self.user_id): [ + {"stream_id": 7, "event_id": "$some_event"} + ], + }, + expected_unique_receipts={ + (self.room_id, "m.read", self.user_id): {"stream_id": 6}, + (self.other_room_id, "m.read", self.user_id): {"stream_id": 7}, + }, + ) + + def test_background_receipts_graph_unique_index(self): + """Test that the background update to uniqueify non-thread receipts in + `receipts_graph` works properly. 
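+        (Unlike `receipts_linearized`, duplicate rows here are expected to be +        removed outright rather than deduplicated to the latest receipt, hence +        the expected `None` below.)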
+ """ + self._test_background_receipts_unique_index( + "receipts_graph_unique_index", + "receipts_graph_unique_index", + "receipts_graph", + receipts={ + (self.room_id, "m.read", self.user_id): [ + { + "event_ids": '["$some_event"]', + }, + { + "event_ids": '["$some_event"]', + }, + ], + (self.other_room_id, "m.read", self.user_id): [ + { + "event_ids": '["$some_event"]', + } + ], + }, + expected_unique_receipts={ + (self.room_id, "m.read", self.user_id): None, + (self.other_room_id, "m.read", self.user_id): { + "event_ids": '["$some_event"]' + }, + }, + ) -- cgit 1.5.1 From 1a8cd8bec0e78121a3d85f20526297f02c4d7412 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 16 Nov 2022 15:11:06 +0000 Subject: 1.72.0rc1 --- CHANGES.md | 95 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/12947.doc | 1 - changelog.d/13812.misc | 1 - changelog.d/14128.misc | 1 - changelog.d/14197.docker | 1 - changelog.d/14260.feature | 1 - changelog.d/14292.bugfix | 1 - changelog.d/14293.doc | 1 - changelog.d/14294.docker | 1 - changelog.d/14297.doc | 1 - changelog.d/14313.misc | 1 - changelog.d/14324.misc | 1 - changelog.d/14329.misc | 1 - changelog.d/14339.misc | 1 - changelog.d/14340.misc | 1 - changelog.d/14346.misc | 1 - changelog.d/14347.bugfix | 1 - changelog.d/14351.misc | 1 - changelog.d/14356.bugfix | 1 - changelog.d/14361.bugfix | 1 - changelog.d/14364.bugfix | 1 - changelog.d/14369.bugfix | 1 - changelog.d/14370.doc | 1 - changelog.d/14374.bugfix | 1 - changelog.d/14375.misc | 1 - changelog.d/14378.misc | 1 - changelog.d/14379.misc | 1 - changelog.d/14380.misc | 1 - changelog.d/14381.misc | 1 - changelog.d/14382.misc | 1 - changelog.d/14383.misc | 1 - changelog.d/14384.misc | 1 - changelog.d/14385.misc | 1 - changelog.d/14386.misc | 1 - changelog.d/14392.removal | 1 - changelog.d/14394.misc | 1 - changelog.d/14396.feature | 1 - changelog.d/14397.removal | 1 - changelog.d/14398.misc | 1 - changelog.d/14405.feature | 1 - changelog.d/14409.bugfix | 1 - changelog.d/14410.misc | 1 - changelog.d/14411.misc | 1 - changelog.d/14414.doc | 1 - changelog.d/14417.misc | 1 - changelog.d/14424.misc | 1 - changelog.d/14425.misc | 1 - changelog.d/14426.misc | 1 - changelog.d/14427.misc | 1 - changelog.d/14428.misc | 1 - changelog.d/14429.misc | 1 - changelog.d/14430.misc | 1 - changelog.d/14431.misc | 1 - changelog.d/14433.misc | 1 - changelog.d/14434.misc | 1 - changelog.d/14436.misc | 1 - changelog.d/14437.misc | 1 - changelog.d/14438.misc | 1 - changelog.d/14439.misc | 1 - changelog.d/14440.misc | 1 - changelog.d/14442.feature | 1 - changelog.d/14448.bugfix | 1 - changelog.d/14451.misc | 1 - changelog.d/14453.bugfix | 1 - changelog.d/14455.misc | 1 - changelog.d/14461.misc | 1 - debian/changelog | 6 +++ pyproject.toml | 2 +- 68 files changed, 102 insertions(+), 66 deletions(-) delete mode 100644 changelog.d/12947.doc delete mode 100644 changelog.d/13812.misc delete mode 100644 changelog.d/14128.misc delete mode 100644 changelog.d/14197.docker delete mode 100644 changelog.d/14260.feature delete mode 100644 changelog.d/14292.bugfix delete mode 100644 changelog.d/14293.doc delete mode 100644 changelog.d/14294.docker delete mode 100644 changelog.d/14297.doc delete mode 100644 changelog.d/14313.misc delete mode 100644 changelog.d/14324.misc delete mode 100644 changelog.d/14329.misc delete mode 100644 changelog.d/14339.misc delete mode 100644 changelog.d/14340.misc delete mode 100644 changelog.d/14346.misc delete mode 100644 changelog.d/14347.bugfix delete mode 100644 changelog.d/14351.misc delete mode 
100644 changelog.d/14356.bugfix delete mode 100644 changelog.d/14361.bugfix delete mode 100644 changelog.d/14364.bugfix delete mode 100644 changelog.d/14369.bugfix delete mode 100644 changelog.d/14370.doc delete mode 100644 changelog.d/14374.bugfix delete mode 100644 changelog.d/14375.misc delete mode 100644 changelog.d/14378.misc delete mode 100644 changelog.d/14379.misc delete mode 100644 changelog.d/14380.misc delete mode 100644 changelog.d/14381.misc delete mode 100644 changelog.d/14382.misc delete mode 100644 changelog.d/14383.misc delete mode 100644 changelog.d/14384.misc delete mode 100644 changelog.d/14385.misc delete mode 100644 changelog.d/14386.misc delete mode 100644 changelog.d/14392.removal delete mode 100644 changelog.d/14394.misc delete mode 100644 changelog.d/14396.feature delete mode 100644 changelog.d/14397.removal delete mode 100644 changelog.d/14398.misc delete mode 100644 changelog.d/14405.feature delete mode 100644 changelog.d/14409.bugfix delete mode 100644 changelog.d/14410.misc delete mode 100644 changelog.d/14411.misc delete mode 100644 changelog.d/14414.doc delete mode 100644 changelog.d/14417.misc delete mode 100644 changelog.d/14424.misc delete mode 100644 changelog.d/14425.misc delete mode 100644 changelog.d/14426.misc delete mode 100644 changelog.d/14427.misc delete mode 100644 changelog.d/14428.misc delete mode 100644 changelog.d/14429.misc delete mode 100644 changelog.d/14430.misc delete mode 100644 changelog.d/14431.misc delete mode 100644 changelog.d/14433.misc delete mode 100644 changelog.d/14434.misc delete mode 100644 changelog.d/14436.misc delete mode 100644 changelog.d/14437.misc delete mode 100644 changelog.d/14438.misc delete mode 100644 changelog.d/14439.misc delete mode 100644 changelog.d/14440.misc delete mode 100644 changelog.d/14442.feature delete mode 100644 changelog.d/14448.bugfix delete mode 100644 changelog.d/14451.misc delete mode 100644 changelog.d/14453.bugfix delete mode 100644 changelog.d/14455.misc delete mode 100644 changelog.d/14461.misc diff --git a/CHANGES.md b/CHANGES.md index 078a36eac9..05cb7b1267 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,98 @@ +Synapse 1.72.0rc1 (2022-11-16) +============================== + +Features +-------- + +- Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260)) +- Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396)) +- Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405)) +- Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442)) + + +Bugfixes +-------- + +- Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292)) +- Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347)) +- Fix a bug introduced in 1.66 which would not send certain pushrules to clients. Contributed by Nico. 
([\#14356](https://github.com/matrix-org/synapse/issues/14356)) +- Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361)) +- Fix refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364)) +- Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369)) +- Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374)) +- Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409)) +- Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448)) +- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453)) + + +Updates to the Docker image +--------------------------- + +- Add all Stream Writer worker types to configure_workers_and_start.py. ([\#14197](https://github.com/matrix-org/synapse/issues/14197)) +- Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294)) + + +Improved Documentation +---------------------- + +- Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370)) +- Add additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293)) +- Add an example of how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297)) +- Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414)) + + +Deprecations and Removals +------------------------- + +- Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397)) + + +Internal Changes +---------------- + +- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812)) +- Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455)) +- Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313)) +- Add override ability to `complement.sh` command line script to request certain types of workers. ([\#14324](https://github.com/matrix-org/synapse/issues/14324)) +- Bump flake8-bugbear from 22.9.23 to 22.10.27.
([\#14329](https://github.com/matrix-org/synapse/issues/14329)) +- Enable testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in Complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339)) +- Bump twisted from 22.8.0 to 22.10.0. ([\#14340](https://github.com/matrix-org/synapse/issues/14340)) +- Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346)) +- Use a maintained GitHub action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351)) +- Clean up old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375)) +- Bump dawidd6/action-download-artifact from 2.15.0 to 2.24.0. ([\#14378](https://github.com/matrix-org/synapse/issues/14378)) +- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14379](https://github.com/matrix-org/synapse/issues/14379)) +- Bump regex from 1.6.0 to 1.7.0. ([\#14380](https://github.com/matrix-org/synapse/issues/14380)) +- Bump pyo3 from 0.17.2 to 0.17.3. ([\#14381](https://github.com/matrix-org/synapse/issues/14381)) +- Bump types-setuptools from 65.5.0.1 to 65.5.0.2. ([\#14382](https://github.com/matrix-org/synapse/issues/14382)) +- Bump pillow from 9.2.0 to 9.3.0. ([\#14383](https://github.com/matrix-org/synapse/issues/14383)) +- Bump cryptography from 36.0.1 to 38.0.3. ([\#14384](https://github.com/matrix-org/synapse/issues/14384)) +- Bump types-pyyaml from 6.0.12 to 6.0.12.1. ([\#14385](https://github.com/matrix-org/synapse/issues/14385)) +- Bump types-jsonschema from 4.4.6 to 4.17.0.0. ([\#14386](https://github.com/matrix-org/synapse/issues/14386)) +- Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394)) +- Bump dawidd6/action-download-artifact from 2.24.0 to 2.24.1. ([\#14398](https://github.com/matrix-org/synapse/issues/14398)) +- Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410)) +- Clean up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411)) +- Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417)) +- Bump dawidd6/action-download-artifact from 2.24.1 to 2.24.2. ([\#14424](https://github.com/matrix-org/synapse/issues/14424)) +- Bump actions/upload-artifact from 2 to 3. ([\#14425](https://github.com/matrix-org/synapse/issues/14425)) +- Bump blake2 from 0.10.4 to 0.10.5. ([\#14426](https://github.com/matrix-org/synapse/issues/14426)) +- Bump attrs from 21.4.0 to 22.1.0. ([\#14427](https://github.com/matrix-org/synapse/issues/14427)) +- Bump types-pyyaml from 6.0.12.1 to 6.0.12.2. ([\#14428](https://github.com/matrix-org/synapse/issues/14428)) +- Bump gitpython from 3.1.27 to 3.1.29. ([\#14429](https://github.com/matrix-org/synapse/issues/14429)) +- Bump types-jsonschema from 4.17.0.0 to 4.17.0.1. ([\#14430](https://github.com/matrix-org/synapse/issues/14430)) +- Bump flake8 from 4.0.1 to 5.0.4. ([\#14431](https://github.com/matrix-org/synapse/issues/14431)) +- Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433)) +- Make Dependabot only bump Rust deps in the lock file.
([\#14434](https://github.com/matrix-org/synapse/issues/14434)) +- Bump types-setuptools from 65.5.0.2 to 65.5.0.3. ([\#14436](https://github.com/matrix-org/synapse/issues/14436)) +- Bump types-pyopenssl from 22.0.10 to 22.1.0.2. ([\#14437](https://github.com/matrix-org/synapse/issues/14437)) +- Bump flake8-comprehensions from 3.8.0 to 3.10.1. ([\#14438](https://github.com/matrix-org/synapse/issues/14438)) +- Bump jsonschema from 4.16.0 to 4.17.0. ([\#14439](https://github.com/matrix-org/synapse/issues/14439)) +- Bump canonicaljson from 1.6.3 to 1.6.4. ([\#14440](https://github.com/matrix-org/synapse/issues/14440)) +- Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451)) +- Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461)) + + Synapse 1.71.0 (2022-11-08) =========================== diff --git a/changelog.d/12947.doc b/changelog.d/12947.doc deleted file mode 100644 index 5319e9ad4f..0000000000 --- a/changelog.d/12947.doc +++ /dev/null @@ -1 +0,0 @@ -Upload documentation PRs to Netlify. diff --git a/changelog.d/13812.misc b/changelog.d/13812.misc deleted file mode 100644 index 667fdee2b7..0000000000 --- a/changelog.d/13812.misc +++ /dev/null @@ -1 +0,0 @@ -Run unit tests against Python 3.11. diff --git a/changelog.d/14128.misc b/changelog.d/14128.misc deleted file mode 100644 index 29168ef955..0000000000 --- a/changelog.d/14128.misc +++ /dev/null @@ -1 +0,0 @@ -Add TLS support for generic worker endpoints. diff --git a/changelog.d/14197.docker b/changelog.d/14197.docker deleted file mode 100644 index 529ccd99c5..0000000000 --- a/changelog.d/14197.docker +++ /dev/null @@ -1 +0,0 @@ -Add all Stream Writer worker types to configure_workers_and_start.py. diff --git a/changelog.d/14260.feature b/changelog.d/14260.feature deleted file mode 100644 index 102dc7b3e0..0000000000 --- a/changelog.d/14260.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. diff --git a/changelog.d/14292.bugfix b/changelog.d/14292.bugfix deleted file mode 100644 index 4ed92f5cf2..0000000000 --- a/changelog.d/14292.bugfix +++ /dev/null @@ -1 +0,0 @@ -Faster joins: do not block creation of or queries for room aliases during the resync. diff --git a/changelog.d/14293.doc b/changelog.d/14293.doc deleted file mode 100644 index d6410421e7..0000000000 --- a/changelog.d/14293.doc +++ /dev/null @@ -1 +0,0 @@ -Add addtional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. diff --git a/changelog.d/14294.docker b/changelog.d/14294.docker deleted file mode 100644 index 1489470408..0000000000 --- a/changelog.d/14294.docker +++ /dev/null @@ -1 +0,0 @@ -Remove references to legacy worker types in the multi-worker Dockerfile. diff --git a/changelog.d/14297.doc b/changelog.d/14297.doc deleted file mode 100644 index e418c2a18a..0000000000 --- a/changelog.d/14297.doc +++ /dev/null @@ -1 +0,0 @@ -Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). diff --git a/changelog.d/14313.misc b/changelog.d/14313.misc deleted file mode 100644 index 755527daef..0000000000 --- a/changelog.d/14313.misc +++ /dev/null @@ -1 +0,0 @@ -Switch to a maintained action for installing Rust in CI. 
diff --git a/changelog.d/14324.misc b/changelog.d/14324.misc deleted file mode 100644 index 54d8198a8f..0000000000 --- a/changelog.d/14324.misc +++ /dev/null @@ -1 +0,0 @@ -Add override ability to `complement.sh` command line script to request certain types of workers. diff --git a/changelog.d/14329.misc b/changelog.d/14329.misc deleted file mode 100644 index 2f6bbd3af7..0000000000 --- a/changelog.d/14329.misc +++ /dev/null @@ -1 +0,0 @@ -Bump flake8-bugbear from 22.9.23 to 22.10.27. diff --git a/changelog.d/14339.misc b/changelog.d/14339.misc deleted file mode 100644 index 3761d453a8..0000000000 --- a/changelog.d/14339.misc +++ /dev/null @@ -1 +0,0 @@ -Enabling testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in complement. diff --git a/changelog.d/14340.misc b/changelog.d/14340.misc deleted file mode 100644 index 4e01bf777c..0000000000 --- a/changelog.d/14340.misc +++ /dev/null @@ -1 +0,0 @@ -Bump twisted from 22.8.0 to 22.10.0. diff --git a/changelog.d/14346.misc b/changelog.d/14346.misc deleted file mode 100644 index 9833b0733a..0000000000 --- a/changelog.d/14346.misc +++ /dev/null @@ -1 +0,0 @@ -Concisely log a failure to resolve state due to missing `prev_events`. diff --git a/changelog.d/14347.bugfix b/changelog.d/14347.bugfix deleted file mode 100644 index 91975757ae..0000000000 --- a/changelog.d/14347.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. diff --git a/changelog.d/14351.misc b/changelog.d/14351.misc deleted file mode 100644 index 4a3e053c08..0000000000 --- a/changelog.d/14351.misc +++ /dev/null @@ -1 +0,0 @@ -Use a maintained Github action to install Rust. diff --git a/changelog.d/14356.bugfix b/changelog.d/14356.bugfix deleted file mode 100644 index 288d58a540..0000000000 --- a/changelog.d/14356.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.66 which would not send certain pushrules to clients. Contributed by Nico. diff --git a/changelog.d/14361.bugfix b/changelog.d/14361.bugfix deleted file mode 100644 index 33ba1d92af..0000000000 --- a/changelog.d/14361.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. \ No newline at end of file diff --git a/changelog.d/14364.bugfix b/changelog.d/14364.bugfix deleted file mode 100644 index 514bf859bb..0000000000 --- a/changelog.d/14364.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. diff --git a/changelog.d/14369.bugfix b/changelog.d/14369.bugfix deleted file mode 100644 index e6709f4eec..0000000000 --- a/changelog.d/14369.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. diff --git a/changelog.d/14370.doc b/changelog.d/14370.doc deleted file mode 100644 index 5319e9ad4f..0000000000 --- a/changelog.d/14370.doc +++ /dev/null @@ -1 +0,0 @@ -Upload documentation PRs to Netlify. diff --git a/changelog.d/14374.bugfix b/changelog.d/14374.bugfix deleted file mode 100644 index 8366cfbf8a..0000000000 --- a/changelog.d/14374.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. 
diff --git a/changelog.d/14375.misc b/changelog.d/14375.misc deleted file mode 100644 index d0369b9b8c..0000000000 --- a/changelog.d/14375.misc +++ /dev/null @@ -1 +0,0 @@ -Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/14378.misc b/changelog.d/14378.misc deleted file mode 100644 index 3d397cdf00..0000000000 --- a/changelog.d/14378.misc +++ /dev/null @@ -1 +0,0 @@ -Bump dawidd6/action-download-artifact from 2.15.0 to 2.24.0. diff --git a/changelog.d/14379.misc b/changelog.d/14379.misc deleted file mode 100644 index bcda89ff92..0000000000 --- a/changelog.d/14379.misc +++ /dev/null @@ -1 +0,0 @@ -Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. diff --git a/changelog.d/14380.misc b/changelog.d/14380.misc deleted file mode 100644 index 078fb6cf95..0000000000 --- a/changelog.d/14380.misc +++ /dev/null @@ -1 +0,0 @@ -Bump regex from 1.6.0 to 1.7.0. diff --git a/changelog.d/14381.misc b/changelog.d/14381.misc deleted file mode 100644 index 1efeb6f378..0000000000 --- a/changelog.d/14381.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pyo3 from 0.17.2 to 0.17.3. diff --git a/changelog.d/14382.misc b/changelog.d/14382.misc deleted file mode 100644 index e648f3fcc3..0000000000 --- a/changelog.d/14382.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-setuptools from 65.5.0.1 to 65.5.0.2. diff --git a/changelog.d/14383.misc b/changelog.d/14383.misc deleted file mode 100644 index 44b77488d9..0000000000 --- a/changelog.d/14383.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pillow from 9.2.0 to 9.3.0. diff --git a/changelog.d/14384.misc b/changelog.d/14384.misc deleted file mode 100644 index 44d3d03183..0000000000 --- a/changelog.d/14384.misc +++ /dev/null @@ -1 +0,0 @@ -Bump cryptography from 36.0.1 to 38.0.3. diff --git a/changelog.d/14385.misc b/changelog.d/14385.misc deleted file mode 100644 index 960ad9b0cd..0000000000 --- a/changelog.d/14385.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pyyaml from 6.0.12 to 6.0.12.1. diff --git a/changelog.d/14386.misc b/changelog.d/14386.misc deleted file mode 100644 index 03c2af8cdb..0000000000 --- a/changelog.d/14386.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-jsonschema from 4.4.6 to 4.17.0.0. diff --git a/changelog.d/14392.removal b/changelog.d/14392.removal deleted file mode 100644 index e96b3de2bd..0000000000 --- a/changelog.d/14392.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for PostgreSQL 10. diff --git a/changelog.d/14394.misc b/changelog.d/14394.misc deleted file mode 100644 index 8b816eb0a3..0000000000 --- a/changelog.d/14394.misc +++ /dev/null @@ -1 +0,0 @@ -Test against PostgreSQL 15 in CI. diff --git a/changelog.d/14396.feature b/changelog.d/14396.feature deleted file mode 100644 index e875916885..0000000000 --- a/changelog.d/14396.feature +++ /dev/null @@ -1 +0,0 @@ -Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). diff --git a/changelog.d/14397.removal b/changelog.d/14397.removal deleted file mode 100644 index e96b3de2bd..0000000000 --- a/changelog.d/14397.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for PostgreSQL 10. diff --git a/changelog.d/14398.misc b/changelog.d/14398.misc deleted file mode 100644 index c18fb6d2c6..0000000000 --- a/changelog.d/14398.misc +++ /dev/null @@ -1 +0,0 @@ -Bump dawidd6/action-download-artifact from 2.24.0 to 2.24.1. 
diff --git a/changelog.d/14405.feature b/changelog.d/14405.feature deleted file mode 100644 index d3ba89b597..0000000000 --- a/changelog.d/14405.feature +++ /dev/null @@ -1 +0,0 @@ -Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. diff --git a/changelog.d/14409.bugfix b/changelog.d/14409.bugfix deleted file mode 100644 index f720700653..0000000000 --- a/changelog.d/14409.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. diff --git a/changelog.d/14410.misc b/changelog.d/14410.misc deleted file mode 100644 index f085a8bfb2..0000000000 --- a/changelog.d/14410.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unreachable code. diff --git a/changelog.d/14411.misc b/changelog.d/14411.misc deleted file mode 100644 index f5cca5c833..0000000000 --- a/changelog.d/14411.misc +++ /dev/null @@ -1 +0,0 @@ -Clean-up event persistence code. diff --git a/changelog.d/14414.doc b/changelog.d/14414.doc deleted file mode 100644 index 23dd963d47..0000000000 --- a/changelog.d/14414.doc +++ /dev/null @@ -1 +0,0 @@ -Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. diff --git a/changelog.d/14417.misc b/changelog.d/14417.misc deleted file mode 100644 index 7527fe97c2..0000000000 --- a/changelog.d/14417.misc +++ /dev/null @@ -1 +0,0 @@ -Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. \ No newline at end of file diff --git a/changelog.d/14424.misc b/changelog.d/14424.misc deleted file mode 100644 index 8b2c88aa33..0000000000 --- a/changelog.d/14424.misc +++ /dev/null @@ -1 +0,0 @@ -Bump dawidd6/action-download-artifact from 2.24.1 to 2.24.2. diff --git a/changelog.d/14425.misc b/changelog.d/14425.misc deleted file mode 100644 index b5ce5898c2..0000000000 --- a/changelog.d/14425.misc +++ /dev/null @@ -1 +0,0 @@ -Bump actions/upload-artifact from 2 to 3. diff --git a/changelog.d/14426.misc b/changelog.d/14426.misc deleted file mode 100644 index f78ec0ab7b..0000000000 --- a/changelog.d/14426.misc +++ /dev/null @@ -1 +0,0 @@ -Bump blake2 from 0.10.4 to 0.10.5. diff --git a/changelog.d/14427.misc b/changelog.d/14427.misc deleted file mode 100644 index f870b19e75..0000000000 --- a/changelog.d/14427.misc +++ /dev/null @@ -1 +0,0 @@ -Bump attrs from 21.4.0 to 22.1.0. diff --git a/changelog.d/14428.misc b/changelog.d/14428.misc deleted file mode 100644 index 0582ebcf5b..0000000000 --- a/changelog.d/14428.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pyyaml from 6.0.12.1 to 6.0.12.2. diff --git a/changelog.d/14429.misc b/changelog.d/14429.misc deleted file mode 100644 index 34e767d45a..0000000000 --- a/changelog.d/14429.misc +++ /dev/null @@ -1 +0,0 @@ -Bump gitpython from 3.1.27 to 3.1.29. diff --git a/changelog.d/14430.misc b/changelog.d/14430.misc deleted file mode 100644 index 109a128618..0000000000 --- a/changelog.d/14430.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-jsonschema from 4.17.0.0 to 4.17.0.1. diff --git a/changelog.d/14431.misc b/changelog.d/14431.misc deleted file mode 100644 index 15b6d4f2ee..0000000000 --- a/changelog.d/14431.misc +++ /dev/null @@ -1 +0,0 @@ -Bump flake8 from 4.0.1 to 5.0.4. 
diff --git a/changelog.d/14433.misc b/changelog.d/14433.misc deleted file mode 100644 index 08a350b13b..0000000000 --- a/changelog.d/14433.misc +++ /dev/null @@ -1 +0,0 @@ -Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. diff --git a/changelog.d/14434.misc b/changelog.d/14434.misc deleted file mode 100644 index 75d24cd73d..0000000000 --- a/changelog.d/14434.misc +++ /dev/null @@ -1 +0,0 @@ -Make Dependabot only bump Rust deps in the lock file. diff --git a/changelog.d/14436.misc b/changelog.d/14436.misc deleted file mode 100644 index 2ec3c3d947..0000000000 --- a/changelog.d/14436.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-setuptools from 65.5.0.2 to 65.5.0.3. diff --git a/changelog.d/14437.misc b/changelog.d/14437.misc deleted file mode 100644 index 79fa202a8d..0000000000 --- a/changelog.d/14437.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pyopenssl from 22.0.10 to 22.1.0.2. diff --git a/changelog.d/14438.misc b/changelog.d/14438.misc deleted file mode 100644 index 24a7496b74..0000000000 --- a/changelog.d/14438.misc +++ /dev/null @@ -1 +0,0 @@ -Bump flake8-comprehensions from 3.8.0 to 3.10.1. diff --git a/changelog.d/14439.misc b/changelog.d/14439.misc deleted file mode 100644 index f7c9a65cf9..0000000000 --- a/changelog.d/14439.misc +++ /dev/null @@ -1 +0,0 @@ -Bump jsonschema from 4.16.0 to 4.17.0. diff --git a/changelog.d/14440.misc b/changelog.d/14440.misc deleted file mode 100644 index 7a7751ec70..0000000000 --- a/changelog.d/14440.misc +++ /dev/null @@ -1 +0,0 @@ -Bump canonicaljson from 1.6.3 to 1.6.4. diff --git a/changelog.d/14442.feature b/changelog.d/14442.feature deleted file mode 100644 index 917e7edfb3..0000000000 --- a/changelog.d/14442.feature +++ /dev/null @@ -1 +0,0 @@ -Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. diff --git a/changelog.d/14448.bugfix b/changelog.d/14448.bugfix deleted file mode 100644 index 4bf1c183f6..0000000000 --- a/changelog.d/14448.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. diff --git a/changelog.d/14451.misc b/changelog.d/14451.misc deleted file mode 100644 index ab563195b4..0000000000 --- a/changelog.d/14451.misc +++ /dev/null @@ -1 +0,0 @@ -Fix an incorrect stub return type for `PushRuleEvaluator.run`. \ No newline at end of file diff --git a/changelog.d/14453.bugfix b/changelog.d/14453.bugfix deleted file mode 100644 index 4969e5450c..0000000000 --- a/changelog.d/14453.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. diff --git a/changelog.d/14455.misc b/changelog.d/14455.misc deleted file mode 100644 index 29168ef955..0000000000 --- a/changelog.d/14455.misc +++ /dev/null @@ -1 +0,0 @@ -Add TLS support for generic worker endpoints. diff --git a/changelog.d/14461.misc b/changelog.d/14461.misc deleted file mode 100644 index cdfa577a4c..0000000000 --- a/changelog.d/14461.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of `/context` in large rooms. diff --git a/debian/changelog b/debian/changelog index 74ae2870bb..57d7b18078 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.72.0~rc1) stable; urgency=medium + + * New Synapse release 1.72.0rc1. 
+ + -- Synapse Packaging team Wed, 16 Nov 2022 15:10:59 +0000 + matrix-synapse-py3 (1.71.0) stable; urgency=medium * New Synapse release 1.71.0. diff --git a/pyproject.toml b/pyproject.toml index e07a208e67..7695ebc252 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.71.0" +version = "1.72.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From d8cc86eff484b6f570f55a5badb337080c6e4dcd Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Nov 2022 10:25:24 -0500 Subject: Remove redundant types from comments. (#14412) Remove type hints from comments which have been added as Python type hints. This helps avoid drift between comments and reality, as well as removing redundant information. Also adds some missing type hints which were simple to fill in. --- changelog.d/14412.misc | 1 + synapse/api/errors.py | 2 +- synapse/config/logger.py | 5 ++- synapse/crypto/keyring.py | 9 +++-- synapse/events/__init__.py | 3 +- synapse/federation/transport/client.py | 11 +++--- synapse/federation/transport/server/_base.py | 4 +-- synapse/handlers/e2e_keys.py | 2 +- synapse/handlers/e2e_room_keys.py | 5 +-- synapse/handlers/federation.py | 4 +-- synapse/handlers/identity.py | 2 +- synapse/handlers/oidc.py | 2 +- synapse/handlers/presence.py | 2 +- synapse/handlers/saml.py | 4 +-- synapse/http/additional_resource.py | 3 +- synapse/http/federation/matrix_federation_agent.py | 9 +++-- synapse/http/matrixfederationclient.py | 3 +- synapse/http/proxyagent.py | 20 +++++------ synapse/http/server.py | 2 +- synapse/http/site.py | 2 +- synapse/logging/context.py | 39 +++++++++++----------- synapse/logging/opentracing.py | 4 +-- synapse/module_api/__init__.py | 7 ++-- synapse/replication/http/_base.py | 2 +- synapse/rest/admin/users.py | 5 +-- synapse/rest/client/login.py | 2 +- synapse/rest/media/v1/media_repository.py | 4 +-- synapse/rest/media/v1/thumbnailer.py | 4 +-- synapse/server_notices/consent_server_notices.py | 5 ++- .../resource_limits_server_notices.py | 12 ++++--- synapse/storage/controllers/persist_events.py | 5 ++- synapse/storage/databases/main/devices.py | 2 +- synapse/storage/databases/main/e2e_room_keys.py | 8 ++--- synapse/storage/databases/main/end_to_end_keys.py | 7 ++-- synapse/storage/databases/main/events.py | 22 ++++++------ synapse/storage/databases/main/events_worker.py | 2 +- .../storage/databases/main/monthly_active_users.py | 8 ++--- synapse/storage/databases/main/registration.py | 6 ++-- synapse/storage/databases/main/room.py | 8 +++-- synapse/storage/databases/main/user_directory.py | 9 +++-- synapse/types.py | 4 +-- synapse/util/async_helpers.py | 3 +- synapse/util/caches/__init__.py | 2 +- synapse/util/caches/deferred_cache.py | 2 +- synapse/util/caches/dictionary_cache.py | 9 ++--- synapse/util/caches/expiringcache.py | 2 +- synapse/util/caches/lrucache.py | 8 ++--- synapse/util/ratelimitutils.py | 2 +- synapse/util/threepids.py | 2 +- synapse/util/wheel_timer.py | 4 +-- tests/http/__init__.py | 7 ++-- tests/replication/slave/storage/test_events.py | 7 ++-- tests/replication/test_multi_media_repo.py | 14 ++++---- .../test_resource_limits_server_notices.py | 10 +++--- tests/unittest.py | 18 +++++----- 55 files changed, 174 insertions(+), 176 deletions(-) create mode 100644 changelog.d/14412.misc diff --git a/changelog.d/14412.misc b/changelog.d/14412.misc new file 
mode 100644 index 0000000000..4da061d461 --- /dev/null +++ b/changelog.d/14412.misc @@ -0,0 +1 @@ +Remove duplicated type information from type hints. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 400dd12aba..e2cfcea0f2 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -713,7 +713,7 @@ class HttpResponseException(CodeMessageException): set to the reason code from the HTTP response. Returns: - SynapseError: + The error converted to a SynapseError. """ # try to parse the body as json, to get better errcode/msg, but # default to M_UNKNOWN with the HTTP status as the error text diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 94d1150415..5468b963a2 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -317,10 +317,9 @@ def setup_logging( Set up the logging subsystem. Args: - config (LoggingConfig | synapse.config.worker.WorkerConfig): - configuration data + config: configuration data - use_worker_options (bool): True to use the 'worker_log_config' option + use_worker_options: True to use the 'worker_log_config' option instead of 'log_config'. logBeginner: The Twisted logBeginner to use. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index c88afb2986..dd9b8089ec 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -213,7 +213,7 @@ class Keyring: def verify_json_objects_for_server( self, server_and_json: Iterable[Tuple[str, dict, int]] - ) -> List[defer.Deferred]: + ) -> List["defer.Deferred[None]"]: """Bulk verifies signatures of json objects, bulk fetching keys as necessary. @@ -226,10 +226,9 @@ class Keyring: valid. Returns: - List: for each input triplet, a deferred indicating success - or failure to verify each json object's signature for the given - server_name. The deferreds run their callbacks in the sentinel - logcontext. + For each input triplet, a deferred indicating success or failure to + verify each json object's signature for the given server_name. The + deferreds run their callbacks in the sentinel logcontext. """ return [ run_in_background( diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 030c3ca408..8aca9a3ab9 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -597,8 +597,7 @@ def _event_type_from_format_version( format_version: The event format version Returns: - type: A type that can be initialized as per the initializer of - `FrozenEvent` + A type that can be initialized as per the initializer of `FrozenEvent` """ if format_version == EventFormatVersions.ROOM_V1_V2: diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index cd39d4d111..a3cfc701cd 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -280,12 +280,11 @@ class TransportLayerClient: Note that this does not append any events to any graphs. Args: - destination (str): address of remote homeserver - room_id (str): room to join/leave - user_id (str): user to be joined/left - membership (str): one of join/leave - params (dict[str, str|Iterable[str]]): Query parameters to include in the - request. + destination: address of remote homeserver + room_id: room to join/leave + user_id: user to be joined/left + membership: one of join/leave + params: Query parameters to include in the request. Returns: Succeeds when we get a 2xx HTTP response. 
The result diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index 1db8009d6c..cdaf0d5de7 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -224,10 +224,10 @@ class BaseFederationServlet: With arguments: - origin (unicode|None): The authenticated server_name of the calling server, + origin (str|None): The authenticated server_name of the calling server, unless REQUIRE_AUTH is set to False and authentication failed. - content (unicode|None): decoded json body of the request. None if the + content (str|None): decoded json body of the request. None if the request was a GET. query (dict[bytes, list[bytes]]): Query params from the request. url-decoded diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index a9912c467d..bf1221f523 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -870,7 +870,7 @@ class E2eKeysHandler: - signatures of the user's master key by the user's devices. Args: - user_id (string): the user uploading the keys + user_id: the user uploading the keys signatures (dict[string, dict]): map of devices to signed keys Returns: diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 28dc08c22a..83f53ceb88 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -377,8 +377,9 @@ class E2eRoomKeysHandler: """Deletes a given version of the user's e2e_room_keys backup Args: - user_id(str): the user whose current backup version we're deleting - version(str): the version id of the backup being deleted + user_id: the user whose current backup version we're deleting + version: Optional. the version ID of the backup version we're deleting + If missing, we delete the current backup version info. Raises: NotFoundError: if this backup version doesn't exist """ diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 5fc3b8bc8c..188f0956ef 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1596,8 +1596,8 @@ class FederationHandler: Fetch the complexity of a remote room over federation. Args: - remote_room_hosts (list[str]): The remote servers to ask. - room_id (str): The room ID to ask about. + remote_room_hosts: The remote servers to ask. + room_id: The room ID to ask about. Returns: Dict contains the complexity diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 93d09e9939..848e46eb9b 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -711,7 +711,7 @@ class IdentityHandler: inviter_display_name: The current display name of the inviter. inviter_avatar_url: The URL of the inviter's avatar. - id_access_token (str): The access token to authenticate to the identity + id_access_token: The access token to authenticate to the identity server with Returns: diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 867973dcca..41c675f408 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -787,7 +787,7 @@ class OidcProvider: Must include an ``access_token`` field. Returns: - UserInfo: an object representing the user. + an object representing the user. 
""" logger.debug("Using the OAuth2 access_token to request userinfo") metadata = await self.load_metadata() diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 0066d63987..b7bc787636 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -201,7 +201,7 @@ class BasePresenceHandler(abc.ABC): """Get the current presence state for multiple users. Returns: - dict: `user_id` -> `UserPresenceState` + A mapping of `user_id` -> `UserPresenceState` """ states = {} missing = [] diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py index 9602f0d0bb..874860d461 100644 --- a/synapse/handlers/saml.py +++ b/synapse/handlers/saml.py @@ -441,7 +441,7 @@ class DefaultSamlMappingProvider: client_redirect_url: where the client wants to redirect to Returns: - dict: A dict containing new user attributes. Possible keys: + A dict containing new user attributes. Possible keys: * mxid_localpart (str): Required. The localpart of the user's mxid * displayname (str): The displayname of the user * emails (list[str]): Any emails for the user @@ -483,7 +483,7 @@ class DefaultSamlMappingProvider: Args: config: A dictionary containing configuration options for this provider Returns: - SamlConfig: A custom config object for this module + A custom config object for this module """ # Parse config options and use defaults where necessary mxid_source_attribute = config.get("mxid_source_attribute", "uid") diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py index 6a9f6635d2..8729630581 100644 --- a/synapse/http/additional_resource.py +++ b/synapse/http/additional_resource.py @@ -45,8 +45,7 @@ class AdditionalResource(DirectServeJsonResource): Args: hs: homeserver - handler ((twisted.web.server.Request) -> twisted.internet.defer.Deferred): - function to be called to handle the request. + handler: function to be called to handle the request. """ super().__init__() self._handler = handler diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 2f0177f1e2..0359231e7d 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -155,11 +155,10 @@ class MatrixFederationAgent: a file for a file upload). Or None if the request is to have no body. Returns: - Deferred[twisted.web.iweb.IResponse]: - fires when the header of the response has been received (regardless of the - response status code). Fails if there is any problem which prevents that - response from being received (including problems that prevent the request - from being sent). + A deferred which fires when the header of the response has been received + (regardless of the response status code). Fails if there is any problem + which prevents that response from being received (including problems that + prevent the request from being sent). """ # We use urlparse as that will set `port` to None if there is no # explicit port. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 3c35b1d2c7..b92f1d3d1a 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -951,8 +951,7 @@ class MatrixFederationHttpClient: args: query params Returns: - dict|list: Succeeds when we get a 2xx HTTP response. The - result will be the decoded JSON body. + Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. 
Raises: HttpResponseException: If we get an HTTP response code >= 300 diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 1f8227896f..18899bc6d1 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -34,7 +34,7 @@ from twisted.web.client import ( ) from twisted.web.error import SchemeNotSupported from twisted.web.http_headers import Headers -from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS +from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse from synapse.http import redact_uri from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials @@ -134,7 +134,7 @@ class ProxyAgent(_AgentBase): uri: bytes, headers: Optional[Headers] = None, bodyProducer: Optional[IBodyProducer] = None, - ) -> defer.Deferred: + ) -> "defer.Deferred[IResponse]": """ Issue a request to the server indicated by the given uri. @@ -157,17 +157,17 @@ class ProxyAgent(_AgentBase): a file upload). Or, None if the request is to have no body. Returns: - Deferred[IResponse]: completes when the header of the response has - been received (regardless of the response status code). + A deferred which completes when the header of the response has + been received (regardless of the response status code). - Can fail with: - SchemeNotSupported: if the uri is not http or https + Can fail with: + SchemeNotSupported: if the uri is not http or https - twisted.internet.error.TimeoutError if the server we are connecting - to (proxy or destination) does not accept a connection before - connectTimeout. + twisted.internet.error.TimeoutError if the server we are connecting + to (proxy or destination) does not accept a connection before + connectTimeout. - ... other things too. + ... other things too. """ uri = uri.strip() if not _VALID_URI.match(uri): diff --git a/synapse/http/server.py b/synapse/http/server.py index b26e34bceb..051a1899a0 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -267,7 +267,7 @@ class HttpServer(Protocol): request. The first argument will be the request object and subsequent arguments will be any matched groups from the regex. This should return either tuple of (code, response), or None. - servlet_classname (str): The name of the handler to be used in prometheus + servlet_classname: The name of the handler to be used in prometheus and opentracing logs. """ diff --git a/synapse/http/site.py b/synapse/http/site.py index 3dbd541fed..6a1dbf7f33 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -400,7 +400,7 @@ class SynapseRequest(Request): be sure to call finished_processing. Args: - servlet_name (str): the name of the servlet which will be + servlet_name: the name of the servlet which will be processing this request. This is used in the metrics. It is possible to update this afterwards by updating diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 6a08ffed64..f62bea968f 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -117,8 +117,7 @@ class ContextResourceUsage: """Create a new ContextResourceUsage Args: - copy_from (ContextResourceUsage|None): if not None, an object to - copy stats from + copy_from: if not None, an object to copy stats from """ if copy_from is None: self.reset() @@ -162,7 +161,7 @@ class ContextResourceUsage: """Add another ContextResourceUsage's stats to this one's. 
Args: - other (ContextResourceUsage): the other resource usage object + other: the other resource usage object """ self.ru_utime += other.ru_utime self.ru_stime += other.ru_stime @@ -342,7 +341,7 @@ class LoggingContext: called directly. Returns: - LoggingContext: the current logging context + The current logging context """ warnings.warn( "synapse.logging.context.LoggingContext.current_context() is deprecated " @@ -362,7 +361,8 @@ class LoggingContext: called directly. Args: - context(LoggingContext): The context to activate. + context: The context to activate. + Returns: The context that was previously active """ @@ -474,8 +474,7 @@ class LoggingContext: """Get resources used by this logcontext so far. Returns: - ContextResourceUsage: a *copy* of the object tracking resource - usage so far + A *copy* of the object tracking resource usage so far """ # we always return a copy, for consistency res = self._resource_usage.copy() @@ -663,7 +662,8 @@ def current_context() -> LoggingContextOrSentinel: def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel: """Set the current logging context in thread local storage Args: - context(LoggingContext): The context to activate. + context: The context to activate. + Returns: The context that was previously active """ @@ -700,7 +700,7 @@ def nested_logging_context(suffix: str) -> LoggingContext: suffix: suffix to add to the parent context's 'name'. Returns: - LoggingContext: new logging context. + A new logging context. """ curr_context = current_context() if not curr_context: @@ -898,20 +898,19 @@ def defer_to_thread( on it. Args: - reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread - the Deferred will be invoked, and whose threadpool we should use for the - function. + reactor: The reactor in whose main thread the Deferred will be invoked, + and whose threadpool we should use for the function. Normally this will be hs.get_reactor(). - f (callable): The function to call. + f: The function to call. args: positional arguments to pass to f. kwargs: keyword arguments to pass to f. Returns: - Deferred: A Deferred which fires a callback with the result of `f`, or an + A Deferred which fires a callback with the result of `f`, or an errback if `f` throws an exception. """ return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs) @@ -939,20 +938,20 @@ def defer_to_threadpool( on it. Args: - reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread - the Deferred will be invoked. Normally this will be hs.get_reactor(). + reactor: The reactor in whose main thread the Deferred will be invoked. + Normally this will be hs.get_reactor(). - threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for - running `f`. Normally this will be hs.get_reactor().getThreadPool(). + threadpool: The threadpool to use for running `f`. Normally this will be + hs.get_reactor().getThreadPool(). - f (callable): The function to call. + f: The function to call. args: positional arguments to pass to f. kwargs: keyword arguments to pass to f. Returns: - Deferred: A Deferred which fires a callback with the result of `f`, or an + A Deferred which fires a callback with the result of `f`, or an errback if `f` throws an exception. 
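
Hunk after hunk in this commit applies the same mechanical convention: where a parameter or return type used to be duplicated as `name (type):` or `Type:` inside the docstring, the type now lives only in the signature. A minimal before/after sketch of that convention — a hypothetical helper, not a function from Synapse:

```python
from typing import Optional

# Before: the type is repeated in the docstring, where it can silently
# drift out of sync with the actual code.
def get_display_name_legacy(user_id):
    """Look up a user's display name.

    Args:
        user_id (str): the Matrix user ID to look up.

    Returns:
        str|None: the display name, if any.
    """

# After: the signature is the single source of truth for types; the
# docstring only explains what the values mean.
def get_display_name(user_id: str) -> Optional[str]:
    """Look up a user's display name.

    Args:
        user_id: the Matrix user ID to look up.

    Returns:
        The display name, if any.
    """
```
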
""" curr_context = current_context() diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 8ce5a2a338..b69060854f 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -721,7 +721,7 @@ def inject_header_dict( destination: address of entity receiving the span context. Must be given unless check_destination is False. The context will only be injected if the destination matches the opentracing whitelist - check_destination (bool): If false, destination will be ignored and the context + check_destination: If false, destination will be ignored and the context will always be injected. Note: @@ -780,7 +780,7 @@ def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str destination: the name of the remote server. Returns: - dict: the active span's context if opentracing is enabled, otherwise empty. + the active span's context if opentracing is enabled, otherwise empty. """ if destination and not whitelisted_homeserver(destination): diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 30e689d00d..1adc1fd64f 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -787,7 +787,7 @@ class ModuleApi: Added in Synapse v0.25.0. Args: - access_token(str): access token + access_token: access token Returns: twisted.internet.defer.Deferred - resolves once the access token @@ -832,7 +832,7 @@ class ModuleApi: **kwargs: named args to be passed to func Returns: - Deferred[object]: result of func + Result of func """ # type-ignore: See https://github.com/python/mypy/issues/8862 return defer.ensureDeferred( @@ -924,8 +924,7 @@ class ModuleApi: to represent 'any') of the room state to acquire. Returns: - twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]: - The filtered state events in the room. + The filtered state events in the room. """ state_ids = yield defer.ensureDeferred( self._storage_controllers.state.get_current_state_ids( diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 5e661f8c73..3f4d3fc51a 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -153,7 +153,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): argument list. Returns: - dict: If POST/PUT request then dictionary must be JSON serialisable, + If POST/PUT request then dictionary must be JSON serialisable, otherwise must be appropriate for adding as query args. """ return {} diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 1951b8a9f2..6e0c44be2a 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -903,8 +903,9 @@ class PushersRestServlet(RestServlet): @user:server/pushers Returns: - pushers: Dictionary containing pushers information. - total: Number of pushers in dictionary `pushers`. + A dictionary with keys: + pushers: Dictionary containing pushers information. + total: Number of pushers in dictionary `pushers`. """ PATTERNS = admin_patterns("/users/(?P[^/]*)/pushers$") diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 05706b598c..8adced41e5 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -350,7 +350,7 @@ class LoginRestServlet(RestServlet): auth_provider_session_id: The session ID got during login from the SSO IdP. Returns: - result: Dictionary of account information after successful login. + Dictionary of account information after successful login. 
""" # Before we actually log them in we check if they've already logged in diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 328c0c5477..40b0d39eb2 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -344,8 +344,8 @@ class MediaRepository: download from remote server. Args: - server_name (str): Remote server_name where the media originated. - media_id (str): The media ID of the content (as defined by the + server_name: Remote server_name where the media originated. + media_id: The media ID of the content (as defined by the remote server). Returns: diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 9b93b9b4f6..a48a4de92a 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -138,7 +138,7 @@ class Thumbnailer: """Rescales the image to the given dimensions. Returns: - BytesIO: the bytes of the encoded image ready to be written to disk + The bytes of the encoded image ready to be written to disk """ with self._resize(width, height) as scaled: return self._encode_image(scaled, output_type) @@ -155,7 +155,7 @@ class Thumbnailer: max_height: The largest possible height. Returns: - BytesIO: the bytes of the encoded image ready to be written to disk + The bytes of the encoded image ready to be written to disk """ if width * self.height > height * self.width: scaled_width = width diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index 698ca742ed..94025ba41f 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -113,9 +113,8 @@ def copy_with_str_subst(x: Any, substitutions: Any) -> Any: """Deep-copy a structure, carrying out string substitutions on any strings Args: - x (object): structure to be copied - substitutions (object): substitutions to be made - passed into the - string '%' operator + x: structure to be copied + substitutions: substitutions to be made - passed into the string '%' operator Returns: copy of x diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 3134cd2d3d..a31a2c99a7 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -170,11 +170,13 @@ class ResourceLimitsServerNotices: room_id: The room id of the server notices room Returns: - bool: Is the room currently blocked - list: The list of pinned event IDs that are unrelated to limit blocking - This list can be used as a convenience in the case where the block - is to be lifted and the remaining pinned event references need to be - preserved + Tuple of: + Is the room currently blocked + + The list of pinned event IDs that are unrelated to limit blocking + This list can be used as a convenience in the case where the block + is to be lifted and the remaining pinned event references need to be + preserved """ currently_blocked = False pinned_state_event = None diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 48976dc570..33ffef521b 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -204,9 +204,8 @@ class _EventPeristenceQueue(Generic[_PersistResult]): process to to so, calling the per_item_callback for each item. 
Args: - room_id (str): - task (_EventPersistQueueTask): A _PersistEventsTask or - _UpdateCurrentStateTask to process. + room_id: + task: A _PersistEventsTask or _UpdateCurrentStateTask to process. Returns: the result returned by the `_per_item_callback` passed to diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index aa58c2adc3..e114c733d1 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -535,7 +535,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): limit: Maximum number of device updates to return Returns: - List: List of device update tuples: + List of device update tuples: - user_id - device_id - stream_id diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index af59be6b48..6240f9a75e 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -391,10 +391,10 @@ class EndToEndRoomKeyStore(SQLBaseStore): Returns: A dict giving the info metadata for this backup version, with fields including: - version(str) - algorithm(str) - auth_data(object): opaque dict supplied by the client - etag(int): tag of the keys in the backup + version (str) + algorithm (str) + auth_data (object): opaque dict supplied by the client + etag (int): tag of the keys in the backup """ def _get_e2e_room_keys_version_info_txn(txn: LoggingTransaction) -> JsonDict: diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 2a4f58ed92..cf33e73e2b 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -412,10 +412,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """Retrieve a number of one-time keys for a user Args: - user_id(str): id of user to get keys for - device_id(str): id of device to get keys for - key_ids(list[str]): list of key ids (excluding algorithm) to - retrieve + user_id: id of user to get keys for + device_id: id of device to get keys for + key_ids: list of key ids (excluding algorithm) to retrieve Returns: A map from (algorithm, key_id) to json string for key diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index c4acff5be6..d68f127f9b 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1279,9 +1279,10 @@ class PersistEventsStore: Pick the earliest non-outlier if there is one, else the earliest one. 
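
The docstring above compresses the de-duplication rule into one line — "pick the earliest non-outlier if there is one, else the earliest one". A hedged sketch of that rule, with `event_id` and `is_outlier()` as hypothetical stand-ins for the real event interface:

```python
from collections import OrderedDict
from typing import Iterable, List, Tuple

def dedupe_events(
    events_and_contexts: Iterable[Tuple["Event", "Context"]]
) -> List[Tuple["Event", "Context"]]:
    # Keyed by event_id; insertion order means "earliest wins" by default.
    picked: "OrderedDict[str, Tuple[Event, Context]]" = OrderedDict()
    for event, context in events_and_contexts:
        existing = picked.get(event.event_id)
        if existing is None:
            # First copy seen, i.e. the earliest one so far: keep it.
            picked[event.event_id] = (event, context)
        elif existing[0].is_outlier() and not event.is_outlier():
            # The copy we kept was an outlier: prefer the earliest
            # non-outlier copy instead.
            picked[event.event_id] = (event, context)
    return list(picked.values())
```
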
Args: - events_and_contexts (list[(EventBase, EventContext)]): + events_and_contexts: + Returns: - list[(EventBase, EventContext)]: filtered list + filtered list """ new_events_and_contexts: OrderedDict[ str, Tuple[EventBase, EventContext] @@ -1307,9 +1308,8 @@ class PersistEventsStore: """Update min_depth for each room Args: - txn (twisted.enterprise.adbapi.Connection): db connection - events_and_contexts (list[(EventBase, EventContext)]): events - we are persisting + txn: db connection + events_and_contexts: events we are persisting """ depth_updates: Dict[str, int] = {} for event, context in events_and_contexts: @@ -1580,13 +1580,11 @@ class PersistEventsStore: """Update all the miscellaneous tables for new events Args: - txn (twisted.enterprise.adbapi.Connection): db connection - events_and_contexts (list[(EventBase, EventContext)]): events - we are persisting - all_events_and_contexts (list[(EventBase, EventContext)]): all - events that we were going to persist. This includes events - we've already persisted, etc, that wouldn't appear in - events_and_context. + txn: db connection + events_and_contexts: events we are persisting + all_events_and_contexts: all events that we were going to persist. + This includes events we've already persisted, etc, that wouldn't + appear in events_and_context. inhibit_local_membership_updates: Stop the local_current_membership from being updated by these events. This should be set to True for backfilled events because backfilled events in the past do diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 467d20253d..8a104f7e93 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1589,7 +1589,7 @@ class EventsWorkerStore(SQLBaseStore): room_id: The room ID to query. Returns: - dict[str:float] of complexity version to complexity. + Map of complexity version to complexity. """ state_events = await self.get_current_state_event_counts(room_id) diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index efd136a864..db9a24db5e 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -217,7 +217,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): def _reap_users(txn: LoggingTransaction, reserved_users: List[str]) -> None: """ Args: - reserved_users (tuple): reserved users to preserve + reserved_users: reserved users to preserve """ thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) @@ -370,8 +370,8 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): should not appear in the MAU stats). 
Args: - txn (cursor): - user_id (str): user to add/update + txn: + user_id: user to add/update """ assert ( self._update_on_this_worker @@ -401,7 +401,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): add the user to the monthly active tables Args: - user_id(str): the user_id to query + user_id: the user_id to query """ assert ( self._update_on_this_worker diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 5167089e03..31f0f2bd3d 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -953,7 +953,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): """Returns user id from threepid Args: - txn (cursor): + txn: medium: threepid medium e.g. email address: threepid address e.g. me@example.com @@ -1283,8 +1283,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): """Sets an expiration date to the account with the given user ID. Args: - user_id (str): User ID to set an expiration date for. - use_delta (bool): If set to False, the expiration date for the user will be + user_id: User ID to set an expiration date for. + use_delta: If set to False, the expiration date for the user will be now + validity period. If set to True, this expiration date will be a random value in the [now + period - d ; now + period] range, d being a delta equal to 10% of the validity period. diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 7d97f8f60e..4fbaefad73 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -2057,7 +2057,8 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): Args: report_id: ID of reported event in database Returns: - event_report: json list of information from event report + JSON dict of information from an event report or None if the + report does not exist. """ def _get_event_report_txn( @@ -2130,8 +2131,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): user_id: search for user_id. Ignored if user_id is None room_id: search for room_id. Ignored if room_id is None Returns: - event_reports: json list of event reports - count: total number of event reports matching the filter criteria + Tuple of: + json list of event reports + total number of event reports matching the filter criteria """ def _get_event_reports_paginate_txn( diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index ddb25b5cea..698d6f7515 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -185,9 +185,8 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): - who should be in the user_directory. Args: - progress (dict) - batch_size (int): Maximum number of state events to process - per cycle. + progress + batch_size: Maximum number of state events to process per cycle. Returns: number of events processed. @@ -708,10 +707,10 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): Returns the rooms that a user is in. Args: - user_id(str): Must be a local user + user_id: Must be a local user Returns: - list: user_id + List of room IDs """ rows = await self.db_pool.simple_select_onecol( table="users_who_share_private_rooms", diff --git a/synapse/types.py b/synapse/types.py index 773f0438d5..f2d436ddc3 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -143,8 +143,8 @@ class Requester: Requester. 
Args: - store (DataStore): Used to convert AS ID to AS object - input (dict): A dict produced by `serialize` + store: Used to convert AS ID to AS object + input: A dict produced by `serialize` Returns: Requester diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 7f1d41eb3c..d24c4f68c4 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -217,7 +217,8 @@ async def concurrently_execute( limit: Maximum number of conccurent executions. Returns: - Deferred: Resolved when all function invocations have finished. + None, when all function invocations have finished. The return values + from those functions are discarded. """ it = iter(args) diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index f7c3a6794e..9387632d0d 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -197,7 +197,7 @@ def register_cache( resize_callback: A function which can be called to resize the cache. Returns: - CacheMetric: an object which provides inc_{hits,misses,evictions} methods + an object which provides inc_{hits,misses,evictions} methods """ if resizable: if not resize_callback: diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index bcb1cba362..bf7bd351e0 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -153,7 +153,7 @@ class DeferredCache(Generic[KT, VT]): Args: key: callback: Gets called when the entry in the cache is invalidated - update_metrics (bool): whether to update the cache hit rate metrics + update_metrics: whether to update the cache hit rate metrics Returns: A Deferred which completes with the result. Note that this may later fail diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index fa91479c97..5eaf70c7ab 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -169,10 +169,11 @@ class DictionaryCache(Generic[KT, DKT, DV]): if it is in the cache. Returns: - DictionaryEntry: If `dict_keys` is not None then `DictionaryEntry` - will contain include the keys that are in the cache. If None then - will either return the full dict if in the cache, or the empty - dict (with `full` set to False) if it isn't. + If `dict_keys` is not None then `DictionaryEntry` will contain include + the keys that are in the cache. + + If None then will either return the full dict if in the cache, or the + empty dict (with `full` set to False) if it isn't. """ if dict_keys is None: # The caller wants the full set of dictionary keys for this cache key diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index c6a5d0dfc0..01ad02af67 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -207,7 +207,7 @@ class ExpiringCache(Generic[KT, VT]): items from the cache. Returns: - bool: Whether the cache changed size or not. + Whether the cache changed size or not. """ new_size = int(self._original_max_size * factor) if new_size != self._max_size: diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index aa93109d13..dcf0eac3bf 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -389,11 +389,11 @@ class LruCache(Generic[KT, VT]): cache_name: The name of this cache, for the prometheus metrics. If unset, no metrics will be reported on this cache. 
- cache_type (type): + cache_type: type of underlying cache to be used. Typically one of dict or TreeCache. - size_callback (func(V) -> int | None): + size_callback: metrics_collection_callback: metrics collection callback. This is called early in the metrics @@ -403,7 +403,7 @@ class LruCache(Generic[KT, VT]): Ignored if cache_name is None. - apply_cache_factor_from_config (bool): If true, `max_size` will be + apply_cache_factor_from_config: If true, `max_size` will be multiplied by a cache factor derived from the homeserver config clock: @@ -796,7 +796,7 @@ class LruCache(Generic[KT, VT]): items from the cache. Returns: - bool: Whether the cache changed size or not. + Whether the cache changed size or not. """ if not self.apply_cache_factor_from_config: return False diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 9f64fed0d7..2aceb1a47f 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -183,7 +183,7 @@ class FederationRateLimiter: # Handle request ... Args: - host (str): Origin of incoming request. + host: Origin of incoming request. Returns: context manager which returns a deferred. diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py index 1e9c2faa64..54bc7589fd 100644 --- a/synapse/util/threepids.py +++ b/synapse/util/threepids.py @@ -48,7 +48,7 @@ async def check_3pid_allowed( registration: whether we want to bind the 3PID as part of registering a new user. Returns: - bool: whether the 3PID medium/address is allowed to be added to this HS + whether the 3PID medium/address is allowed to be added to this HS """ if not await hs.get_password_auth_provider().is_3pid_allowed( medium, address, registration diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index 177e198e7e..b1ec7f4bd8 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -90,10 +90,10 @@ class WheelTimer(Generic[T]): """Fetch any objects that have timed out Args: - now (ms): Current time in msec + now: Current time in msec Returns: - list: List of objects that have timed out + List of objects that have timed out """ now_key = int(now / self.bucket_size) diff --git a/tests/http/__init__.py b/tests/http/__init__.py index e74f7f5b48..093537adef 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -13,6 +13,7 @@ # limitations under the License. 
import os.path import subprocess +from typing import List from zope.interface import implementer @@ -70,14 +71,14 @@ subjectAltName = %(sanentries)s """ -def create_test_cert_file(sanlist): +def create_test_cert_file(sanlist: List[bytes]) -> str: """build an x509 certificate file Args: - sanlist: list[bytes]: a list of subjectAltName values for the cert + sanlist: a list of subjectAltName values for the cert Returns: - str: the path to the file + The path to the file """ global cert_file_count csr_filename = "server.csr" diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 96f3880923..dce71f7334 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -143,6 +143,7 @@ class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): self.persist(type="m.room.create", key="", creator=USER_ID) self.check("get_invited_rooms_for_local_user", [USER_ID_2], []) event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite") + assert event.internal_metadata.stream_ordering is not None self.replicate() @@ -230,6 +231,7 @@ class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): j2 = self.persist( type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" ) + assert j2.internal_metadata.stream_ordering is not None self.replicate() expected_pos = PersistedEventPosition( @@ -287,6 +289,7 @@ class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): ) ) self.replicate() + assert j2.internal_metadata.stream_ordering is not None event_source = RoomEventSource(self.hs) event_source.store = self.slaved_store @@ -336,10 +339,10 @@ class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): event_id = 0 - def persist(self, backfill=False, **kwargs): + def persist(self, backfill=False, **kwargs) -> FrozenEvent: """ Returns: - synapse.events.FrozenEvent: The event that was persisted. + The event that was persisted. """ event, context = self.build_event(**kwargs) diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index 13aa5eb51a..96cdf2c45b 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -15,8 +15,9 @@ import logging import os from typing import Optional, Tuple +from twisted.internet.interfaces import IOpenSSLServerConnectionCreator from twisted.internet.protocol import Factory -from twisted.protocols.tls import TLSMemoryBIOFactory +from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web.http import HTTPChannel from twisted.web.server import Request @@ -102,7 +103,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): ) # fish the test server back out of the server-side TLS protocol. - http_server = server_tls_protocol.wrappedProtocol + http_server: HTTPChannel = server_tls_protocol.wrappedProtocol # type: ignore[assignment] # give the reactor a pump to get the TLS juices flowing. 
self.reactor.pump((0.1,)) @@ -238,16 +239,15 @@ def get_connection_factory(): return test_server_connection_factory -def _build_test_server(connection_creator): +def _build_test_server( + connection_creator: IOpenSSLServerConnectionCreator, +) -> TLSMemoryBIOProtocol: """Construct a test server This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol Args: - connection_creator (IOpenSSLServerConnectionCreator): thing to build - SSL connections - sanlist (list[bytes]): list of the SAN entries for the cert returned - by the server + connection_creator: thing to build SSL connections Returns: TLSMemoryBIOProtocol diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index bf403045e9..7cbc40736c 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Tuple from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor @@ -350,14 +351,15 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): self.assertTrue(notice_in_room, "No server notice in room") - def _trigger_notice_and_join(self): + def _trigger_notice_and_join(self) -> Tuple[str, str, str]: """Creates enough active users to hit the MAU limit and trigger a system notice about it, then joins the system notices room with one of the users created. Returns: - user_id (str): The ID of the user that joined the room. - tok (str): The access token of the user that joined the room. - room_id (str): The ID of the room that's been joined. + A tuple of: + user_id: The ID of the user that joined the room. + tok: The access token of the user that joined the room. + room_id: The ID of the room that's been joined. """ user_id = None tok = None diff --git a/tests/unittest.py b/tests/unittest.py index 5116be338e..a120c2976c 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -360,13 +360,13 @@ class HomeserverTestCase(TestCase): store.db_pool.updates.do_next_background_update(False), by=0.1 ) - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock): """ Make and return a homeserver. Args: reactor: A Twisted Reactor, or something that pretends to be one. - clock (synapse.util.Clock): The Clock, associated with the reactor. + clock: The Clock, associated with the reactor. Returns: A homeserver suitable for testing. @@ -426,9 +426,8 @@ class HomeserverTestCase(TestCase): Args: reactor: A Twisted Reactor, or something that pretends to be one. - clock (synapse.util.Clock): The Clock, associated with the reactor. - homeserver (synapse.server.HomeServer): The HomeServer to test - against. + clock: The Clock, associated with the reactor. + homeserver: The HomeServer to test against. Function to optionally be overridden in subclasses. """ @@ -452,11 +451,10 @@ class HomeserverTestCase(TestCase): given content. Args: - method (bytes/unicode): The HTTP request method ("verb"). - path (bytes/unicode): The HTTP path, suitably URL encoded (e.g. - escaped UTF-8 & spaces and such). - content (bytes or dict): The body of the request. JSON-encoded, if - a dict. + method: The HTTP request method ("verb"). + path: The HTTP path, suitably URL encoded (e.g. 
escaped UTF-8 & spaces + and such). content (bytes or dict): The body of the request. + JSON-encoded, if a dict. shorthand: Whether to try and be helpful and prefix the given URL with the usual REST API path, if it doesn't contain it. federation_auth_origin: if set to not-None, we will add a fake -- cgit 1.5.1 From 618e4ab81b70e37bdb8e9224bd84fcfe4b15bdea Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 16 Nov 2022 15:25:35 +0000 Subject: Fix an invalid comparison of `UserPresenceState` to `str` (#14393) --- changelog.d/14393.bugfix | 1 + synapse/handlers/presence.py | 2 +- tests/handlers/test_presence.py | 41 +++++++++++++++++++++++++++++++++++------ tests/module_api/test_api.py | 3 +++ tests/replication/_base.py | 7 ++++++- 5 files changed, 46 insertions(+), 8 deletions(-) create mode 100644 changelog.d/14393.bugfix diff --git a/changelog.d/14393.bugfix b/changelog.d/14393.bugfix new file mode 100644 index 0000000000..97177bc62f --- /dev/null +++ b/changelog.d/14393.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.58.0 where a user with presence state 'org.matrix.msc3026.busy' would mistakenly be set to 'online' when calling `/sync` or `/events` on a worker process. \ No newline at end of file diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index b7bc787636..cf08737d11 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -478,7 +478,7 @@ class WorkerPresenceHandler(BasePresenceHandler): return _NullContextManager() prev_state = await self.current_state_for_user(user_id) - if prev_state != PresenceState.BUSY: + if prev_state.state != PresenceState.BUSY: # We set state here but pass ignore_status_msg = True as we don't want to # cause the status message to be cleared. 
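
The one-line `presence.py` change above deserves unpacking: `current_state_for_user` returns a `UserPresenceState` object, not a string, so comparing the whole object against a `PresenceState` constant could never be equal and the busy check was silently skipped. A hedged, self-contained illustration with simplified stand-ins (not Synapse's real classes):

```python
import attr

class PresenceState:
    ONLINE = "online"
    BUSY = "org.matrix.msc3026.busy"

@attr.s(slots=True, frozen=True, auto_attribs=True)
class UserPresenceState:
    user_id: str
    state: str

prev_state = UserPresenceState(user_id="@test:server", state=PresenceState.BUSY)

# The buggy check: an attrs object never compares equal to a plain string,
# so this condition held even when the user really was busy.
assert (prev_state != PresenceState.BUSY) is True

# The fixed check compares the `state` field instead.
assert prev_state.state == PresenceState.BUSY
```
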
# Note that this causes last_active_ts to be incremented which is not diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index c96dc6caf2..c5981ff965 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -15,6 +15,7 @@ from typing import Optional from unittest.mock import Mock, call +from parameterized import parameterized from signedjson.key import generate_signing_key from synapse.api.constants import EventTypes, Membership, PresenceState @@ -37,6 +38,7 @@ from synapse.rest.client import room from synapse.types import UserID, get_domain_from_id from tests import unittest +from tests.replication._base import BaseMultiWorkerStreamTestCase class PresenceUpdateTestCase(unittest.HomeserverTestCase): @@ -505,7 +507,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): self.assertEqual(state, new_state) -class PresenceHandlerTestCase(unittest.HomeserverTestCase): +class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): def prepare(self, reactor, clock, hs): self.presence_handler = hs.get_presence_handler() self.clock = hs.get_clock() @@ -716,20 +718,47 @@ class PresenceHandlerTestCase(unittest.HomeserverTestCase): # our status message should be the same as it was before self.assertEqual(state.status_msg, status_msg) - def test_set_presence_from_syncing_keeps_busy(self): - """Test that presence set by syncing doesn't affect busy status""" - # while this isn't the default - self.presence_handler._busy_presence_enabled = True + @parameterized.expand([(False,), (True,)]) + @unittest.override_config( + { + "experimental_features": { + "msc3026_enabled": True, + }, + } + ) + def test_set_presence_from_syncing_keeps_busy(self, test_with_workers: bool): + """Test that presence set by syncing doesn't affect busy status + Args: + test_with_workers: If True, check the presence state of the user by calling + /sync against a worker, rather than the main process. + """ user_id = "@test:server" status_msg = "I'm busy!" + # By default, we call /sync against the main process. + worker_to_sync_against = self.hs + if test_with_workers: + # Create a worker and use it to handle /sync traffic instead. + # This is used to test that presence changes get replicated from workers + # to the main process correctly. + worker_to_sync_against = self.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "presence_writer"} + ) + + # Set presence to BUSY self._set_presencestate_with_status_msg(user_id, PresenceState.BUSY, status_msg) + # Perform a sync with a presence state other than busy. This should NOT change + # our presence status; we only change from busy if we explicitly set it via + # /presence/*. self.get_success( - self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE) + worker_to_sync_against.get_presence_handler().user_syncing( + user_id, True, PresenceState.ONLINE + ) ) + # Check against the main process that the user's presence did not change. state = self.get_success( self.presence_handler.get_state(UserID.from_string(user_id)) ) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 02cef6f876..058ca57e55 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -778,8 +778,11 @@ def _test_sending_local_online_presence_to_local_user( worker process. The test users will still sync with the main process. 
The purpose of testing with a worker is to check whether a Synapse module running on a worker can inform other workers/ the main process that they should include additional presence when a user next syncs. + If this argument is True, `test_case` MUST be an instance of BaseMultiWorkerStreamTestCase. """ if test_with_workers: + assert isinstance(test_case, BaseMultiWorkerStreamTestCase) + # Create a worker process to make module_api calls against worker_hs = test_case.make_worker_hs( "synapse.app.generic_worker", {"worker_name": "presence_writer"} diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 121f3d8d65..3029a16dda 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -542,8 +542,13 @@ class FakeRedisPubSubProtocol(Protocol): self.send("OK") elif command == b"GET": self.send(None) + + # Connection keep-alives. + elif command == b"PING": + self.send("PONG") + else: - raise Exception("Unknown command") + raise Exception(f"Unknown command: {command}") def send(self, msg): """Send a message back to the client.""" -- cgit 1.5.1 From e6b5ca1a9f44569e0a81e2e889a2a917dc29ec7e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 16 Nov 2022 16:32:56 +0000 Subject: Update changelog --- CHANGES.md | 27 ++------------------------- 1 file changed, 2 insertions(+), 25 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 05cb7b1267..49ad212e15 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.72.0rc1 (2022-11-16) ============================== +Please note that we now only support PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life. + Features -------- @@ -54,41 +56,16 @@ Internal Changes - Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455)) - Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313)) - Add override ability to `complement.sh` command line script to request certain types of workers. ([\#14324](https://github.com/matrix-org/synapse/issues/14324)) -- Bump flake8-bugbear from 22.9.23 to 22.10.27. ([\#14329](https://github.com/matrix-org/synapse/issues/14329)) - Enabling testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339)) -- Bump twisted from 22.8.0 to 22.10.0. ([\#14340](https://github.com/matrix-org/synapse/issues/14340)) - Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346)) - Use a maintained Github action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351)) - Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375)) -- Bump dawidd6/action-download-artifact from 2.15.0 to 2.24.0. ([\#14378](https://github.com/matrix-org/synapse/issues/14378)) -- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14379](https://github.com/matrix-org/synapse/issues/14379)) -- Bump regex from 1.6.0 to 1.7.0. ([\#14380](https://github.com/matrix-org/synapse/issues/14380)) -- Bump pyo3 from 0.17.2 to 0.17.3. ([\#14381](https://github.com/matrix-org/synapse/issues/14381)) -- Bump types-setuptools from 65.5.0.1 to 65.5.0.2. 
([\#14382](https://github.com/matrix-org/synapse/issues/14382)) -- Bump pillow from 9.2.0 to 9.3.0. ([\#14383](https://github.com/matrix-org/synapse/issues/14383)) -- Bump cryptography from 36.0.1 to 38.0.3. ([\#14384](https://github.com/matrix-org/synapse/issues/14384)) -- Bump types-pyyaml from 6.0.12 to 6.0.12.1. ([\#14385](https://github.com/matrix-org/synapse/issues/14385)) -- Bump types-jsonschema from 4.4.6 to 4.17.0.0. ([\#14386](https://github.com/matrix-org/synapse/issues/14386)) - Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394)) -- Bump dawidd6/action-download-artifact from 2.24.0 to 2.24.1. ([\#14398](https://github.com/matrix-org/synapse/issues/14398)) - Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410)) - Clean-up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411)) - Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417)) -- Bump dawidd6/action-download-artifact from 2.24.1 to 2.24.2. ([\#14424](https://github.com/matrix-org/synapse/issues/14424)) -- Bump actions/upload-artifact from 2 to 3. ([\#14425](https://github.com/matrix-org/synapse/issues/14425)) -- Bump blake2 from 0.10.4 to 0.10.5. ([\#14426](https://github.com/matrix-org/synapse/issues/14426)) -- Bump attrs from 21.4.0 to 22.1.0. ([\#14427](https://github.com/matrix-org/synapse/issues/14427)) -- Bump types-pyyaml from 6.0.12.1 to 6.0.12.2. ([\#14428](https://github.com/matrix-org/synapse/issues/14428)) -- Bump gitpython from 3.1.27 to 3.1.29. ([\#14429](https://github.com/matrix-org/synapse/issues/14429)) -- Bump types-jsonschema from 4.17.0.0 to 4.17.0.1. ([\#14430](https://github.com/matrix-org/synapse/issues/14430)) -- Bump flake8 from 4.0.1 to 5.0.4. ([\#14431](https://github.com/matrix-org/synapse/issues/14431)) - Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433)) - Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434)) -- Bump types-setuptools from 65.5.0.2 to 65.5.0.3. ([\#14436](https://github.com/matrix-org/synapse/issues/14436)) -- Bump types-pyopenssl from 22.0.10 to 22.1.0.2. ([\#14437](https://github.com/matrix-org/synapse/issues/14437)) -- Bump flake8-comprehensions from 3.8.0 to 3.10.1. ([\#14438](https://github.com/matrix-org/synapse/issues/14438)) -- Bump jsonschema from 4.16.0 to 4.17.0. ([\#14439](https://github.com/matrix-org/synapse/issues/14439)) -- Bump canonicaljson from 1.6.3 to 1.6.4. ([\#14440](https://github.com/matrix-org/synapse/issues/14440)) - Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451)) - Improve performance of `/context` in large rooms. 
([\#14461](https://github.com/matrix-org/synapse/issues/14461)) -- cgit 1.5.1 From f0d18772f3bb33671e26b95b62c0313aa1cfb1ee Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 16 Nov 2022 16:37:22 +0000 Subject: Point to our deprecation policy --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 49ad212e15..6dccf4ea9e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,7 @@ Synapse 1.72.0rc1 (2022-11-16) ============================== -Please note that we now only support PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life. +Please note that we now only support PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life, c.f. our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md). Features -------- -- cgit 1.5.1 From 7f44f3aee3d3bb930c3df0af4663c40acfa3f40c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 16 Nov 2022 16:58:03 +0000 Subject: Update changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 6dccf4ea9e..0fa6f7fab7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -19,7 +19,7 @@ Bugfixes - Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347)) - Fix a bug introduced in 1.66 which would not send certain pushrules to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356)) - Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361)) -- Fix refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364)) +- Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364)) - Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369)) - Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374)) - Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409)) @@ -30,7 +30,7 @@ Bugfixes Updates to the Docker image --------------------------- -- Add all Stream Writer worker types to configure_workers_and_start.py. ([\#14197](https://github.com/matrix-org/synapse/issues/14197)) +- Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197)) - Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294)) -- cgit 1.5.1 From c15e9a0edb696990365ac5a4e5be847b5ae23921 Mon Sep 17 00:00:00 2001 From: realtyem Date: Wed, 16 Nov 2022 16:16:25 -0600 Subject: Remove need for `worker_main_http_uri` setting to use /keys/upload. 
(#14400) --- changelog.d/14400.misc | 1 + docker/configure_workers_and_start.py | 5 +- docs/workers.md | 7 +-- synapse/app/generic_worker.py | 103 +--------------------------------- synapse/config/workers.py | 6 ++ synapse/replication/http/devices.py | 67 ++++++++++++++++++++++ synapse/rest/client/keys.py | 68 ++++++++++++++++------ 7 files changed, 130 insertions(+), 127 deletions(-) create mode 100644 changelog.d/14400.misc diff --git a/changelog.d/14400.misc b/changelog.d/14400.misc new file mode 100644 index 0000000000..6e025329c4 --- /dev/null +++ b/changelog.d/14400.misc @@ -0,0 +1 @@ +Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 62b1bab297..c1e1544536 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -213,10 +213,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "listener_resources": ["client", "replication"], "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"], "shared_extra_conf": {}, - "worker_extra_conf": ( - "worker_main_http_uri: http://127.0.0.1:%d" - % (MAIN_PROCESS_HTTP_LISTENER_PORT,) - ), + "worker_extra_conf": "", }, "account_data": { "app": "synapse.app.generic_worker", diff --git a/docs/workers.md b/docs/workers.md index 7ee8801161..4604650803 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -135,8 +135,8 @@ In the config file for each worker, you must specify: [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option with an `http` listener. - * If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for - the main process (`worker_main_http_uri`). + * **Synapse 1.71 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for + the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.72 and newer. For example: @@ -221,7 +221,6 @@ information. ^/_matrix/client/(api/v1|r0|v3|unstable)/search$ # Encryption requests - # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri` ^/_matrix/client/(r0|v3|unstable)/keys/query$ ^/_matrix/client/(r0|v3|unstable)/keys/changes$ ^/_matrix/client/(r0|v3|unstable)/keys/claim$ @@ -376,7 +375,7 @@ responsible for - persisting them to the DB, and finally - updating the events stream. -Because load is sharded in this way, you *must* restart all worker instances when +Because load is sharded in this way, you *must* restart all worker instances when adding or removing event persisters. An `event_persister` should not be mistaken for an `event_creator`. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 1d9aef45c2..74909b7d4a 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -14,14 +14,12 @@ # limitations under the License. 
 import logging
 import sys
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, List

-from twisted.internet import address
 from twisted.web.resource import Resource

 import synapse
 import synapse.events
-from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
 from synapse.api.urls import (
     CLIENT_API_PREFIX,
     FEDERATION_PREFIX,
@@ -43,8 +41,6 @@ from synapse.config.logger import setup_logging
 from synapse.config.server import ListenerConfig
 from synapse.federation.transport.server import TransportLayerServer
 from synapse.http.server import JsonResource, OptionsResource
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.http.site import SynapseRequest
 from synapse.logging.context import LoggingContext
 from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
@@ -70,12 +66,12 @@ from synapse.rest.client import (
     versions,
     voip,
 )
-from synapse.rest.client._base import client_patterns
 from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
 from synapse.rest.client.devices import DevicesRestServlet
 from synapse.rest.client.keys import (
     KeyChangesServlet,
     KeyQueryServlet,
+    KeyUploadServlet,
     OneTimeKeyServlet,
 )
 from synapse.rest.client.register import (
@@ -132,107 +128,12 @@ from synapse.storage.databases.main.transactions import TransactionWorkerStore
 from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
 from synapse.storage.databases.main.user_directory import UserDirectoryStore
 from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
-from synapse.types import JsonDict
 from synapse.util import SYNAPSE_VERSION
 from synapse.util.httpresourcetree import create_resource_tree

 logger = logging.getLogger("synapse.app.generic_worker")


-class KeyUploadServlet(RestServlet):
-    """An implementation of the `KeyUploadServlet` that responds to read only
-    requests, but otherwise proxies through to the master instance.
-    """
-
-    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
-
-    def __init__(self, hs: HomeServer):
-        """
-        Args:
-            hs: server
-        """
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.store = hs.get_datastores().main
-        self.http_client = hs.get_simple_http_client()
-        self.main_uri = hs.config.worker.worker_main_http_uri
-
-    async def on_POST(
-        self, request: SynapseRequest, device_id: Optional[str]
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        user_id = requester.user.to_string()
-        body = parse_json_object_from_request(request)
-
-        if device_id is not None:
-            # passing the device_id here is deprecated; however, we allow it
-            # for now for compatibility with older clients.
-            if requester.device_id is not None and device_id != requester.device_id:
-                logger.warning(
-                    "Client uploading keys for a different device "
-                    "(logged in as %s, uploading for %s)",
-                    requester.device_id,
-                    device_id,
-                )
-        else:
-            device_id = requester.device_id
-
-        if device_id is None:
-            raise SynapseError(
-                400, "To upload keys, you must pass device_id when authenticating"
-            )
-
-        if body:
-            # They're actually trying to upload something, proxy to main synapse.
-
-            # Proxy headers from the original request, such as the auth headers
-            # (in case the access token is there) and the original IP /
-            # User-Agent of the request.
- headers: Dict[bytes, List[bytes]] = { - header: list(request.requestHeaders.getRawHeaders(header, [])) - for header in (b"Authorization", b"User-Agent") - } - # Add the previous hop to the X-Forwarded-For header. - x_forwarded_for = list( - request.requestHeaders.getRawHeaders(b"X-Forwarded-For", []) - ) - # we use request.client here, since we want the previous hop, not the - # original client (as returned by request.getClientAddress()). - if isinstance(request.client, (address.IPv4Address, address.IPv6Address)): - previous_host = request.client.host.encode("ascii") - # If the header exists, add to the comma-separated list of the first - # instance of the header. Otherwise, generate a new header. - if x_forwarded_for: - x_forwarded_for = [x_forwarded_for[0] + b", " + previous_host] - x_forwarded_for.extend(x_forwarded_for[1:]) - else: - x_forwarded_for = [previous_host] - headers[b"X-Forwarded-For"] = x_forwarded_for - - # Replicate the original X-Forwarded-Proto header. Note that - # XForwardedForRequest overrides isSecure() to give us the original protocol - # used by the client, as opposed to the protocol used by our upstream proxy - # - which is what we want here. - headers[b"X-Forwarded-Proto"] = [ - b"https" if request.isSecure() else b"http" - ] - - try: - result = await self.http_client.post_json_get_json( - self.main_uri + request.uri.decode("ascii"), body, headers=headers - ) - except HttpResponseException as e: - raise e.to_synapse_error() from e - except RequestSendFailed as e: - raise SynapseError(502, "Failed to talk to master") from e - - return 200, result - else: - # Just interested in counts. - result = await self.store.count_e2e_one_time_keys(user_id, device_id) - return 200, {"one_time_key_counts": result} - - class GenericWorkerSlavedStore( # FIXME(#3714): We need to add UserDirectoryStore as we write directly # rather than going via the correct worker. diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 88b3168cbc..c4e2273a95 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -162,7 +162,13 @@ class WorkerConfig(Config): self.worker_name = config.get("worker_name", self.worker_app) self.instance_name = self.worker_name or "master" + # FIXME: Remove this check after a suitable amount of time. self.worker_main_http_uri = config.get("worker_main_http_uri", None) + if self.worker_main_http_uri is not None: + logger.warning( + "The config option worker_main_http_uri is unused since Synapse 1.72. " + "It can be safely removed from your configuration." + ) # This option is really only here to support `--manhole` command line # argument. diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index 3d63645726..c21629def8 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -18,6 +18,7 @@ from typing import TYPE_CHECKING, Tuple from twisted.web.server import Request from synapse.http.server import HttpServer +from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict @@ -78,5 +79,71 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint): return 200, user_devices +class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint): + """Ask master to upload keys for the user and send them out over federation to + update other servers. 
+
+    For now, only the master is permitted to handle key upload requests;
+    any worker can handle key query requests (since they're read-only).
+
+    Calls e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys) on
+    the main process to accomplish this.
+
+    Defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
+    Request format (borrowed and expanded from KeyUploadServlet):
+
+    POST /_synapse/replication/upload_keys_for_user
+
+    {
+        "user_id": "<user_id>",
+        "device_id": "<device_id>",
+        "keys": {
+            ....this part can be found in KeyUploadServlet in rest/client/keys.py....
+        }
+    }
+
+    Response is equivalent to `/_matrix/client/v3/keys/upload` found in KeyUploadServlet
+
+    """
+
+    NAME = "upload_keys_for_user"
+    PATH_ARGS = ()
+    CACHE = False
+
+    def __init__(self, hs: "HomeServer"):
+        super().__init__(hs)
+
+        self.e2e_keys_handler = hs.get_e2e_keys_handler()
+        self.store = hs.get_datastores().main
+        self.clock = hs.get_clock()
+
+    @staticmethod
+    async def _serialize_payload(  # type: ignore[override]
+        user_id: str, device_id: str, keys: JsonDict
+    ) -> JsonDict:
+
+        return {
+            "user_id": user_id,
+            "device_id": device_id,
+            "keys": keys,
+        }
+
+    async def _handle_request(  # type: ignore[override]
+        self, request: Request
+    ) -> Tuple[int, JsonDict]:
+        content = parse_json_object_from_request(request)
+
+        user_id = content["user_id"]
+        device_id = content["device_id"]
+        keys = content["keys"]
+
+        results = await self.e2e_keys_handler.upload_keys_for_user(
+            user_id, device_id, keys
+        )
+
+        return 200, results
+
+
 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ReplicationUserDevicesResyncRestServlet(hs).register(http_server)
+    ReplicationUploadKeysForUserRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py
index f653d2a3e1..ee038c7192 100644
--- a/synapse/rest/client/keys.py
+++ b/synapse/rest/client/keys.py
@@ -27,6 +27,7 @@ from synapse.http.servlet import (
 )
 from synapse.http.site import SynapseRequest
 from synapse.logging.opentracing import log_kv, set_tag
+from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
 from synapse.rest.client._base import client_patterns, interactive_auth_handler
 from synapse.types import JsonDict, StreamToken
 from synapse.util.cancellation import cancellable
@@ -43,24 +44,48 @@ class KeyUploadServlet(RestServlet):
     Content-Type: application/json

     {
-        "device_keys": {
-            "user_id": "<user_id>",
-            "device_id": "<device_id>",
-            "valid_until_ts": <millisecond_timestamp>,
-            "algorithms": [
-                "m.olm.curve25519-aes-sha2",
-            ]
-            "keys": {
-                "<algorithm>:<device_id>": "<key_base64>",
+      "device_keys": {
+        "user_id": "<user_id>",
+        "device_id": "<device_id>",
+        "valid_until_ts": <millisecond_timestamp>,
+        "algorithms": [
+          "m.olm.curve25519-aes-sha2",
+        ],
+        "keys": {
+          "<algorithm>:<device_id>": "<key_base64>",
+        },
+        "signatures": {
+          "<user_id>": {
+            "<algorithm>:<device_id>": "<signature_base64>"
+          }
+        }
+      },
+      "fallback_keys": {
+        "<algorithm>:<device_id>": "<key_base64>",
+        "signed_<algorithm>:<device_id>": {
+          "fallback": true,
+          "key": "<key_base64>",
+          "signatures": {
+            "<user_id>": {
+              "<algorithm>:<device_id>": "<signature_base64>"
+            }
+          }
+        }
+      },
+      "one_time_keys": {
+        "<algorithm>:<key_id>": "<key_base64>"
       },
-            "signatures:" {
-                "<user_id>" {
-                    "<algorithm>:<device_id>": "<signature_base64>"
-                }
-            }
-        },
-        "one_time_keys": {
-            "<algorithm>:<key_id>": "<key_base64>"
-        },
     }
+
+    Response, e.g.:
+
+    {
+      "one_time_key_counts": {
+        "curve25519": 10,
+        "signed_curve25519": 20
+      }
+    }
+
     """

     PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

@@ -71,6 +96,13 @@ class KeyUploadServlet(RestServlet):
         self.e2e_keys_handler = hs.get_e2e_keys_handler()
         self.device_handler = hs.get_device_handler()

+        if hs.config.worker.worker_app is None:
+            # On the main process: upload keys directly via the handler.
+            self.key_uploader = self.e2e_keys_handler.upload_keys_for_user
+        else:
+            # On a worker: proxy the upload to the main process over replication.
+
self.key_uploader = ReplicationUploadKeysForUserRestServlet.make_client(hs) + async def on_POST( self, request: SynapseRequest, device_id: Optional[str] ) -> Tuple[int, JsonDict]: @@ -109,8 +141,8 @@ class KeyUploadServlet(RestServlet): 400, "To upload keys, you must pass device_id when authenticating" ) - result = await self.e2e_keys_handler.upload_keys_for_user( - user_id, device_id, body + result = await self.key_uploader( + user_id=user_id, device_id=device_id, keys=body ) return 200, result -- cgit 1.5.1 From 115f0eb2334b13665e5c112bd87f95ea393c9047 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 16 Nov 2022 22:16:46 +0000 Subject: Reintroduce #14376, with bugfix for monoliths (#14468) * Add tests for StreamIdGenerator * Drive-by: annotate all defs * Revert "Revert "Remove slaved id tracker (#14376)" (#14463)" This reverts commit d63814fd736fed5d3d45ff3af5e6d3bfae50c439, which in turn reverted 36097e88c4da51fce6556a58c49bd675f4cf20ab. This restores the latter. * Fix StreamIdGenerator not handling unpersisted IDs Spotted by @erikjohnston. Closes #14456. * Changelog Co-authored-by: Nick Mills-Barrett Co-authored-by: Erik Johnston --- changelog.d/14376.misc | 1 + changelog.d/14468.misc | 1 + mypy.ini | 3 + synapse/replication/slave/__init__.py | 13 -- synapse/replication/slave/storage/__init__.py | 13 -- .../slave/storage/_slaved_id_tracker.py | 50 ------- synapse/storage/databases/main/account_data.py | 30 ++-- synapse/storage/databases/main/devices.py | 36 ++--- synapse/storage/databases/main/events_worker.py | 35 ++--- synapse/storage/databases/main/push_rule.py | 17 +-- synapse/storage/databases/main/pusher.py | 24 ++- synapse/storage/databases/main/receipts.py | 18 +-- synapse/storage/util/id_generators.py | 13 +- tests/storage/test_id_generators.py | 162 +++++++++++++++++++-- 14 files changed, 230 insertions(+), 186 deletions(-) create mode 100644 changelog.d/14376.misc create mode 100644 changelog.d/14468.misc delete mode 100644 synapse/replication/slave/__init__.py delete mode 100644 synapse/replication/slave/storage/__init__.py delete mode 100644 synapse/replication/slave/storage/_slaved_id_tracker.py diff --git a/changelog.d/14376.misc b/changelog.d/14376.misc new file mode 100644 index 0000000000..2ca326fea6 --- /dev/null +++ b/changelog.d/14376.misc @@ -0,0 +1 @@ +Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). diff --git a/changelog.d/14468.misc b/changelog.d/14468.misc new file mode 100644 index 0000000000..2ca326fea6 --- /dev/null +++ b/changelog.d/14468.misc @@ -0,0 +1 @@ +Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). diff --git a/mypy.ini b/mypy.ini index 8f1141a239..53512b2584 100644 --- a/mypy.ini +++ b/mypy.ini @@ -117,6 +117,9 @@ disallow_untyped_defs = True [mypy-tests.state.test_profile] disallow_untyped_defs = True +[mypy-tests.storage.test_id_generators] +disallow_untyped_defs = True + [mypy-tests.storage.test_profile] disallow_untyped_defs = True diff --git a/synapse/replication/slave/__init__.py b/synapse/replication/slave/__init__.py deleted file mode 100644 index f43a360a80..0000000000 --- a/synapse/replication/slave/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/synapse/replication/slave/storage/__init__.py b/synapse/replication/slave/storage/__init__.py deleted file mode 100644 index f43a360a80..0000000000 --- a/synapse/replication/slave/storage/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py deleted file mode 100644 index 8f3f953ed4..0000000000 --- a/synapse/replication/slave/storage/_slaved_id_tracker.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import List, Optional, Tuple - -from synapse.storage.database import LoggingDatabaseConnection -from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id - - -class SlavedIdTracker(AbstractStreamIdTracker): - """Tracks the "current" stream ID of a stream with a single writer. - - See `AbstractStreamIdTracker` for more details. - - Note that this class does not work correctly when there are multiple - writers. 
- """ - - def __init__( - self, - db_conn: LoggingDatabaseConnection, - table: str, - column: str, - extra_tables: Optional[List[Tuple[str, str]]] = None, - step: int = 1, - ): - self.step = step - self._current = _load_current_id(db_conn, table, column, step) - if extra_tables: - for table, column in extra_tables: - self.advance(None, _load_current_id(db_conn, table, column)) - - def advance(self, instance_name: Optional[str], new_id: int) -> None: - self._current = (max if self.step > 0 else min)(self._current, new_id) - - def get_current_token(self) -> int: - return self._current - - def get_current_token_for_writer(self, instance_name: str) -> int: - return self.get_current_token() diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index c38b8a9e5a..282687ebce 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -27,7 +27,6 @@ from typing import ( ) from synapse.api.constants import AccountDataTypes -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import AccountDataStream, TagAccountDataStream from synapse.storage._base import db_to_json from synapse.storage.database import ( @@ -68,12 +67,11 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) # to write account data. A value of `True` implies that `_account_data_id_gen` # is an `AbstractStreamIdGenerator` and not just a tracker. self._account_data_id_gen: AbstractStreamIdTracker + self._can_write_to_account_data = ( + self._instance_name in hs.config.worker.writers.account_data + ) if isinstance(database.engine, PostgresEngine): - self._can_write_to_account_data = ( - self._instance_name in hs.config.worker.writers.account_data - ) - self._account_data_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, @@ -95,21 +93,13 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets # updated over replication. (Multiple writers are not supported for # SQLite). 
- if self._instance_name in hs.config.worker.writers.account_data: - self._can_write_to_account_data = True - self._account_data_id_gen = StreamIdGenerator( - db_conn, - "room_account_data", - "stream_id", - extra_tables=[("room_tags_revisions", "stream_id")], - ) - else: - self._account_data_id_gen = SlavedIdTracker( - db_conn, - "room_account_data", - "stream_id", - extra_tables=[("room_tags_revisions", "stream_id")], - ) + self._account_data_id_gen = StreamIdGenerator( + db_conn, + "room_account_data", + "stream_id", + extra_tables=[("room_tags_revisions", "stream_id")], + is_writer=self._instance_name in hs.config.worker.writers.account_data, + ) account_max = self.get_max_account_data_stream_id() self._account_data_stream_cache = StreamChangeCache( diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index e114c733d1..57230df5ae 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -38,7 +38,6 @@ from synapse.logging.opentracing import ( whitelisted_homeserver, ) from synapse.metrics.background_process_metrics import wrap_as_background_process -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( @@ -86,28 +85,19 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ): super().__init__(database, db_conn, hs) - if hs.config.worker.worker_app is None: - self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( - db_conn, - "device_lists_stream", - "stream_id", - extra_tables=[ - ("user_signature_stream", "stream_id"), - ("device_lists_outbound_pokes", "stream_id"), - ("device_lists_changes_in_room", "stream_id"), - ], - ) - else: - self._device_list_id_gen = SlavedIdTracker( - db_conn, - "device_lists_stream", - "stream_id", - extra_tables=[ - ("user_signature_stream", "stream_id"), - ("device_lists_outbound_pokes", "stream_id"), - ("device_lists_changes_in_room", "stream_id"), - ], - ) + # In the worker store this is an ID tracker which we overwrite in the non-worker + # class below that is used on the main process. + self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + db_conn, + "device_lists_stream", + "stream_id", + extra_tables=[ + ("user_signature_stream", "stream_id"), + ("device_lists_outbound_pokes", "stream_id"), + ("device_lists_changes_in_room", "stream_id"), + ], + is_writer=hs.config.worker.worker_app is None, + ) # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker). 
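The `is_writer` pattern applied across the stores in this commit (a single `StreamIdGenerator` that either allocates new stream IDs, when it is the writer, or merely records the IDs it learns about over replication, when it is not) is easiest to see in isolation. The following is a minimal, self-contained sketch of that behaviour, not Synapse's actual implementation: the real generator is database-backed, loads its initial value from the relevant table, and hands out IDs through an async context manager.

    import threading
    from collections import OrderedDict
    from contextlib import contextmanager

    class SketchStreamIdGenerator:
        """In-memory model of a single-writer stream ID generator/tracker."""

        def __init__(self, current: int, is_writer: bool = True, step: int = 1):
            assert step != 0
            self._lock = threading.Lock()
            self._step = step
            self._current = current
            self._is_writer = is_writer
            # In-flight (allocated but not yet persisted) IDs, oldest first.
            self._unfinished: "OrderedDict[int, int]" = OrderedDict()

        @contextmanager
        def get_next(self):
            # Writers allocate the next ID; it counts as persisted only once
            # the context manager exits.
            with self._lock:
                self._current += self._step
                next_id = self._current
                self._unfinished[next_id] = next_id
            try:
                yield next_id
            finally:
                with self._lock:
                    self._unfinished.pop(next_id)

        def advance(self, new_id: int) -> None:
            # Only non-writers advance in response to replication traffic.
            if self._is_writer:
                raise Exception("Replication is not supported by writer instances")
            self._current = (max if self._step > 0 else min)(self._current, new_id)

        def get_current_token(self) -> int:
            if not self._is_writer:
                return self._current
            with self._lock:
                if self._unfinished:
                    # Everything strictly before the oldest in-flight ID is safe.
                    return next(iter(self._unfinished)) - self._step
                return self._current

    # The writer hands out 124, but its public token stays at 123 until the
    # new ID has been persisted; a non-writer instead advances when told to.
    writer = SketchStreamIdGenerator(current=123)
    with writer.get_next() as next_id:
        assert next_id == 124
        assert writer.get_current_token() == 123
    assert writer.get_current_token() == 124

    tracker = SketchStreamIdGenerator(current=123, is_writer=False)
    assert tracker.get_current_token() == 123
    tracker.advance(124)
    assert tracker.get_current_token() == 124

This is the same property that the new test cases in `tests/storage/test_id_generators.py` further down this patch assert against the real implementation: the writer's public token only advances once the oldest in-flight ID has been persisted.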
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 8a104f7e93..01e935edef 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -59,7 +59,6 @@ from synapse.metrics.background_process_metrics import ( run_as_background_process, wrap_as_background_process, ) -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import BackfillStream from synapse.replication.tcp.streams.events import EventsStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause @@ -213,26 +212,20 @@ class EventsWorkerStore(SQLBaseStore): # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets # updated over replication. (Multiple writers are not supported for # SQLite). - if hs.get_instance_name() in hs.config.worker.writers.events: - self._stream_id_gen = StreamIdGenerator( - db_conn, - "events", - "stream_ordering", - ) - self._backfill_id_gen = StreamIdGenerator( - db_conn, - "events", - "stream_ordering", - step=-1, - extra_tables=[("ex_outlier_stream", "event_stream_ordering")], - ) - else: - self._stream_id_gen = SlavedIdTracker( - db_conn, "events", "stream_ordering" - ) - self._backfill_id_gen = SlavedIdTracker( - db_conn, "events", "stream_ordering", step=-1 - ) + self._stream_id_gen = StreamIdGenerator( + db_conn, + "events", + "stream_ordering", + is_writer=hs.get_instance_name() in hs.config.worker.writers.events, + ) + self._backfill_id_gen = StreamIdGenerator( + db_conn, + "events", + "stream_ordering", + step=-1, + extra_tables=[("ex_outlier_stream", "event_stream_ordering")], + is_writer=hs.get_instance_name() in hs.config.worker.writers.events, + ) events_max = self._stream_id_gen.get_current_token() curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict( diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 8ae10f6127..12ad44dbb3 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -30,7 +30,6 @@ from typing import ( from synapse.api.errors import StoreError from synapse.config.homeserver import ExperimentalConfig -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import PushRulesStream from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( @@ -111,14 +110,14 @@ class PushRulesWorkerStore( ): super().__init__(database, db_conn, hs) - if hs.config.worker.worker_app is None: - self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator( - db_conn, "push_rules_stream", "stream_id" - ) - else: - self._push_rules_stream_id_gen = SlavedIdTracker( - db_conn, "push_rules_stream", "stream_id" - ) + # In the worker store this is an ID tracker which we overwrite in the non-worker + # class below that is used on the main process. 
+ self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + db_conn, + "push_rules_stream", + "stream_id", + is_writer=hs.config.worker.worker_app is None, + ) push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict( db_conn, diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index 4a01562d45..fee37b9ce4 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -27,7 +27,6 @@ from typing import ( ) from synapse.push import PusherConfig, ThrottleParams -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import PushersStream from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( @@ -59,20 +58,15 @@ class PusherWorkerStore(SQLBaseStore): ): super().__init__(database, db_conn, hs) - if hs.config.worker.worker_app is None: - self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( - db_conn, - "pushers", - "id", - extra_tables=[("deleted_pushers", "stream_id")], - ) - else: - self._pushers_id_gen = SlavedIdTracker( - db_conn, - "pushers", - "id", - extra_tables=[("deleted_pushers", "stream_id")], - ) + # In the worker store this is an ID tracker which we overwrite in the non-worker + # class below that is used on the main process. + self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + db_conn, + "pushers", + "id", + extra_tables=[("deleted_pushers", "stream_id")], + is_writer=hs.config.worker.worker_app is None, + ) self.db_pool.updates.register_background_update_handler( "remove_deactivated_pushers", diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index fbf27497ec..a580e4bdda 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -27,7 +27,6 @@ from typing import ( ) from synapse.api.constants import EduTypes -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import ReceiptsStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( @@ -61,6 +60,9 @@ class ReceiptsWorkerStore(SQLBaseStore): hs: "HomeServer", ): self._instance_name = hs.get_instance_name() + + # In the worker store this is an ID tracker which we overwrite in the non-worker + # class below that is used on the main process. self._receipts_id_gen: AbstractStreamIdTracker if isinstance(database.engine, PostgresEngine): @@ -87,14 +89,12 @@ class ReceiptsWorkerStore(SQLBaseStore): # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets # updated over replication. (Multiple writers are not supported for # SQLite). 
- if hs.get_instance_name() in hs.config.worker.writers.receipts: - self._receipts_id_gen = StreamIdGenerator( - db_conn, "receipts_linearized", "stream_id" - ) - else: - self._receipts_id_gen = SlavedIdTracker( - db_conn, "receipts_linearized", "stream_id" - ) + self._receipts_id_gen = StreamIdGenerator( + db_conn, + "receipts_linearized", + "stream_id", + is_writer=hs.get_instance_name() in hs.config.worker.writers.receipts, + ) super().__init__(database, db_conn, hs) diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 2dfe4c0b66..0d7108f01b 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -186,11 +186,13 @@ class StreamIdGenerator(AbstractStreamIdGenerator): column: str, extra_tables: Iterable[Tuple[str, str]] = (), step: int = 1, + is_writer: bool = True, ) -> None: assert step != 0 self._lock = threading.Lock() self._step: int = step self._current: int = _load_current_id(db_conn, table, column, step) + self._is_writer = is_writer for table, column in extra_tables: self._current = (max if step > 0 else min)( self._current, _load_current_id(db_conn, table, column, step) @@ -204,9 +206,11 @@ class StreamIdGenerator(AbstractStreamIdGenerator): self._unfinished_ids: OrderedDict[int, int] = OrderedDict() def advance(self, instance_name: str, new_id: int) -> None: - # `StreamIdGenerator` should only be used when there is a single writer, - # so replication should never happen. - raise Exception("Replication is not supported by StreamIdGenerator") + # Advance should never be called on a writer instance, only over replication + if self._is_writer: + raise Exception("Replication is not supported by writer StreamIdGenerator") + + self._current = (max if self._step > 0 else min)(self._current, new_id) def get_next(self) -> AsyncContextManager[int]: with self._lock: @@ -249,6 +253,9 @@ class StreamIdGenerator(AbstractStreamIdGenerator): return _AsyncCtxManagerWrapper(manager()) def get_current_token(self) -> int: + if not self._is_writer: + return self._current + with self._lock: if self._unfinished_ids: return next(iter(self._unfinished_ids)) - self._step diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 2d8d1f860f..d6a2b8d274 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -16,15 +16,157 @@ from typing import List, Optional from twisted.test.proto_helpers import MemoryReactor from synapse.server import HomeServer -from synapse.storage.database import DatabasePool, LoggingTransaction +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.storage.engines import IncorrectDatabaseSetup -from synapse.storage.util.id_generators import MultiWriterIdGenerator +from synapse.storage.types import Cursor +from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.util import Clock from tests.unittest import HomeserverTestCase from tests.utils import USE_POSTGRES_FOR_TESTS +class StreamIdGeneratorTestCase(HomeserverTestCase): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.db_pool: DatabasePool = self.store.db_pool + + self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db)) + + def _setup_db(self, txn: LoggingTransaction) -> None: + txn.execute( + """ + CREATE TABLE foobar ( + stream_id BIGINT NOT NULL, + data TEXT + ); 
+        """
+        )
+        txn.execute("INSERT INTO foobar VALUES (123, 'hello world');")
+
+    def _create_id_generator(self) -> StreamIdGenerator:
+        def _create(conn: LoggingDatabaseConnection) -> StreamIdGenerator:
+            return StreamIdGenerator(
+                db_conn=conn,
+                table="foobar",
+                column="stream_id",
+            )
+
+        return self.get_success_or_raise(self.db_pool.runWithConnection(_create))
+
+    def test_initial_value(self) -> None:
+        """Check that we read the current token from the DB."""
+        id_gen = self._create_id_generator()
+        self.assertEqual(id_gen.get_current_token(), 123)
+
+    def test_single_gen_next(self) -> None:
+        """Check that we correctly increment the current token from the DB."""
+        id_gen = self._create_id_generator()
+
+        async def test_gen_next() -> None:
+            async with id_gen.get_next() as next_id:
+                # We haven't persisted `next_id` yet; current token is still 123
+                self.assertEqual(id_gen.get_current_token(), 123)
+                # But we did learn what the next value is
+                self.assertEqual(next_id, 124)
+
+            # Once the context manager closes we assume that the `next_id` has been
+            # written to the DB.
+            self.assertEqual(id_gen.get_current_token(), 124)
+
+        self.get_success(test_gen_next())
+
+    def test_multiple_gen_nexts(self) -> None:
+        """Check that we handle overlapping calls to gen_next sensibly."""
+        id_gen = self._create_id_generator()
+
+        async def test_gen_next() -> None:
+            ctx1 = id_gen.get_next()
+            ctx2 = id_gen.get_next()
+            ctx3 = id_gen.get_next()
+
+            # Request three new stream IDs.
+            self.assertEqual(await ctx1.__aenter__(), 124)
+            self.assertEqual(await ctx2.__aenter__(), 125)
+            self.assertEqual(await ctx3.__aenter__(), 126)
+
+            # None are persisted: current token unchanged.
+            self.assertEqual(id_gen.get_current_token(), 123)
+
+            # Persist each in turn.
+            await ctx1.__aexit__(None, None, None)
+            self.assertEqual(id_gen.get_current_token(), 124)
+            await ctx2.__aexit__(None, None, None)
+            self.assertEqual(id_gen.get_current_token(), 125)
+            await ctx3.__aexit__(None, None, None)
+            self.assertEqual(id_gen.get_current_token(), 126)
+
+        self.get_success(test_gen_next())
+
+    def test_multiple_gen_nexts_closed_in_different_order(self) -> None:
+        """Check that we handle overlapping calls to gen_next, even when their IDs
+        are created and persisted in different orders."""
+        id_gen = self._create_id_generator()
+
+        async def test_gen_next() -> None:
+            ctx1 = id_gen.get_next()
+            ctx2 = id_gen.get_next()
+            ctx3 = id_gen.get_next()
+
+            # Request three new stream IDs.
+            self.assertEqual(await ctx1.__aenter__(), 124)
+            self.assertEqual(await ctx2.__aenter__(), 125)
+            self.assertEqual(await ctx3.__aenter__(), 126)
+
+            # None are persisted: current token unchanged.
+            self.assertEqual(id_gen.get_current_token(), 123)
+
+            # Persist them in a different order, starting with 126 from ctx3.
+            await ctx3.__aexit__(None, None, None)
+            # We haven't persisted 124 from ctx1 yet---current token is still 123.
+            self.assertEqual(id_gen.get_current_token(), 123)
+
+            # Now persist 124 from ctx1.
+            await ctx1.__aexit__(None, None, None)
+            # Current token is then 124, waiting for 125 to be persisted.
+            self.assertEqual(id_gen.get_current_token(), 124)
+
+            # Finally persist 125 from ctx2.
+            await ctx2.__aexit__(None, None, None)
+            # Current token is then 126 (skipping over 125).
+            self.assertEqual(id_gen.get_current_token(), 126)
+
+        self.get_success(test_gen_next())
+
+    def test_gen_next_while_still_waiting_for_persistence(self) -> None:
+        """Check that gen_next hands out the smallest unused ID while earlier
+        IDs are still waiting to be persisted."""
+        id_gen = self._create_id_generator()
+
+        async def test_gen_next() -> None:
+            ctx1 = id_gen.get_next()
+            ctx2 = id_gen.get_next()
+            ctx3 = id_gen.get_next()
+
+            # Request two new stream IDs.
+            self.assertEqual(await ctx1.__aenter__(), 124)
+            self.assertEqual(await ctx2.__aenter__(), 125)
+
+            # Persist ctx2 first.
+            await ctx2.__aexit__(None, None, None)
+            # Still waiting on ctx1's ID to be persisted.
+            self.assertEqual(id_gen.get_current_token(), 123)
+
+            # Now request a third stream ID. It should be 126 (the smallest ID
+            # that we've not yet handed out).
+            self.assertEqual(await ctx3.__aenter__(), 126)
+
+        self.get_success(test_gen_next())
+
+
 class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
     if not USE_POSTGRES_FOR_TESTS:
         skip = "Requires Postgres"
@@ -48,9 +190,9 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         )

     def _create_id_generator(
-        self, instance_name="master", writers: Optional[List[str]] = None
+        self, instance_name: str = "master", writers: Optional[List[str]] = None
     ) -> MultiWriterIdGenerator:
-        def _create(conn):
+        def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
             return MultiWriterIdGenerator(
                 conn,
                 self.db_pool,
@@ -446,7 +588,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self._insert_row_with_id("master", 3)

         # Now we add a row *without* updating the stream ID
-        def _insert(txn):
+        def _insert(txn: Cursor) -> None:
             txn.execute("INSERT INTO foobar VALUES (26, 'master')")

         self.get_success(self.db_pool.runInteraction("_insert", _insert))
@@ -481,9 +623,9 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         )

     def _create_id_generator(
-        self, instance_name="master", writers: Optional[List[str]] = None
+        self, instance_name: str = "master", writers: Optional[List[str]] = None
     ) -> MultiWriterIdGenerator:
-        def _create(conn):
+        def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
             return MultiWriterIdGenerator(
                 conn,
                 self.db_pool,
@@ -617,9 +759,9 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         )

     def _create_id_generator(
-        self, instance_name="master", writers: Optional[List[str]] = None
+        self, instance_name: str = "master", writers: Optional[List[str]] = None
     ) -> MultiWriterIdGenerator:
-        def _create(conn):
+        def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
             return MultiWriterIdGenerator(
                 conn,
                 self.db_pool,
@@ -641,7 +783,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         instance_name: str,
         number: int,
         update_stream_table: bool = True,
-    ):
+    ) -> None:
         """Insert N rows as the given instance, inserting with stream IDs
         pulled from the postgres sequence.
""" -- cgit 1.5.1 From 75888c2b1f5ec1c865c4690627bf101f7e0dffb9 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Thu, 17 Nov 2022 17:01:14 +0100 Subject: Faster joins: do not wait for full state when creating events to send (#14403) Signed-off-by: Mathieu Velten --- changelog.d/14403.misc | 1 + synapse/events/builder.py | 1 + synapse/state/__init__.py | 8 +++++++- 3 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14403.misc diff --git a/changelog.d/14403.misc b/changelog.d/14403.misc new file mode 100644 index 0000000000..ff28a2712a --- /dev/null +++ b/changelog.d/14403.misc @@ -0,0 +1 @@ +Faster joins: do not wait for full state when creating events to send. diff --git a/synapse/events/builder.py b/synapse/events/builder.py index e2ee10dd3d..d62906043f 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -128,6 +128,7 @@ class EventBuilder: state_filter=StateFilter.from_types( auth_types_for_event(self.room_version, self) ), + await_full_state=False, ) auth_event_ids = self._event_auth_handler.compute_auth_events( self, state_ids diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 6f3dd0463e..833ffec3de 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -190,6 +190,7 @@ class StateHandler: room_id: str, event_ids: Collection[str], state_filter: Optional[StateFilter] = None, + await_full_state: bool = True, ) -> StateMap[str]: """Fetch the state after each of the given event IDs. Resolve them and return. @@ -200,13 +201,18 @@ class StateHandler: Args: room_id: the room_id containing the given events. event_ids: the events whose state should be fetched and resolved. + await_full_state: if `True`, will block if we do not yet have complete state + at the given `event_id`s, regardless of whether `state_filter` is + satisfied by partial state. Returns: the state dict (a mapping from (event_type, state_key) -> event_id) which holds the resolution of the states after the given event IDs. """ logger.debug("calling resolve_state_groups from compute_state_after_events") - ret = await self.resolve_state_groups_for_events(room_id, event_ids) + ret = await self.resolve_state_groups_for_events( + room_id, event_ids, await_full_state + ) return await ret.get_state(self._state_storage_controller, state_filter) async def get_current_user_ids_in_room( -- cgit 1.5.1 From e7132c3f81acbc50c1923cad7eeab96d3b2e05fd Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 17 Nov 2022 16:09:56 +0000 Subject: Fix check to ignore blank lines in incoming TCP replication (#14449) --- changelog.d/14449.misc | 1 + synapse/replication/tcp/protocol.py | 2 +- synapse/storage/database.py | 6 +++--- 3 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14449.misc diff --git a/changelog.d/14449.misc b/changelog.d/14449.misc new file mode 100644 index 0000000000..320c0b6fae --- /dev/null +++ b/changelog.d/14449.misc @@ -0,0 +1 @@ +Fix type logic in TCP replication code that prevented correctly ignoring blank commands. 
\ No newline at end of file diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 7763ffb2d0..56a5c21910 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -245,7 +245,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): self._parse_and_dispatch_line(line) def _parse_and_dispatch_line(self, line: bytes) -> None: - if line.strip() == "": + if line.strip() == b"": # Ignore blank lines return diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 4717c9728a..0dc44b246c 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -569,15 +569,15 @@ class DatabasePool: retcols=["update_name"], desc="check_background_updates", ) - updates = [x["update_name"] for x in updates] + background_update_names = [x["update_name"] for x in updates] for table, update_name in UNIQUE_INDEX_BACKGROUND_UPDATES.items(): - if update_name not in updates: + if update_name not in background_update_names: logger.debug("Now safe to upsert in %s", table) self._unsafe_to_upsert_tables.discard(table) # If there's any updates still running, reschedule to run. - if updates: + if background_update_names: self._clock.call_later( 15.0, run_as_background_process, -- cgit 1.5.1 From 01a052789266179c70c10ea6a6253c64fd9990d2 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 17 Nov 2022 16:11:08 +0000 Subject: Fix version that `worker_main_http_uri` is redundant from (#14476) * Fix version that `worker_main_http_uri` is redundant from * Changelog --- changelog.d/14476.misc | 1 + docs/workers.md | 4 ++-- synapse/config/workers.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14476.misc diff --git a/changelog.d/14476.misc b/changelog.d/14476.misc new file mode 100644 index 0000000000..6e025329c4 --- /dev/null +++ b/changelog.d/14476.misc @@ -0,0 +1 @@ +Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. diff --git a/docs/workers.md b/docs/workers.md index 4604650803..27e54c5846 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -135,8 +135,8 @@ In the config file for each worker, you must specify: [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option with an `http` listener. - * **Synapse 1.71 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for - the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.72 and newer. + * **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for + the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer. For example: diff --git a/synapse/config/workers.py b/synapse/config/workers.py index c4e2273a95..913b83e174 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -166,7 +166,7 @@ class WorkerConfig(Config): self.worker_main_http_uri = config.get("worker_main_http_uri", None) if self.worker_main_http_uri is not None: logger.warning( - "The config option worker_main_http_uri is unused since Synapse 1.72. " + "The config option worker_main_http_uri is unused since Synapse 1.73. " "It can be safely removed from your configuration." 
) -- cgit 1.5.1 From ae22e6e94f61b28715950d8a6c04c6ac44ac150d Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 17 Nov 2022 18:34:09 +0000 Subject: Enable 'strict_equality' checking for mypy (#14452) --- changelog.d/14452.misc | 1 + mypy.ini | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/14452.misc diff --git a/changelog.d/14452.misc b/changelog.d/14452.misc new file mode 100644 index 0000000000..cb190c0823 --- /dev/null +++ b/changelog.d/14452.misc @@ -0,0 +1 @@ +Enable mypy's [`strict_equality` check](https://mypy.readthedocs.io/en/stable/command_line.html#cmdoption-mypy-strict-equality) by default. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 53512b2584..4cd61e0484 100644 --- a/mypy.ini +++ b/mypy.ini @@ -11,6 +11,7 @@ warn_unused_ignores = True local_partial_types = True no_implicit_optional = True disallow_untyped_defs = True +strict_equality = True files = docker/, -- cgit 1.5.1 From 78e23eea056cbf75b9478140f17699195dd490f2 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 18 Nov 2022 18:10:01 +0000 Subject: Reduce default third party invite rate limit to 216 invites per day (#14487) The previous default was the same as the `rc_message` rate limit, which defaults to 17,280 per day. Signed-off-by: Sean Quah --- changelog.d/14487.misc | 1 + synapse/config/ratelimiting.py | 5 +---- 2 files changed, 2 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14487.misc diff --git a/changelog.d/14487.misc b/changelog.d/14487.misc new file mode 100644 index 0000000000..f6b47a1d8e --- /dev/null +++ b/changelog.d/14487.misc @@ -0,0 +1 @@ +Reduce default third party invite rate limit to 216 invites per day. diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 1ed001e105..5c13fe428a 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -150,8 +150,5 @@ class RatelimitConfig(Config): self.rc_third_party_invite = RatelimitSettings( config.get("rc_third_party_invite", {}), - defaults={ - "per_second": self.rc_message.per_second, - "burst_count": self.rc_message.burst_count, - }, + defaults={"per_second": 0.0025, "burst_count": 5}, ) -- cgit 1.5.1 From e1b15f25f3ad4b45b381544ca6b3cd2caf43d25d Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 18 Nov 2022 19:56:42 +0000 Subject: Fix /key/v2/server calls with URL-unsafe key IDs (#14490) Co-authored-by: Patrick Cloke --- changelog.d/14490.misc | 1 + synapse/crypto/keyring.py | 2 +- tests/crypto/test_keyring.py | 12 ++++++++++++ 3 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14490.misc diff --git a/changelog.d/14490.misc b/changelog.d/14490.misc new file mode 100644 index 0000000000..c0a4daa885 --- /dev/null +++ b/changelog.d/14490.misc @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 0.9 where it would fail to fetch server keys whose IDs contain a forward slash. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index dd9b8089ec..ed15f88350 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -857,7 +857,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher): response = await self.client.get_json( destination=server_name, path="/_matrix/key/v2/server/" - + urllib.parse.quote(requested_key_id), + + urllib.parse.quote(requested_key_id, safe=""), ignore_backoff=True, # we only give the remote server 10s to respond. 
It should be an
             # easy request to handle, so if it doesn't reply within 10s, it's
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index 820a1a54e2..63628aa6b0 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -469,6 +469,18 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase):
         keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0))
         self.assertEqual(keys, {})

+    def test_keyid_containing_forward_slash(self) -> None:
+        """We should URL-encode any URL-unsafe characters in key IDs.
+
+        Detects https://github.com/matrix-org/synapse/issues/14488.
+        """
+        fetcher = ServerKeyFetcher(self.hs)
+        self.get_success(fetcher.get_keys("example.com", ["key/potato"], 0))
+
+        self.http_client.get_json.assert_called_once()
+        args, kwargs = self.http_client.get_json.call_args
+        self.assertEqual(kwargs["path"], "/_matrix/key/v2/server/key%2Fpotato")
+

 class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
-- cgit 1.5.1
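The root cause of the bug fixed in this final commit is that `urllib.parse.quote` treats "/" as safe by default, so a key ID such as `key/potato` was previously spliced into the request path unescaped, producing two path segments. A short illustration of the standard-library behaviour (plain Python, independent of Synapse):

    from urllib.parse import quote

    key_id = "key/potato"

    # By default "/" is in the safe set, so the slash survives and changes
    # the structure of the resulting URL path.
    assert quote(key_id) == "key/potato"

    # With safe="", reserved characters are percent-encoded too, yielding
    # the single path segment that the /_matrix/key/v2/server endpoint
    # expects (as the new test above asserts).
    assert quote(key_id, safe="") == "key%2Fpotato"

    assert "/_matrix/key/v2/server/" + quote(key_id, safe="") == (
        "/_matrix/key/v2/server/key%2Fpotato"
    )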